Container Logging and Monitoring

Complete guide to logging and monitoring containerized Node.js applications, covering log drivers, structured logging, centralized aggregation, metrics collection, and alerting strategies.

Containers are ephemeral. When a container stops, its logs disappear unless you have planned for it. In production, you cannot SSH into containers and tail log files — you need structured logging piped to centralized systems, metrics scraped by monitoring tools, and alerts that wake you up before your users notice. This guide covers the complete observability stack for containerized Node.js applications.

Prerequisites

  • Docker and Docker Compose
  • Node.js 18+ with Express.js
  • Basic familiarity with logging concepts
  • Understanding of JSON and structured data

Docker Log Drivers

Docker captures stdout and stderr from each container and routes them through a configurable log driver.

Default: json-file

# Check current log driver
docker info --format '{{.LoggingDriver}}'
# json-file

# View logs
docker logs myapp
docker logs --follow myapp
docker logs --tail 100 myapp
docker logs --since 2h myapp

The json-file driver stores logs at /var/lib/docker/containers/<id>/<id>-json.log. Each line is a JSON object:

{"log":"Server running on port 3000\n","stream":"stdout","time":"2026-02-13T10:00:00.123456789Z"}

Configure log rotation to prevent disk exhaustion:

{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  }
}

Add this to /etc/docker/daemon.json or per-container:

# docker-compose.yml
services:
  api:
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"

Syslog Driver

Route container logs to syslog:

services:
  api:
    logging:
      driver: syslog
      options:
        syslog-address: "tcp://logserver:514"
        tag: "myapp-api"

Fluentd Driver

Send logs directly to Fluentd for processing:

services:
  api:
    logging:
      driver: fluentd
      options:
        fluentd-address: "localhost:24224"
        tag: "myapp.api"
        fluentd-async-connect: "true"

None Driver

Disable logging entirely (use for high-throughput containers where logging overhead matters):

services:
  benchmark:
    logging:
      driver: none

Structured Logging in Node.js

Unstructured logs are useless at scale. When you have 50 containers generating logs, you need machine-parseable structured data.

Basic Structured Logger

// logger.js
var os = require('os');

var LOG_LEVELS = { error: 0, warn: 1, info: 2, debug: 3 };
var currentLevel = LOG_LEVELS[process.env.LOG_LEVEL || 'info'];

function createLogger(component) {
  var hostname = os.hostname();
  var pid = process.pid;

  function log(level, message, meta) {
    if (LOG_LEVELS[level] > currentLevel) return;

    var entry = {
      timestamp: new Date().toISOString(),
      level: level,
      component: component,
      message: message,
      hostname: hostname,
      pid: pid
    };

    if (meta) {
      Object.keys(meta).forEach(function(key) {
        entry[key] = meta[key];
      });
    }

    var output = level === 'error' ? console.error : console.log;
    output(JSON.stringify(entry));
  }

  return {
    error: function(msg, meta) { log('error', msg, meta); },
    warn: function(msg, meta) { log('warn', msg, meta); },
    info: function(msg, meta) { log('info', msg, meta); },
    debug: function(msg, meta) { log('debug', msg, meta); }
  };
}

module.exports = createLogger;
// Usage
var createLogger = require('./logger');
var logger = createLogger('api');

logger.info('Server started', { port: 3000 });
// {"timestamp":"2026-02-13T10:00:00.000Z","level":"info","component":"api","message":"Server started","hostname":"abc123","pid":1,"port":3000}

logger.error('Database connection failed', { host: 'postgres', error: 'ECONNREFUSED' });
// {"timestamp":"2026-02-13T10:00:01.000Z","level":"error","component":"api","message":"Database connection failed","hostname":"abc123","pid":1,"host":"postgres","error":"ECONNREFUSED"}

Request Logging Middleware

// middleware/request-logger.js
var createLogger = require('../logger');
var logger = createLogger('http');

function requestLogger(req, res, next) {
  var start = Date.now();
  var requestId = req.headers['x-request-id'] || generateId();

  // Attach request ID for downstream logging
  req.requestId = requestId;
  res.set('X-Request-Id', requestId);

  res.on('finish', function() {
    var duration = Date.now() - start;
    var meta = {
      requestId: requestId,
      method: req.method,
      path: req.path,
      query: Object.keys(req.query).length > 0 ? req.query : undefined,
      status: res.statusCode,
      duration: duration,
      contentLength: res.get('content-length'),
      userAgent: req.get('user-agent'),
      ip: req.ip
    };

    if (res.statusCode >= 500) {
      logger.error('Request failed', meta);
    } else if (res.statusCode >= 400) {
      logger.warn('Client error', meta);
    } else {
      logger.info('Request completed', meta);
    }
  });

  next();
}

function generateId() {
  return Date.now().toString(36) + Math.random().toString(36).slice(2, 10);
}

module.exports = requestLogger;
// app.js
var express = require('express');
var requestLogger = require('./middleware/request-logger');

var app = express();
app.use(requestLogger);

Output for each request:

{"timestamp":"2026-02-13T10:00:05.000Z","level":"info","component":"http","message":"Request completed","requestId":"m1abc123","method":"GET","path":"/api/users","status":200,"duration":45,"contentLength":"1234","ip":"172.18.0.1"}

Error Logging with Stack Traces

// middleware/error-handler.js
var createLogger = require('../logger');
var logger = createLogger('error');

function errorHandler(err, req, res, next) {
  var meta = {
    requestId: req.requestId,
    method: req.method,
    path: req.path,
    stack: err.stack,
    code: err.code,
    name: err.name
  };

  logger.error(err.message, meta);

  res.status(err.statusCode || 500).json({
    error: process.env.NODE_ENV === 'production' ? 'Internal Server Error' : err.message
  });
}

module.exports = errorHandler;

Winston for Production

For more advanced needs, Winston provides transports, formatting, and rotation:

// logger-winston.js
var winston = require('winston');

var logger = winston.createLogger({
  level: process.env.LOG_LEVEL || 'info',
  format: winston.format.combine(
    winston.format.timestamp(),
    winston.format.errors({ stack: true }),
    winston.format.json()
  ),
  defaultMeta: {
    service: process.env.SERVICE_NAME || 'api',
    hostname: require('os').hostname()
  },
  transports: [
    new winston.transports.Console()
  ]
});

module.exports = logger;
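
Usage mirrors the hand-rolled logger: pass a message plus a metadata object, and the JSON format merges them into a single line on stdout. A minimal sketch:

// Usage
var logger = require('./logger-winston');

logger.info('Server started', { port: 3000 });
// One JSON line containing level, message, timestamp, the defaultMeta fields, and port

logger.error(new Error('Database connection failed'));
// winston.format.errors({ stack: true }) adds the stack trace to the entry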

Always log to stdout/stderr in containers. Let the container runtime handle log routing. Never write to log files inside containers.

Centralized Log Aggregation

ELK Stack (Elasticsearch, Logstash, Kibana)

# docker-compose.logging.yml
version: "3.8"

services:
  elasticsearch:
    image: elasticsearch:8.12.0
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    volumes:
      - esdata:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"

  logstash:
    image: logstash:8.12.0
    volumes:
      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "5044:5044"
    depends_on:
      - elasticsearch

  kibana:
    image: kibana:8.12.0
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch

  filebeat:
    image: elastic/filebeat:8.12.0
    user: root
    volumes:
      - ./filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    depends_on:
      - logstash

volumes:
  esdata:
# filebeat.yml
filebeat.inputs:
  - type: container
    paths:
      - '/var/lib/docker/containers/*/*.log'
    processors:
      - add_docker_metadata:
          host: "unix:///var/run/docker.sock"

output.logstash:
  hosts: ["logstash:5044"]

Loki + Grafana (Lightweight Alternative)

Loki is a log aggregation system designed for containers. It indexes labels, not log content, making it dramatically cheaper to run than Elasticsearch.

# docker-compose.loki.yml
version: "3.8"

services:
  loki:
    image: grafana/loki:latest
    ports:
      - "3100:3100"
    volumes:
      - loki-data:/loki

  promtail:
    image: grafana/promtail:latest
    volumes:
      - /var/log:/var/log
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - ./promtail.yml:/etc/promtail/config.yml
    command: -config.file=/etc/promtail/config.yml

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafana-data:/var/lib/grafana

volumes:
  loki-data:
  grafana-data:
# promtail.yml
server:
  http_listen_port: 9080

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://loki:3100/loki/api/v1/push

scrape_configs:
  - job_name: docker
    static_configs:
      - targets: ["localhost"]
        labels:
          job: docker
          __path__: /var/lib/docker/containers/*/*-json.log
    pipeline_stages:
      - docker: {}
      - json:
          expressions:
            level: level
            component: component
      - labels:
          level:
          component:
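
Because only labels such as job, level, and component are indexed, queries select streams by label and then scan the matching lines. As an illustrative sketch, you can query Loki's HTTP API from Node.js (assuming the Loki container above is reachable on localhost:3100):

// loki-query.js - fetch recent error-level lines from Loki (illustrative)
var query = '{job="docker", level="error"}';
var url = 'http://localhost:3100/loki/api/v1/query_range' +
  '?query=' + encodeURIComponent(query) +
  '&limit=50';

fetch(url)
  .then(function(response) { return response.json(); })
  .then(function(body) {
    // Each result is a stream (a unique label set) with [timestamp, line] pairs
    body.data.result.forEach(function(stream) {
      stream.values.forEach(function(entry) {
        console.log(entry[1]);
      });
    });
  });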

Metrics Collection

Application Metrics Endpoint

// metrics/index.js
var os = require('os');
var v8 = require('v8');

var metrics = {
  requests: { total: 0, errors: 0 },
  latency: { sum: 0, count: 0 },
  connections: { active: 0 }
};

function metricsMiddleware(req, res, next) {
  metrics.requests.total++;
  metrics.connections.active++;
  var start = Date.now();

  res.on('finish', function() {
    metrics.connections.active--;
    metrics.latency.sum += (Date.now() - start);
    metrics.latency.count++;

    if (res.statusCode >= 500) {
      metrics.requests.errors++;
    }
  });

  next();
}

function getMetrics() {
  var memUsage = process.memoryUsage();
  var heapStats = v8.getHeapStatistics();

  return {
    process: {
      uptime: Math.round(process.uptime()),
      memory: {
        rss: Math.round(memUsage.rss / 1024 / 1024),
        heapUsed: Math.round(memUsage.heapUsed / 1024 / 1024),
        heapTotal: Math.round(memUsage.heapTotal / 1024 / 1024),
        external: Math.round(memUsage.external / 1024 / 1024)
      },
      cpu: os.loadavg(),
      eventLoopLag: null // measured separately; see Event Loop Monitoring below
    },
    http: {
      requestsTotal: metrics.requests.total,
      errorsTotal: metrics.requests.errors,
      errorRate: metrics.requests.total > 0
        ? (metrics.requests.errors / metrics.requests.total * 100).toFixed(2) + '%'
        : '0%',
      avgLatency: metrics.latency.count > 0
        ? Math.round(metrics.latency.sum / metrics.latency.count) + 'ms'
        : '0ms',
      activeConnections: metrics.connections.active
    },
    system: {
      hostname: os.hostname(),
      platform: process.platform,
      nodeVersion: process.version,
      totalMemory: Math.round(os.totalmem() / 1024 / 1024) + 'MB',
      freeMemory: Math.round(os.freemem() / 1024 / 1024) + 'MB'
    }
  };
}

module.exports = {
  middleware: metricsMiddleware,
  getMetrics: getMetrics
};
// app.js
var metrics = require('./metrics');

app.use(metrics.middleware);

app.get('/metrics', function(req, res) {
  res.json(metrics.getMetrics());
});

Prometheus Format

If using Prometheus for monitoring, expose metrics in the Prometheus text format:

// metrics/prometheus.js
var client = require('prom-client');

// Collect default Node.js metrics
client.collectDefaultMetrics({ prefix: 'myapp_' });

// Custom metrics
var httpRequestDuration = new client.Histogram({
  name: 'myapp_http_request_duration_seconds',
  help: 'HTTP request duration in seconds',
  labelNames: ['method', 'route', 'status'],
  buckets: [0.01, 0.05, 0.1, 0.5, 1, 5]
});

var httpRequestsTotal = new client.Counter({
  name: 'myapp_http_requests_total',
  help: 'Total HTTP requests',
  labelNames: ['method', 'route', 'status']
});

function prometheusMiddleware(req, res, next) {
  var end = httpRequestDuration.startTimer();

  res.on('finish', function() {
    var route = req.route ? req.route.path : req.path;
    var labels = {
      method: req.method,
      route: route,
      status: res.statusCode
    };
    end(labels);
    httpRequestsTotal.inc(labels);
  });

  next();
}

function metricsEndpoint(req, res) {
  res.set('Content-Type', client.register.contentType);
  client.register.metrics().then(function(metrics) {
    res.end(metrics);
  });
}

module.exports = {
  middleware: prometheusMiddleware,
  endpoint: metricsEndpoint
};
var prometheus = require('./metrics/prometheus');
app.use(prometheus.middleware);
app.get('/metrics', prometheus.endpoint);
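
The /metrics endpoint now returns the Prometheus text exposition format instead of JSON. With the custom metrics above (plus the prefixed default Node.js metrics), the output looks roughly like this; values are illustrative:

# HELP myapp_http_requests_total Total HTTP requests
# TYPE myapp_http_requests_total counter
myapp_http_requests_total{method="GET",route="/api/users",status="200"} 42

# HELP myapp_http_request_duration_seconds HTTP request duration in seconds
# TYPE myapp_http_request_duration_seconds histogram
myapp_http_request_duration_seconds_bucket{le="0.05",method="GET",route="/api/users",status="200"} 40
myapp_http_request_duration_seconds_sum{method="GET",route="/api/users",status="200"} 1.92
myapp_http_request_duration_seconds_count{method="GET",route="/api/users",status="200"} 42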

Prometheus + Grafana Stack

# docker-compose.monitoring.yml
services:
  prometheus:
    image: prom/prometheus:latest
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - promdata:/prometheus
    ports:
      - "9090:9090"

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3001:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafdata:/var/lib/grafana

volumes:
  promdata:
  grafdata:
# prometheus.yml
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'nodejs-api'
    static_configs:
      - targets: ['api:3000']
    metrics_path: /metrics
    scrape_interval: 10s

  - job_name: 'cadvisor'
    static_configs:
      - targets: ['cadvisor:8080']

cAdvisor for Container Metrics

services:
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker:/var/lib/docker:ro
    ports:
      - "8080:8080"

cAdvisor collects container-level metrics (CPU, memory, network, disk I/O) automatically for every running container. Prometheus scrapes cAdvisor, and Grafana visualizes it.

Event Loop Monitoring

The event loop is Node.js's heartbeat. If it stalls, your application is unresponsive.

// monitoring/event-loop.js
var createLogger = require('../logger');
var logger = createLogger('event-loop');

function EventLoopMonitor(options) {
  this.interval = (options && options.interval) || 1000;
  this.warnThreshold = (options && options.warnThreshold) || 50;
  this.critThreshold = (options && options.critThreshold) || 200;
  this.timer = null;
  this.lastCheck = null;
  this.lagSamples = [];
}

EventLoopMonitor.prototype.start = function() {
  var self = this;
  this.lastCheck = Date.now();

  this.timer = setInterval(function() {
    var now = Date.now();
    var expected = self.interval;
    var actual = now - self.lastCheck;
    var lag = actual - expected;
    self.lastCheck = now;

    self.lagSamples.push(lag);
    if (self.lagSamples.length > 60) {
      self.lagSamples.shift();
    }

    if (lag > self.critThreshold) {
      logger.error('Critical event loop lag', { lag: lag + 'ms' });
    } else if (lag > self.warnThreshold) {
      logger.warn('High event loop lag', { lag: lag + 'ms' });
    }
  }, this.interval);
};

EventLoopMonitor.prototype.getStats = function() {
  if (this.lagSamples.length === 0) {
    return { avg: 0, max: 0, p99: 0 };
  }

  var sorted = this.lagSamples.slice().sort(function(a, b) { return a - b; });
  var sum = sorted.reduce(function(a, b) { return a + b; }, 0);

  return {
    avg: Math.round(sum / sorted.length),
    max: sorted[sorted.length - 1],
    p99: sorted[Math.floor(sorted.length * 0.99)],
    samples: sorted.length
  };
};

EventLoopMonitor.prototype.stop = function() {
  if (this.timer) clearInterval(this.timer);
};

module.exports = EventLoopMonitor;
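
Node.js also ships a built-in event loop delay histogram in the perf_hooks module, which can complement or replace the hand-rolled monitor above. A minimal sketch (the histogram reports values in nanoseconds):

// monitoring/event-loop-native.js - built-in histogram via perf_hooks
var perfHooks = require('perf_hooks');

var histogram = perfHooks.monitorEventLoopDelay({ resolution: 20 });
histogram.enable();

function getNativeStats() {
  // Convert nanoseconds to milliseconds for readability
  return {
    mean: Math.round(histogram.mean / 1e6),
    max: Math.round(histogram.max / 1e6),
    p99: Math.round(histogram.percentile(99) / 1e6)
  };
}

module.exports = getNativeStats;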

Complete Working Example

# docker-compose.yml
version: "3.8"

services:
  api:
    build: .
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
      - LOG_LEVEL=info
      - SERVICE_NAME=api
      - DATABASE_URL=postgresql://appuser:secret@postgres:5432/myapp
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "5"
        tag: "{{.Name}}"
    depends_on:
      postgres:
        condition: service_healthy

  postgres:
    image: postgres:16-alpine
    environment:
      POSTGRES_USER: appuser
      POSTGRES_PASSWORD: secret
      POSTGRES_DB: myapp
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U appuser"]
      interval: 5s
      timeout: 5s
      retries: 5
    logging:
      driver: json-file
      options:
        max-size: "5m"
        max-file: "3"

  # Log aggregation
  loki:
    image: grafana/loki:latest
    ports:
      - "3100:3100"
    volumes:
      - loki-data:/loki

  promtail:
    image: grafana/promtail:latest
    volumes:
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./promtail.yml:/etc/promtail/config.yml
    command: -config.file=/etc/promtail/config.yml

  # Metrics
  prometheus:
    image: prom/prometheus:latest
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - promdata:/prometheus
    ports:
      - "9090:9090"

  # Visualization
  grafana:
    image: grafana/grafana:latest
    ports:
      - "3001:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - grafdata:/var/lib/grafana

volumes:
  pgdata:
  loki-data:
  promdata:
  grafdata:
// app.js - Complete application with logging and monitoring
var express = require('express');
var createLogger = require('./logger');
var requestLogger = require('./middleware/request-logger');
var errorHandler = require('./middleware/error-handler');
var metricsModule = require('./metrics');
var EventLoopMonitor = require('./monitoring/event-loop');

var logger = createLogger('app');
var app = express();

// Event loop monitoring
var elMonitor = new EventLoopMonitor({ warnThreshold: 50 });
elMonitor.start();

// Middleware
app.use(express.json());
app.use(requestLogger);
app.use(metricsModule.middleware);

// Health and metrics endpoints
app.get('/health', function(req, res) {
  res.json({ status: 'healthy', uptime: process.uptime() });
});

app.get('/metrics', function(req, res) {
  var data = metricsModule.getMetrics();
  data.eventLoop = elMonitor.getStats();
  res.json(data);
});

// Application routes
app.get('/api/status', function(req, res) {
  logger.info('Status check', { requestId: req.requestId });
  res.json({ version: '1.0.0', status: 'running' });
});

// Error handler (must be last)
app.use(errorHandler);

// Start server
var port = process.env.PORT || 3000;
var server = app.listen(port, function() {
  logger.info('Server started', {
    port: port,
    env: process.env.NODE_ENV,
    nodeVersion: process.version
  });
});

// Graceful shutdown: stop accepting new connections, let in-flight requests finish
process.on('SIGTERM', function() {
  logger.info('SIGTERM received, shutting down');
  elMonitor.stop();
  server.close(function() {
    process.exit(0);
  });
});

Common Issues and Troubleshooting

1. Logs Filling Up Disk

df -h /var/lib/docker
# Filesystem      Size  Used Avail Use% Mounted on
# /dev/sda1       50G   48G   2G   96%  /

Container logs are consuming disk space. Configure log rotation:

{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  }
}

Apply it to all containers by adding the snippet to /etc/docker/daemon.json and restarting Docker. Note that daemon-level defaults only apply to containers created after the restart; existing containers keep the log options they were created with until they are recreated.

2. Logs Not Appearing in Aggregation System

# Loki/Elasticsearch shows no logs from container

Check that the log collector (Promtail, Filebeat) has access to Docker's container directory:

volumes:
  - /var/lib/docker/containers:/var/lib/docker/containers:ro
  - /var/run/docker.sock:/var/run/docker.sock:ro

Also verify the container is not using the none log driver. You can check which driver a running container uses with docker inspect:
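
docker inspect --format '{{.HostConfig.LogConfig.Type}}' myapp
# json-file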

3. JSON Parsing Errors in Log Pipeline

# Logstash/Promtail error: failed to parse JSON

Your application is mixing structured JSON logs with unstructured output. Common culprits: npm install output, native module compilation warnings, third-party library console.log calls.

Fix by ensuring ALL output is JSON, or configure the log pipeline to handle mixed formats:

# promtail pipeline for mixed format
pipeline_stages:
  - docker: {}
  - match:
      selector: '{job="docker"}'
      stages:
        - json:
            expressions:
              level: level
        - labels:
            level:
  - match:
      selector: '{job="docker"} != "{"'
      stages:
        - static_labels:
            level: unknown

4. High Memory Usage from Logging

# Container OOMKilled, last log: "Heap used: 245MB"

Buffering large log objects in memory. Common with request body logging:

// Bad: logging entire request body
logger.info('Request received', { body: req.body }); // Could be megabytes

// Good: log only what you need
logger.info('Request received', {
  method: req.method,
  path: req.path,
  bodySize: JSON.stringify(req.body).length
});

Best Practices

  • Always log to stdout/stderr. Never write to files inside containers. Let Docker's log driver handle routing and rotation.
  • Use structured JSON logging. Every log entry should be a JSON object with timestamp, level, message, and contextual metadata.
  • Include request IDs in every log entry. This enables tracing a single request across multiple services.
  • Configure log rotation. Set max-size and max-file on the json-file driver to prevent disk exhaustion.
  • Separate application logs from access logs. Use different log components or levels so you can filter effectively.
  • Monitor the event loop. Lag above 50ms indicates your Node.js process is overloaded. Alert on sustained lag above 200ms.
  • Use Loki instead of Elasticsearch for cost-sensitive deployments. Loki uses 10-20x less storage by indexing labels instead of content.
  • Never log sensitive data. Passwords, tokens, credit card numbers, and PII should never appear in logs. Sanitize before logging (see the sketch after this list).
  • Set LOG_LEVEL via environment variable. Use info in production, debug in staging. Change it without redeploying.
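
For the sensitive-data rule, a small redaction helper keeps secrets out of log metadata. A minimal sketch that only handles top-level keys; the field names are illustrative:

// logging/sanitize.js - redact sensitive fields before logging (illustrative)
var SENSITIVE_KEYS = ['password', 'token', 'authorization', 'creditcard', 'ssn'];

function sanitize(meta) {
  var clean = {};
  Object.keys(meta || {}).forEach(function(key) {
    if (SENSITIVE_KEYS.indexOf(key.toLowerCase()) !== -1) {
      clean[key] = '[REDACTED]';
    } else {
      clean[key] = meta[key];
    }
  });
  return clean;
}

module.exports = sanitize;
// Usage
var sanitize = require('./logging/sanitize');
logger.info('Login attempt', sanitize({ email: 'user@example.com', password: 'hunter2' }));
// ...,"message":"Login attempt","email":"user@example.com","password":"[REDACTED]"}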
