Testing

Performance Testing Node.js Applications

A practical guide to performance testing Node.js applications covering benchmarking, profiling, load testing, memory leak detection, and automated performance regression testing.

Performance Testing Node.js Applications

Performance testing answers a simple question: is this code fast enough? Not "is this the fastest possible code" — that leads to premature optimization. Fast enough means your API responds within the time budget, your server handles the expected load, and your application does not leak memory over hours of operation.

This guide covers the three levels of performance testing: micro-benchmarks for individual functions, profiling for bottleneck identification, and load testing for system-level behavior.

Prerequisites

  • Node.js installed (v18+)
  • An Express application to test
  • Basic understanding of async operations in Node.js

Micro-Benchmarking with Benchmark.js

Benchmark.js runs functions millions of times and reports operations per second. Use it when comparing two implementations of the same function.

Setup

npm install --save-dev benchmark

Comparing String Operations

// bench-string.js
// Compares three ways of serializing an array of records into one
// comma-separated string; Benchmark.js reports ops/sec for each.
var Benchmark = require("benchmark");
var suite = new Benchmark.Suite();

// Shared fixture: 1000 small records, built once outside the timed code.
var data = [];
for (var i = 0; i < 1000; i++) {
  data.push({ name: "item-" + i, value: Math.random() });
}

suite
  .add("Array.join", function() {
    var parts = [];
    for (var i = 0; i < data.length; i++) {
      parts.push(data[i].name + ":" + data[i].value);
    }
    var result = parts.join(",");
  })
  .add("String concatenation", function() {
    var result = "";
    for (var i = 0; i < data.length; i++) {
      if (i > 0) result += ",";
      result += data[i].name + ":" + data[i].value;
    }
  })
  .add("Template literals", function() {
    // BUG FIX: this case previously used "+" concatenation, making it an
    // exact duplicate of the Array.join case. It now actually measures
    // template-literal interpolation.
    var parts = [];
    for (var i = 0; i < data.length; i++) {
      parts.push(`${data[i].name}:${data[i].value}`);
    }
    var result = parts.join(",");
  })
  .on("cycle", function(event) {
    console.log(String(event.target));
  })
  .on("complete", function() {
    console.log("Fastest is " + this.filter("fastest").map("name"));
  })
  .run();
Run the benchmark: node bench-string.js

Output:

Array.join x 12,345 ops/sec ±1.23% (92 runs sampled)
String concatenation x 8,901 ops/sec ±2.45% (88 runs sampled)
Template literals x 12,100 ops/sec ±1.34% (90 runs sampled)
Fastest is Array.join

Comparing Data Structures

// bench-lookup.js
// Compares key lookup cost across an array scan, plain-object property
// access, and Map.get, each over 10k entries.
var Benchmark = require("benchmark");
var suite = new Benchmark.Suite();

// Create test data
var arrayData = [];
var objectData = {};
var mapData = new Map();

for (var i = 0; i < 10000; i++) {
  var key = "key-" + i;
  var value = { id: i, name: "item-" + i };
  arrayData.push({ key: key, value: value });
  objectData[key] = value;
  mapData.set(key, value);
}

// Middle of the data set, so the linear scan pays for ~5000 comparisons.
var searchKey = "key-5000";

suite
  .add("Array.find", function() {
    // FIX: this case is labelled "Array.find", so benchmark the real
    // Array.prototype.find instead of a hand-rolled indexed loop.
    var match = arrayData.find(function(entry) {
      return entry.key === searchKey;
    });
    var result = match && match.value;
  })
  .add("Object lookup", function() {
    var result = objectData[searchKey];
  })
  .add("Map.get", function() {
    var result = mapData.get(searchKey);
  })
  .on("cycle", function(event) {
    console.log(String(event.target));
  })
  .on("complete", function() {
    console.log("Fastest is " + this.filter("fastest").map("name"));
  })
  .run();

Built-in Performance Timing

Node.js provides performance.now() for high-resolution timing without external dependencies:

// timing.js
var performance = require("perf_hooks").performance;

// Times `fn` over `iterations` calls using high-resolution performance.now(),
// logs a human-readable summary, and returns the raw numbers so callers
// (and tests) can use them programmatically.
//   name       - label used in the log output
//   fn         - zero-argument function under test
//   iterations - number of timed calls
// Returns { msPerCall, totalMs, opsPerSec }. (Previously returned undefined;
// returning the stats is backward compatible and makes results assertable.)
function timeFunction(name, fn, iterations) {
  // Warm up so V8's JIT has optimized the hot path before we measure.
  for (var i = 0; i < 100; i++) {
    fn();
  }

  var start = performance.now();
  for (var j = 0; j < iterations; j++) {
    fn();
  }
  var elapsed = performance.now() - start;

  var stats = {
    msPerCall: elapsed / iterations,
    totalMs: elapsed,
    // Note: Infinity if elapsed rounds to exactly 0 — use more iterations.
    opsPerSec: Math.round(iterations / (elapsed / 1000))
  };

  console.log(name + ": " + stats.msPerCall.toFixed(4) + "ms per call");
  console.log("  Total: " + elapsed.toFixed(2) + "ms for " + iterations + " iterations");
  console.log("  Ops/sec: " + stats.opsPerSec);
  return stats;
}

// Usage
// Fixture: 10k sequential integers, summed by each candidate loop style.
var data = Array.from({ length: 10000 }, function(_, i) { return i; });

timeFunction("Array.forEach", function() {
  var sum = 0;
  data.forEach(function(n) { sum += n; });
}, 10000);

// Plain indexed loop over the same data, for comparison.
timeFunction("for loop", function() {
  var sum = 0;
  for (var i = 0; i < data.length; i++) {
    sum += data[i];
  }
}, 10000);

CPU Profiling

When a function is slow but you do not know which part, CPU profiling shows where time is spent.

Using the Built-in Profiler

# Record a profile
node --prof app.js

# Process the generated log file
node --prof-process isolate-0x*.log > profile.txt

The output shows where CPU time was spent:

 [Summary]:
   ticks  total  nonlib   name
    234   45.2%   52.1%  JavaScript
    187   36.1%   41.6%  C++
     28    5.4%    6.2%  GC
     69   13.3%          Shared libraries

 [JavaScript]:
   ticks  total  nonlib   name
    112   21.6%   24.9%  processRequest
     67   12.9%   14.9%  parseJSON
     32    6.2%    7.1%  validateInput
     23    4.4%    5.1%  formatResponse

Programmatic Profiling with Inspector

// profile.js
// Programmatic CPU profiling via the DevTools protocol: start the V8
// sampler, run the code under test, then write a .cpuprofile file that
// Chrome DevTools can load.
var inspector = require("inspector");
var fs = require("fs");
var session = new inspector.Session();

session.connect();

// Enables the Profiler domain, then starts sampling.
function startProfiling() {
  session.post("Profiler.enable", function() {
    session.post("Profiler.start", function() {
      console.log("Profiling started");
    });
  });
}

// Stops sampling and writes the captured profile as JSON.
// filename should end in ".cpuprofile" so DevTools recognizes it.
function stopProfiling(filename) {
  session.post("Profiler.stop", function(err, result) {
    if (err) {
      console.error("Profiling error:", err);
      return;
    }

    fs.writeFileSync(filename, JSON.stringify(result.profile));
    console.log("Profile saved to " + filename);
  });
}

// Usage in tests
// NOTE(review): this assumes the in-process inspector delivers the
// Profiler.enable/start callbacks before the synchronous loop below runs,
// so that the loop is actually sampled — confirm on your Node version.
startProfiling();

// Run the code you want to profile
var iterations = 100000;
for (var i = 0; i < iterations; i++) {
  processData({ id: i, name: "test-" + i });
}

stopProfiling("cpu-profile.cpuprofile");
// Open cpu-profile.cpuprofile in Chrome DevTools

Profiling an HTTP Server

// profile-server.js
// Profiles a live HTTP server for 30 seconds, then writes the CPU profile
// and exits. Drive traffic at it with a load-testing tool while it runs.
var http = require("http");
var inspector = require("inspector");
var fs = require("fs");

var session = new inspector.Session();
session.connect();

var server = http.createServer(function(req, res) {
  // Your application logic
  var result = processRequest(req);
  res.writeHead(200, { "Content-Type": "application/json" });
  res.end(JSON.stringify(result));
});

// Start profiling after server starts
server.listen(3000, function() {
  console.log("Server on port 3000");
  console.log("Starting profile — will stop in 30 seconds");

  session.post("Profiler.enable", function() {
    session.post("Profiler.start");
  });

  // Stop after 30 seconds of traffic
  setTimeout(function() {
    // NOTE(review): unlike profile.js above, this callback never checks
    // `err` — if Profiler.stop fails, `result` is undefined and the
    // writeFileSync line throws. Consider handling err here too.
    session.post("Profiler.stop", function(err, result) {
      fs.writeFileSync("server-profile.cpuprofile", JSON.stringify(result.profile));
      console.log("Profile saved");
      process.exit(0);
    });
  }, 30000);
});

Run the server, send traffic with a load testing tool, and analyze the resulting profile in Chrome DevTools (Performance tab > Load profile).

Memory Profiling

Detecting Memory Leaks

// memory-check.js
// Snapshot of the current process memory, each field formatted as "NNMB".
function getMemoryUsage() {
  // Convert a byte count into a whole-megabyte display string.
  var toMB = function(bytes) {
    return Math.round(bytes / 1024 / 1024) + "MB";
  };
  var mem = process.memoryUsage();
  return {
    rss: toMB(mem.rss),
    heapUsed: toMB(mem.heapUsed),
    heapTotal: toMB(mem.heapTotal),
    external: toMB(mem.external)
  };
}

// Monitor memory over time
// Samples process memory every intervalMs until durationMs has elapsed,
// logging each reading, then hands the full series to analyzeReadings().
function monitorMemory(intervalMs, durationMs) {
  var samples = [];
  var startedAt = Date.now();

  var timer = setInterval(function() {
    var secondsElapsed = Math.round((Date.now() - startedAt) / 1000);
    var snapshot = getMemoryUsage();
    samples.push({ elapsed: secondsElapsed, usage: snapshot });
    console.log("t=" + secondsElapsed + "s  heap=" + snapshot.heapUsed + "  rss=" + snapshot.rss);

    // Stop sampling once the requested duration has passed.
    if (Date.now() - startedAt > durationMs) {
      clearInterval(timer);
      analyzeReadings(samples);
    }
  }, intervalMs);
}

// Summarizes a series of memory readings and flags suspicious heap growth.
//   readings - [{ elapsed, usage: { heapUsed: "NNMB", ... } }, ...] as
//              produced by monitorMemory(); must be non-empty.
// Returns { first, last, growth, leakSuspected } (MB values) so tests and
// callers can assert on the analysis instead of parsing log output.
function analyzeReadings(readings) {
  // parseInt with an explicit radix; "42MB" parses as 42.
  var first = parseInt(readings[0].usage.heapUsed, 10);
  var last = parseInt(readings[readings.length - 1].usage.heapUsed, 10);
  var growth = last - first;
  // Heuristic threshold: more than 50MB of net growth suggests a leak.
  var leakSuspected = growth > 50;

  console.log("\n--- Memory Analysis ---");
  console.log("Start: " + readings[0].usage.heapUsed);
  console.log("End: " + readings[readings.length - 1].usage.heapUsed);
  console.log("Growth: " + growth + "MB");

  if (leakSuspected) {
    console.log("WARNING: Possible memory leak detected");
  } else {
    console.log("Memory usage appears stable");
  }

  return { first: first, last: last, growth: growth, leakSuspected: leakSuspected };
}

Common Memory Leak Patterns

// LEAK: Growing event listener list
var EventEmitter = require("events");
var emitter = new EventEmitter();

// Anti-pattern (kept deliberately broken for illustration): every call
// registers a fresh "data" listener that is never removed, so the listener
// list — and each closed-over `req` — stays reachable forever.
function handleRequest(req) {
  // This adds a new listener on EVERY request — never removed
  emitter.on("data", function(data) {
    processData(data, req);
  });
}

// FIX: Remove listener when done
// emitter.once() registers a listener that Node removes automatically after
// its first invocation — the same effect as the manual removeListener()
// bookkeeping, with no way to forget the cleanup.
function handleRequestFixed(req) {
  emitter.once("data", function onData(data) {
    processData(data, req);
  });
}
// LEAK: Unbounded cache
// Module-level cache with no eviction: every distinct id ever requested
// stays referenced for the life of the process.
var cache = {};

function getCachedUser(id) {
  // Memoize on first access; nothing ever removes an entry.
  if (!cache[id]) {
    cache[id] = db.findById("users", id);
  }
  return cache[id];
}
// cache grows forever — every unique user ID adds an entry

// FIX: Bounded cache with LRU eviction
// A Map iterates in insertion order, so re-inserting a key on every access
// keeps the first key in iteration order as the least recently used one.
// (The previous version claimed LRU but actually evicted in FIFO order:
// get() never refreshed recency, and keys.shift() was O(n) per eviction.)
//   maxSize - maximum number of entries to retain
// Returns { get(key), set(key, value) }; get returns undefined on a miss.
function createBoundedCache(maxSize) {
  var entries = new Map();

  return {
    // Returns the cached value (or undefined) and marks the key as
    // most recently used.
    get: function(key) {
      if (!entries.has(key)) return undefined;
      var value = entries.get(key);
      entries.delete(key);
      entries.set(key, value);
      return value;
    },
    // Inserts or updates a key as most recently used, evicting the least
    // recently used entry once the cache exceeds maxSize. O(1).
    set: function(key, value) {
      if (entries.has(key)) entries.delete(key);
      entries.set(key, value);
      if (entries.size > maxSize) {
        var oldestKey = entries.keys().next().value;
        entries.delete(oldestKey);
      }
    }
  };
}
// LEAK: Closures holding references
// Anti-pattern (kept deliberately broken for illustration): the returned
// closure keeps `results` alive, and every call appends to it, so processed
// data accumulates for the processor's entire lifetime.
function createProcessor() {
  var results = []; // This array grows forever

  return function process(data) {
    var processed = transform(data);
    results.push(processed); // Never cleared
    return processed;
  };
}

// FIX: Limit stored results
// Same contract as createProcessor, but the retained history is capped at
// maxResults entries; the oldest results are dropped as new ones arrive.
function createProcessorFixed(maxResults) {
  var history = [];

  return {
    process: function(data) {
      var output = transform(data);
      history.push(output);
      // Trim to the newest maxResults entries once the cap is exceeded.
      if (history.length > maxResults) {
        history = history.slice(-maxResults);
      }
      return output;
    },
    // Hand back a copy so callers cannot mutate the internal buffer.
    getResults: function() { return history.slice(); }
  };
}

Heap Snapshots

// heap-snapshot.js
// Before/after heap snapshots around a suspect operation; diff them in
// Chrome DevTools to see which objects accumulated.
var v8 = require("v8");
var fs = require("fs");
// (fs is imported but unused in this snippet.)

// Writes a heap snapshot to `filename` and returns the path written.
// NOTE(review): v8.writeHeapSnapshot() returns the filename string, not a
// stream — the variable name below is misleading; see the Node v8 docs.
function takeHeapSnapshot(filename) {
  var snapshotStream = v8.writeHeapSnapshot(filename);
  console.log("Heap snapshot written to: " + snapshotStream);
  return snapshotStream;
}

// Take snapshots before and after an operation
takeHeapSnapshot("before.heapsnapshot");

// Run the suspected leaky operation
for (var i = 0; i < 100000; i++) {
  suspectedLeakyFunction();
}

// Force garbage collection (run with --expose-gc)
// so the "after" snapshot contains only objects that are still reachable.
if (global.gc) global.gc();

takeHeapSnapshot("after.heapsnapshot");

// Compare snapshots in Chrome DevTools:
// Memory tab > Load > Select both snapshots > Comparison view

Response Time Testing

Testing Express Route Performance

// performance.test.js
// Response-time smoke tests: each request must finish inside a fixed
// wall-clock budget. A single-sample timing is noisy (GC, JIT warm-up,
// CI load), so treat these as coarse guards; use the percentile-based
// suites later in this guide for statistically meaningful numbers.
var request = require("supertest");
var app = require("./app");

describe("Performance", function() {
  test("GET /api/users responds within 200ms", function() {
    // The timer starts before supertest issues the request, so `elapsed`
    // includes connection setup as well as handler time.
    var start = Date.now();

    return request(app)
      .get("/api/users")
      .expect(200)
      .then(function() {
        var elapsed = Date.now() - start;
        expect(elapsed).toBeLessThan(200);
      });
  });

  test("GET /api/search responds within 500ms", function() {
    var start = Date.now();

    return request(app)
      .get("/api/search?q=test")
      .expect(200)
      .then(function() {
        var elapsed = Date.now() - start;
        expect(elapsed).toBeLessThan(500);
      });
  });

  test("POST /api/users responds within 300ms", function() {
    var start = Date.now();

    return request(app)
      .post("/api/users")
      .send({ name: "Test", email: "[email protected]" })
      .expect(201)
      .then(function() {
        var elapsed = Date.now() - start;
        expect(elapsed).toBeLessThan(300);
      });
  });
});

Measuring Percentiles

// percentile-test.js
var request = require("supertest");
var app = require("./app");

// Fires `count` requests at `endpoint`, records each request's latency,
// and calls callback(null, stats) with the analyzeLatencies() summary once
// every request has completed.
function runRequests(endpoint, count, callback) {
  var times = [];
  var completed = 0;

  for (var i = 0; i < count; i++) {
    // BUG FIX: `start` was a single function-scoped var shared by every
    // iteration, so each .end() callback measured from the LAST iteration's
    // start time. Wrapping the body in an IIFE gives every request its own
    // start timestamp (the same pattern perf.test.js below already uses).
    (function() {
      var start = Date.now();

      request(app)
        .get(endpoint)
        .end(function(err) {
          times.push(Date.now() - start);
          completed++;

          if (completed === count) {
            callback(null, analyzeLatencies(times));
          }
        });
    })();
  }
}

// Computes summary latency statistics from an array of raw timings (ms).
//   times - non-empty array of numbers
// Returns { min, max, median, p95, p99, avg } using nearest-rank
// percentiles (index = floor(n * p)); avg is rounded to the nearest ms.
// FIX: sorts a copy — Array.prototype.sort mutates in place, and the old
// version reordered the caller's array as a side effect.
function analyzeLatencies(times) {
  var sorted = times.slice().sort(function(a, b) { return a - b; });

  return {
    min: sorted[0],
    max: sorted[sorted.length - 1],
    median: sorted[Math.floor(sorted.length * 0.5)],
    p95: sorted[Math.floor(sorted.length * 0.95)],
    p99: sorted[Math.floor(sorted.length * 0.99)],
    avg: Math.round(sorted.reduce(function(a, b) { return a + b; }, 0) / sorted.length)
  };
}

// Run the benchmark
// 100 samples gives stable median/p95 figures; p99 is coarse at this
// sample size (only one request lands beyond the 99th percentile).
runRequests("/api/users", 100, function(err, stats) {
  console.log("Latency Distribution:");
  console.log("  Min:    " + stats.min + "ms");
  console.log("  Median: " + stats.median + "ms");
  console.log("  p95:    " + stats.p95 + "ms");
  console.log("  p99:    " + stats.p99 + "ms");
  console.log("  Max:    " + stats.max + "ms");
  console.log("  Avg:    " + stats.avg + "ms");
});

Automated Performance Regression Testing

Performance Budget Configuration

// perf-budget.js
// Per-endpoint latency budgets in milliseconds. An endpoint missing from
// this table has no budget and always passes.
var BUDGETS = {
  "/api/users": { p95: 200, p99: 500 },
  "/api/search": { p95: 500, p99: 1000 },
  "/api/orders": { p95: 300, p99: 600 }
};

// Compares measured stats against the endpoint's budget.
// Returns { pass: true } when no budget exists for the endpoint, otherwise
// { pass, failures } where failures describes each exceeded metric.
function checkBudget(endpoint, stats) {
  var budget = BUDGETS[endpoint];
  if (!budget) return { pass: true };

  var failures = ["p95", "p99"]
    .filter(function(metric) { return stats[metric] > budget[metric]; })
    .map(function(metric) {
      return metric + " " + stats[metric] + "ms exceeds budget " + budget[metric] + "ms";
    });

  return {
    pass: failures.length === 0,
    failures: failures
  };
}

module.exports = { BUDGETS: BUDGETS, checkBudget: checkBudget };

Jest Performance Tests

// perf.test.js
// CI performance-budget tests: measure every budgeted endpoint and fail
// the build when its p95/p99 exceed the limits in perf-budget.js.
var request = require("supertest");
var app = require("./app");
var budget = require("./perf-budget");

// Fires `count` requests at `endpoint` and resolves with { p95, p99,
// median } latencies in ms. The IIFE captures a fresh `start` per
// iteration so each callback measures its own request rather than sharing
// one function-scoped variable.
function measureEndpoint(endpoint, count) {
  return new Promise(function(resolve) {
    var times = [];
    var completed = 0;

    for (var i = 0; i < count; i++) {
      (function() {
        var start = Date.now();
        request(app)
          .get(endpoint)
          .end(function() {
            times.push(Date.now() - start);
            completed++;
            if (completed === count) {
              // Sort ascending before indexing nearest-rank percentiles.
              times.sort(function(a, b) { return a - b; });
              resolve({
                p95: times[Math.floor(times.length * 0.95)],
                p99: times[Math.floor(times.length * 0.99)],
                median: times[Math.floor(times.length * 0.5)]
              });
            }
          });
      })();
    }
  });
}

// Generates one Jest test per endpoint in the budget table, so adding a
// budget entry in perf-budget.js automatically adds a CI check.
describe("Performance Budgets", function() {
  var endpoints = Object.keys(budget.BUDGETS);

  endpoints.forEach(function(endpoint) {
    test(endpoint + " meets performance budget", function() {
      // 50 samples keeps the suite fast; p99 is coarse at this sample size.
      return measureEndpoint(endpoint, 50).then(function(stats) {
        var result = budget.checkBudget(endpoint, stats);
        if (!result.pass) {
          throw new Error(
            endpoint + " failed performance budget:\n" +
            result.failures.join("\n")
          );
        }
      });
    });
  });
});

Event Loop Monitoring

The event loop is Node.js's heartbeat. If it stalls, everything stalls.

// event-loop-monitor.js
// Detects event-loop stalls by scheduling a timer every 100ms and checking
// how late it actually fires. Lag beyond thresholdMs means something
// blocked the loop (sync work, long GC pause, etc.).
function monitorEventLoop(thresholdMs) {
  var lastCheck = Date.now();

  setInterval(function() {
    var now = Date.now();
    var lag = now - lastCheck - 100; // Expected interval is 100ms
    lastCheck = now;

    if (lag > thresholdMs) {
      console.warn("Event loop lag: " + lag + "ms");
      // NOTE(review): this stack shows the timer callback, not the code
      // that caused the stall — it tells you WHEN, not WHERE. Use a CPU
      // profile to locate the blocking code.
      console.warn("Stack:", new Error().stack);
    }
  }, 100);
}

// Start monitoring
// The interval is never cleared, so this keeps running (and keeps the
// process alive) for its whole lifetime — intended for long-lived servers.
monitorEventLoop(50); // Warn if lag exceeds 50ms

Testing Event Loop Impact

// event-loop.test.js
var performance = require("perf_hooks").performance;

// Estimates average event-loop lag (in ms) while fn's work is on the loop.
// Schedules maxCount back-to-back setImmediate probes, measures how late
// each fires, and passes the mean delay to `callback`.
//   fn       - zero-argument function under test
//   callback - callback(avgLagMs)
function measureEventLoopLag(fn, callback) {
  var measurements = [];
  var count = 0;
  var maxCount = 20;

  function measure() {
    var expected = Date.now();

    setImmediate(function() {
      var actual = Date.now();
      measurements.push(actual - expected);
      count++;

      if (count < maxCount) {
        measure();
      } else {
        var avg = measurements.reduce(function(a, b) { return a + b; }, 0) / measurements.length;
        callback(avg);
      }
    });
  }

  // BUG FIX: measurement must begin BEFORE the function under test runs.
  // Previously fn() executed first, so a fully synchronous fn had already
  // finished before the first setImmediate was scheduled and its blocking
  // was never observed. Scheduling the first probe first means a sync fn
  // delays that probe, and chunked async work delays the later ones.
  measure();

  // Run the function under test
  fn();
}

// Usage in tests
// processLargeDataset / generateTestData are the application code under
// test; the assertion caps the average lag the processing may introduce.
test("data processing does not block event loop", function(done) {
  measureEventLoopLag(function() {
    // Function under test
    processLargeDataset(generateTestData(10000));
  }, function(avgLag) {
    expect(avgLag).toBeLessThan(10); // Less than 10ms average lag
    done();
  });
});

Complete Working Example: Performance Test Suite

// perfSuite.js
// Minimal dependency-free load tester: fires concurrent HTTP GETs at a
// base URL and records latency percentiles per named benchmark.
var http = require("http");
var performance = require("perf_hooks").performance;

// A suite of named benchmarks sharing one base URL.
//   baseUrl - e.g. "http://localhost:3000"
// this.results maps benchmark name -> summary, filled in by benchmark().
function PerfSuite(baseUrl) {
  this.baseUrl = baseUrl;
  this.results = {};
}

// Runs one named benchmark: keeps up to `concurrency` GET requests in
// flight against options.path until `requests` have completed, then stores
// the latency summary under this.results[name].
//   options  - { path, requests (default 100), concurrency (default 10) }
//   callback - callback(null, summary); request errors are counted in the
//              summary, never passed as the first argument.
PerfSuite.prototype.benchmark = function(name, options, callback) {
  var self = this;
  var count = options.requests || 100;
  var concurrent = options.concurrency || 10;
  var times = [];
  var errors = 0;
  var completed = 0;
  var active = 0;

  // Issues one request; each completion launches the next, so the initial
  // batch below sustains the requested concurrency level.
  function makeRequest() {
    if (completed + active >= count) return;
    active++;

    var start = performance.now();

    http.get(self.baseUrl + options.path, function(res) {
      var data = "";
      // Drain the response body so the socket is released.
      res.on("data", function(chunk) { data += chunk; });
      res.on("end", function() {
        times.push(performance.now() - start);
        if (res.statusCode >= 400) errors++;
        active--;
        completed++;

        if (completed === count) {
          finish();
        } else {
          makeRequest();
        }
      });
    }).on("error", function() {
      // Connection-level failure: counted as an error, no timing sample.
      errors++;
      active--;
      completed++;
      if (completed === count) finish();
      else makeRequest();
    });
  }

  // Sorts the collected samples and records the percentile summary.
  // NOTE(review): if EVERY request errors, `times` is empty and
  // times[0].toFixed throws — consider guarding before relying on this.
  // NOTE(review): rps divides count by the SUM of latencies, which ignores
  // concurrency; dividing by wall-clock elapsed time would give the true
  // throughput. Verify before comparing against other load tools.
  function finish() {
    times.sort(function(a, b) { return a - b; });

    self.results[name] = {
      requests: count,
      errors: errors,
      min: times[0].toFixed(2),
      median: times[Math.floor(times.length * 0.5)].toFixed(2),
      p95: times[Math.floor(times.length * 0.95)].toFixed(2),
      p99: times[Math.floor(times.length * 0.99)].toFixed(2),
      max: times[times.length - 1].toFixed(2),
      rps: Math.round(count / (times.reduce(function(a, b) { return a + b; }, 0) / 1000))
    };

    callback(null, self.results[name]);
  }

  // Launch concurrent requests
  for (var i = 0; i < Math.min(concurrent, count); i++) {
    makeRequest();
  }
};

// Prints a formatted latency/throughput summary for every benchmark that
// has completed so far.
PerfSuite.prototype.report = function() {
  console.log("\n=== Performance Report ===\n");

  var results = this.results;
  Object.keys(results).forEach(function(name) {
    var summary = results[name];
    console.log(name + ":");
    console.log("  Requests:  " + summary.requests + " (" + summary.errors + " errors)");
    console.log("  Min:       " + summary.min + "ms");
    console.log("  Median:    " + summary.median + "ms");
    console.log("  p95:       " + summary.p95 + "ms");
    console.log("  p99:       " + summary.p99 + "ms");
    console.log("  Max:       " + summary.max + "ms");
    console.log("  Throughput:" + summary.rps + " req/sec");
    console.log("");
  });
};

// Usage
// Benchmarks run sequentially (each callback starts the next) so they do
// not compete with each other for sockets or CPU.
var suite = new PerfSuite("http://localhost:3000");

suite.benchmark("Homepage", { path: "/", requests: 200, concurrency: 20 }, function() {
  suite.benchmark("API Users", { path: "/api/users", requests: 200, concurrency: 20 }, function() {
    suite.benchmark("API Search", { path: "/api/search?q=test", requests: 200, concurrency: 20 }, function() {
      suite.report();
    });
  });
});

Common Issues and Troubleshooting

Benchmark results vary wildly between runs

V8's JIT compiler optimizes code paths differently between runs, and background processes affect timing:

Fix: Run benchmarks multiple times and compare medians, not individual runs. Close other applications. Use --predictable flag for V8 to reduce JIT variance. Warm up functions before measuring.

Memory usage grows during tests but is not a leak

V8 garbage collection runs at unpredictable intervals. Memory growth between GC cycles is normal:

Fix: Call global.gc() (requires --expose-gc flag) before measuring. Take multiple measurements over minutes, not seconds. True leaks show continuous growth; normal behavior shows sawtooth patterns.

Performance tests pass locally but fail in CI

CI runners have different CPU, memory, and I/O characteristics than development machines:

Fix: Set separate budgets for CI environments. Use relative comparisons (this commit vs previous) instead of absolute thresholds. Run performance tests on dedicated CI runners with consistent resources.

Event loop lag spikes during database operations

Synchronous operations or large result sets block the event loop:

Fix: Use streaming for large query results. Break CPU-intensive work into chunks with setImmediate(). Use worker threads for computation-heavy operations. Profile to identify the specific blocking operation.

Best Practices

  • Test with realistic data volumes. Performance testing with 10 records tells you nothing about production behavior with 10,000 records. Generate test data that matches production scale.
  • Measure percentiles, not averages. Average response time hides tail latency. A p99 of 2 seconds means 1 in 100 users waits over 2 seconds — that matters more than a 50ms average.
  • Profile before optimizing. Never guess where the bottleneck is. Profile, find the hot path, and optimize that specific code. Optimizing code that runs 1% of the time is wasted effort.
  • Set performance budgets and enforce them in CI. Without automated checks, performance regressions accumulate silently until users complain.
  • Monitor memory over time, not in snapshots. A single memory reading means nothing. Track heap usage over minutes or hours to distinguish normal GC patterns from actual leaks.
  • Warm up before benchmarking. V8's JIT compiler optimizes hot code paths. The first few executions are always slower. Run the function 100+ times before measuring.
  • Test under realistic concurrency. A single-request benchmark tells you latency but not throughput. Test with the concurrent load your application actually receives.
  • Keep performance tests separate from unit tests. Performance tests are slower and more sensitive to environment. Run them in a dedicated CI step, not alongside unit tests.

References

Powered by Contentful