diff --git a/packages/bench-trial/README.md b/packages/bench-trial/README.md
new file mode 100644
index 00000000..d4ac74d6
--- /dev/null
+++ b/packages/bench-trial/README.md
@@ -0,0 +1,94 @@
+# Bench trial
+
+> Runs one or multiple benchmark tests
+
+## Install
+
+```bash
+npm install -g bench-trial
+```
+
+## Usage
+
+```bash
+bench-trial benchmarks/map-helper-vs-entity.js -i 5
+```
+
+## TL;DR
+
+Runs one (or more) benchmarkjs tests multiple times, enough to get less ambiguous results, and includes basic assertion testing to make sure each test executes correctly.
+
+## Why?
+
+While running [benchmarkjs](https://benchmarkjs.com) to compare different versions of code I found out a couple of things:
+
+- **Ambiguous results**: I noticed that the same benchmark tests returned different results every time they executed. If they were re-run consecutively, I would get more operations per second on each benchmark. I believe the reason may be related to the V8 engine warming up and optimizing the code the longer it runs, since if I let it "cool off" for a while, the operations per second for each test would decrease. These ambiguous results meant having to repeat tests to ensure some consistency.
+- **Unreliable execution**: Occasionally I made changes to the benchmarked code and would overlook that it was no longer executing correctly, which further compounded the unreliability of the results.
+
+## Solution
+
+- **Consistency**: By running benchmark tests more than once, we can get median and average results and see a bigger picture with less fluctuation. Because the tests run multiple times in succession, the code gets optimized by the engine, and we can use the median as a more consistent and stable metric.
+
+- **Reliable execution**: By running simple assertion tests on each suite before the actual benchmark runs, we can be sure our tests are executing correctly.
+
+## API
+
+```bash
+bench-trial <file> [-i <iterations>] [-s]
+```
+
+- `-i, --iterations <count>` number of iterations; defaults to 10 if not provided.
+- `-s, --skip-tests` if provided, the assertion tests are skipped.
+
+### Writing your benchmark suites
+
+The file provided to **bench-trial** should export an `array` of test suites; each test suite is an object of the form:
+
+```
+{
+  async: boolean,
+  name: string,
+  test: function,
+  benchmark: function
+}
+```
+
+| Property | Type | Description |
+|:---|:---|:---|
+| *async* | `Boolean` | whether the `test` and `benchmark` functions are asynchronous |
+| *name* | `String` | name that describes the test you are running |
+| *test* | `function` | function that runs an assertion test against the result of the code you want to benchmark |
+| *benchmark* | `function` | function passed to the benchmarkjs Suite that actually runs the benchmark |
+
+#### Sync vs Async
+
+- Synchronous methods are simple methods that are expected to return a value.
+- Asynchronous methods work a bit differently from benchmarkjs async methods: bench-trial expects async methods to follow the [error-first callback](https://nodejs.org/api/errors.html#errors_error_first_callbacks) convention.
+
+#### Testing
+
+bench-trial provides a convenience method `test` that accepts the function to execute and a value to check against the result of the code you are testing. It takes care of sync vs async depending on how you set the `async` flag.
+
+```js
+test(test:function, value:*)
+```
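+
+For example, a minimal suite file using the `test` helper for a hypothetical `addNumbers` function (a sketch along the lines of the bundled examples) could look like this:
+
+```js
+const { test } = require('bench-trial')
+
+// hypothetical function to benchmark
+function addNumbers () {
+  return 1 + 1
+}
+
+module.exports = [
+  {
+    async: false,
+    name: 'add-numbers',
+    test: test(addNumbers, 2),
+    benchmark: addNumbers
+  }
+]
+```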
+
+To write a manual test, see the manual-tests example below.
+
+## Examples
+
+- Test synchronous code [example](examples/array-iteration.js)
+- Test asynchronous code [example](examples/async-example.js)
+- Write manual tests for sync/asynchronous code [example](examples/manual-tests.js)
+
+## Acknowledgements
+
+This tool is a wrapper around [benchmarkjs](https://benchmarkjs.com), so all credit related to benchmarking itself really goes to them.
+
+Thanks to [Paul Molluzzo](https://github.com/paulmolluzzo) for coming up with the name **bench-trial**!
+
+## Contributing
+
+Please read [CONTRIBUTING.md](https://github.com/ViacomInc/data-point/blob/master/CONTRIBUTING.md) for details on our code of conduct, and the process for submitting pull requests to us.
+
+## License
+
+This project is licensed under the Apache License Version 2.0 - see the [LICENSE](LICENSE) file for details.
diff --git a/packages/bench-trial/examples/array-iteration.js b/packages/bench-trial/examples/array-iteration.js
new file mode 100644
index 00000000..8453546d
--- /dev/null
+++ b/packages/bench-trial/examples/array-iteration.js
@@ -0,0 +1,43 @@
+const { test } = require('bench-trial')
+
+const array = Array(100).fill('foo')
+const expected = array.join('').length
+
+function forLoop () {
+  let result = ''
+  for (let index = 0; index < array.length; index++) {
+    result = result + array[index]
+  }
+
+  const length = result.length
+  result = ''
+  return length
+}
+
+function whileLoop () {
+  let result = ''
+  let index = 0
+  while (index !== array.length) {
+    result = result + array[index]
+    index++
+  }
+
+  const length = result.length
+  result = ''
+  return length
+}
+
+module.exports = [
+  {
+    async: false,
+    name: 'while-loop',
+    test: test(whileLoop, expected),
+    benchmark: whileLoop
+  },
+  {
+    async: false,
+    name: 'for-loop',
+    test: test(forLoop, expected),
+    benchmark: forLoop
+  }
+]
diff --git a/packages/bench-trial/examples/async-example.js b/packages/bench-trial/examples/async-example.js
new file mode 100644
index 00000000..70d7dfa1
--- /dev/null
+++ b/packages/bench-trial/examples/async-example.js
@@ -0,0 +1,30 @@
+const { test } = require('bench-trial')
+
+const expected = true
+
+function testPromise (done) {
+  Promise.resolve(true).then(() => {
+    done(null, true)
+  })
+}
+
+function testSetTimeOut (done) {
+  setTimeout(() => {
+    done(null, true)
+  }, 0)
+}
+
+module.exports = [
+  {
+    async: true,
+    name: 'promise',
+    test: test(testPromise, expected),
+    benchmark: testPromise
+  },
+  {
+    async: true,
+    name: 'timeout',
+    test: test(testSetTimeOut, expected),
+    benchmark: testSetTimeOut
+  }
+]
diff --git a/packages/bench-trial/examples/manual-tests.js b/packages/bench-trial/examples/manual-tests.js
new file mode 100644
index 00000000..7e76f211
--- /dev/null
+++ b/packages/bench-trial/examples/manual-tests.js
@@ -0,0 +1,33 @@
+const assert = require('assert')
+
+const expected = 2
+
+function addsNumbersSync () {
+  return 1 + 1
+}
+
+function addsNumbersAsync (done) {
+  Promise.resolve().then(() => {
+    done(null, 1 + 1)
+  })
+}
+
+module.exports = [
+  {
+    async: false,
+    name: 'addsNumbersSync',
+    test: () => assert.deepEqual(addsNumbersSync(), expected),
+    benchmark: addsNumbersSync
+  },
+  {
+    async: true,
+    name: 'addsNumbersAsync',
+    test: done => {
+      addsNumbersAsync((e, val) => {
+        assert.deepEqual(val, expected)
+        done(null)
+      })
+    },
+    benchmark: addsNumbersAsync
+  }
+]
diff --git a/packages/bench-trial/lib/index.js b/packages/bench-trial/lib/index.js
new file mode 100644
index 00000000..7e17dedf
--- /dev/null
+++ b/packages/bench-trial/lib/index.js
@@ -0,0 +1,54 @@
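+// Shared helpers for bench-trial suites: test(method, expected) returns a
+// descriptor ({ test, expected }) that the runner turns into an assertion;
+// test.sync/test.async build the assertion thunks directly, and
+// benchmark.sync/benchmark.async adapt suite functions for benchmarkjs
+// (async methods use error-first callbacks, adapted to benchmarkjs deferreds).
+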
+const assert = require('assert')
+
+function testSync (method, expected) {
+  return () => assert.deepEqual(method(), expected)
+}
+
+function testAsync (method, expected) {
+  return done => {
+    return method((err, value) => {
+      if (err) {
+        return done(err)
+      }
+      try {
+        assert.deepEqual(value, expected)
+        done(null, value)
+      } catch (e) {
+        done(e)
+      }
+    })
+  }
+}
+
+function test (method, expected) {
+  return {
+    test: method,
+    expected
+  }
+}
+
+test.sync = testSync
+test.async = testAsync
+
+function benchmarkSync (method) {
+  return method
+}
+
+function benchmarkAsync (method) {
+  return deferred => {
+    return method(err => {
+      if (err) {
+        throw err
+      }
+      deferred.resolve()
+    })
+  }
+}
+
+module.exports = {
+  test,
+  benchmark: {
+    sync: benchmarkSync,
+    async: benchmarkAsync
+  }
+}
diff --git a/packages/bench-trial/package.json b/packages/bench-trial/package.json
new file mode 100644
index 00000000..1f2a4f1d
--- /dev/null
+++ b/packages/bench-trial/package.json
@@ -0,0 +1,22 @@
+{
+  "name": "bench-trial",
+  "version": "2.0.0",
+  "description": "Runs one or multiple benchmark tests",
+  "main": "./lib/index.js",
+  "license": "Apache-2.0",
+  "engines": {
+    "node": ">=6"
+  },
+  "bin": {
+    "bench-trial": "runner.js"
+  },
+  "author": {
+    "name": "Acatl Pacheco",
+    "email": "acatl.pacheco@viacom.com"
+  },
+  "dependencies": {
+    "benchmark": "^2.1.4",
+    "bluebird": "^3.5.1",
+    "commander": "^2.13.0",
+    "chalk": "^2.3.0"
+  }
+}
diff --git a/packages/bench-trial/runner.js b/packages/bench-trial/runner.js
new file mode 100755
index 00000000..0c5f2289
--- /dev/null
+++ b/packages/bench-trial/runner.js
@@ -0,0 +1,333 @@
+#!/usr/bin/env node --expose-gc
+
+const path = require('path')
+const program = require('commander')
+const Benchmark = require('benchmark')
+const Promise = require('bluebird')
+const chalk = require('chalk')
+
+const pkg = require('./package.json')
+const lib = require('./lib')
+
+program
+  .version(pkg.version)
+  .usage('<file>')
+  .option(
+    '-i, --iterations [count]',
+    'Number of iterations, defaults to 10',
+    parseInt
+  )
+  .option('-s, --skip-tests', 'skip tests')
+
+program.parse(process.argv)
+
+const filePath = program.args[0]
+
+const testSuite = require(path.resolve(filePath))
+
+// a suite file may export a single suite object or an array of suites
+const suites = Array.isArray(testSuite) ? testSuite : [testSuite]
+
+const iterations = program.iterations || 10
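+
+// Pipeline: assert that every suite executes correctly (runTests), then
+// benchmark each suite `iterations` times (runBenchmarks) and summarize
+// the results (reportFinal).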
+function start (suites) {
+  console.log(
+    'Running %s suite(s) with %s iterations each\n',
+    chalk.yellow(suites.length),
+    chalk.yellow(iterations)
+  )
+  return runTests(suites)
+    .then(runBenchmarks)
+    .then(reportFinal)
+    .catch(err => {
+      console.error(chalk.red('\nFailed to run benchmark\n'))
+      console.log(err.stack)
+      process.exit(1)
+    })
+}
+
+function runTest (suite) {
+  const isAsync = !!suite.async
+
+  if (typeof suite.test === 'function') {
+    if (isAsync) {
+      return Promise.fromCallback(suite.test)
+    }
+
+    return suite.test()
+  }
+
+  if (typeof suite.test.test === 'function') {
+    const { test, expected } = suite.test
+
+    if (isAsync) {
+      return Promise.fromCallback(lib.test.async(test, expected))
+    }
+
+    return lib.test.sync(test, expected)()
+  }
+
+  throw new Error('Test was not provided or has an invalid form')
+}
+
+function runTests (suites) {
+  if (program.skipTests) {
+    console.warn('%s Tests are skipped\n', chalk.yellow('WARNING:'))
+    return Promise.resolve(suites)
+  }
+
+  console.log(chalk.white.bold('Test suite(s):'))
+  return Promise.each(suites, suite => {
+    return Promise.resolve(suite)
+      .then(runTest)
+      .then(() => {
+        console.log(' %s %s', chalk.green(' ✔ '), suite.name)
+      })
+      .catch(err => {
+        console.error(
+          '%s %s Error: %s',
+          chalk.red(' ✕ '),
+          suite.name,
+          chalk.red(err.toString())
+        )
+        throw err
+      })
+  }).return(suites)
+}
+
+function runBenchmarks (suites) {
+  return Promise.reduce(
+    suites,
+    (acc, suite) => {
+      return runGC(suite)
+        .then(suite => {
+          // snapshot heap usage before and after each benchmark run
+          suite.memoryBefore = process.memoryUsage().heapUsed
+          return runBenchmark(suite)
+        })
+        .then(suite => {
+          suite.memoryAfter = process.memoryUsage().heapUsed
+          suite.memoryEfficiency = suite.memoryAfter - suite.memoryBefore
+          return suite
+        })
+        .then(suite => {
+          acc.push(suite)
+          return acc
+        })
+    },
+    []
+  )
+}
+
+function reportFinal (suites) {
+  console.log(chalk.white.bold('\nReport:'))
+
+  if (suites.length === 2) {
+    reportFasterOpsPerSec(suites)
+  }
+
+  if (suites.length > 2) {
+    listBySpeed(suites)
+  }
+
+  const hzSet = suites.map(suite => suite.median)
+  console.log(
+    '\n Total number of operations per second: %s',
+    chalk.yellow(fnumber(sum(hzSet)) + 'Hz')
+  )
+
+  return suites
+}
+
+function listBySpeed (suites) {
+  console.log(chalk.white.bold('\n Fastest (median ops/sec):'))
+  const sorted = suites.sort((a, b) => b.median - a.median)
+  sorted.forEach((suite, index) => {
+    const name = index === 0 ? chalk.yellow(suite.name) : chalk.bold(suite.name)
+
+    console.log(' %s: %s', name, chalk.yellow(fnumber(suite.median) + 'Hz'))
+  })
+}
+
+function reportFasterOpsPerSec (suites) {
+  const sorted = suites.sort((a, b) => b.median - a.median)
+  const first = sorted[0]
+  const second = sorted[1]
+
+  const diffMedian = (first.median - second.median) / first.median * 100
+
+  console.log(
+    ` Speed: %s was faster by %s (%s vs %s)`,
+    chalk.yellow(first.name),
+    chalk.white.bold(diffMedian.toFixed(2) + '%'),
+    chalk.yellow(fnumber(first.median) + 'Hz'),
+    chalk.yellow(fnumber(second.median) + 'Hz')
+  )
+}
+
+function runBenchmark (suiteBenchmark) {
+  return new Promise((resolve, reject) => {
+    const suite = new Benchmark.Suite()
+
+    const asyncLabel = suiteBenchmark.async ? 'ASYNC' : 'SYNC'
+    console.log(
+      '\n%s %s [%s]\n',
+      chalk.white.bold('Benchmarking:'),
+      chalk.bold(suiteBenchmark.name),
+      asyncLabel
+    )
+
+    const isAsync = !!suiteBenchmark.async
+    const benchmarkMethod =
+      isAsync === true
+        ? lib.benchmark.async(suiteBenchmark.benchmark)
+        : suiteBenchmark.benchmark
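+
+    // add the same function `iterations` times so benchmarkjs produces one
+    // ops/sec sample per iteration; average and median are computed from
+    // these samples in the `complete` handler below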
+    for (let index = 0; index < iterations; index++) {
+      suite.add(`${index + 1} ${suiteBenchmark.name}`, {
+        defer: isAsync,
+        fn: benchmarkMethod
+      })
+    }
+
+    // add listeners
+    suite
+      .on('cycle', function (event) {
+        console.log('', String(event.target))
+      })
+      .on('error', reject)
+      .on('complete', function () {
+        const benchmarks = Array.from(this)
+        const hzSet = benchmarks
+          .map(benchmark => benchmark.hz)
+          .sort((a, b) => a - b)
+        const hzSum = sum(hzSet)
+
+        const average = hzSum / hzSet.length
+        const median = middle(hzSet)
+
+        console.log(
+          '\n Ran %s (%s times) with an average of %s ops/sec',
+          chalk.yellow(suiteBenchmark.name),
+          chalk.yellow(iterations),
+          chalk.yellow(fnumber(average))
+        )
+        console.log(' Fastest: %s ops/sec', fnumber(Math.max(...hzSet)))
+        console.log(' Average: %s ops/sec', chalk.bold(fnumber(average)))
+        console.log(' Median : %s ops/sec', chalk.white.bold(fnumber(median)))
+        console.log(' Slowest: %s ops/sec', fnumber(Math.min(...hzSet)))
+
+        resolve(
+          Object.assign({}, suiteBenchmark, {
+            average,
+            median
+          })
+        )
+      })
+      // run async
+      .run({ async: true })
+  })
+}
+
+function fnumber (x) {
+  return Math.floor(x)
+    .toString()
+    .replace(/\B(?=(\d{3})+(?!\d))/g, ',')
+}
+
+function sum (values) {
+  return values.reduce((acc, val) => acc + val)
+}
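+
+// Median of the sorted ops/sec samples collected above, e.g.
+// middle([1, 2, 3]) === 2 and middle([1, 2, 3, 4]) === 2.5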
+function middle (values) {
+  const len = values.length
+  const half = Math.floor(len / 2)
+
+  if (len % 2) {
+    // odd number of samples: take the middle element
+    return values[half]
+  } else {
+    // even number of samples: average the two middle elements
+    return (values[half - 1] + values[half]) / 2.0
+  }
+}
+
+function bytesToKb (bytes) {
+  return Math.round(bytes / 1024 * 100) / 100
+}
+
+function runGC (val) {
+  return Promise.resolve(val).then(r => {
+    // global.gc is only exposed when node runs with --expose-gc
+    if (global.gc) {
+      global.gc()
+    }
+    return r
+  })
+}
+
+function listByMemoryEfficiency (suites) {
+  console.log(chalk.white.bold('\nMore memory efficient:'))
+  const sorted = suites.sort((a, b) => a.memoryEfficiency - b.memoryEfficiency)
+  sorted.forEach((suite, index) => {
+    const name = index === 0 ? chalk.yellow(suite.name) : chalk.bold(suite.name)
+
+    console.log(
+      ' %s: %s',
+      name,
+      chalk.yellow(fnumber(bytesToKb(suite.memoryEfficiency)) + 'Kb')
+    )
+  })
+}
+
+function reportSuiteMemory (suite) {
+  const { memoryBefore, memoryAfter } = suite
+  console.log(
+    ' Memory: not freed %s (before %s after %s)',
+    chalk.red.bold(fnumber(bytesToKb(memoryAfter - memoryBefore)) + 'Kb'),
+    chalk.white.bold(fnumber(bytesToKb(memoryBefore)) + 'Kb'),
+    chalk.white.bold(fnumber(bytesToKb(memoryAfter)) + 'Kb')
+  )
+  return suite
+}
+
+function reportMemoryEfficiency (suites) {
+  const sortedSuites = suites.sort(
+    (a, b) => a.memoryEfficiency - b.memoryEfficiency
+  )
+  const first = sortedSuites[0]
+  const second = sortedSuites[1]
+
+  const diffMemory =
+    (second.memoryEfficiency - first.memoryEfficiency) /
+    second.memoryEfficiency *
+    100
+
+  console.log(
+    ` Memory: %s was more memory efficient by %s (%s vs %s)`,
+    chalk.yellow(first.name),
+    chalk.white.bold(diffMemory.toFixed(2) + '%'),
+    chalk.yellow(fnumber(bytesToKb(first.memoryEfficiency)) + 'Kb'),
+    chalk.yellow(fnumber(bytesToKb(second.memoryEfficiency)) + 'Kb')
+  )
+}
+
+function unhandledError (err) {
+  console.log('Failed Tests: ' + err.stack)
+}
+
+process.on('unhandledRejection', unhandledError)
+process.on('uncaughtException', unhandledError)
+
+start(suites)
+
+module.exports = {
+  start,
+  runTests,
+  runBenchmarks,
+  reportFinal,
+  listBySpeed,
+  reportFasterOpsPerSec,
+  runBenchmark,
+  fnumber,
+  sum,
+  middle,
+  bytesToKb,
+  runGC,
+  listByMemoryEfficiency,
+  reportSuiteMemory,
+  reportMemoryEfficiency
+}
diff --git a/yarn.lock b/yarn.lock
index 739b992c..e1b714d8 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1120,7 +1120,7 @@ bcrypt-pbkdf@^1.0.0:
   dependencies:
     tweetnacl "^0.14.3"
 
-benchmark@2.1.4:
+benchmark@2.1.4, benchmark@^2.1.4:
   version "2.1.4"
   resolved "https://registry.yarnpkg.com/benchmark/-/benchmark-2.1.4.tgz#09f3de31c916425d498cc2ee565a0ebf3c2a5629"
   dependencies:
@@ -1441,7 +1441,7 @@ commander@^2.11.0, commander@^2.9.0:
   version "2.12.2"
   resolved "https://registry.yarnpkg.com/commander/-/commander-2.12.2.tgz#0f5946c427ed9ec0d91a46bb9def53e54650e555"
 
-commander@^2.5.0:
+commander@^2.13.0, commander@^2.5.0:
   version "2.13.0"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.13.0.tgz#6964bca67685df7c1f1430c584f07d7597885b9c"