From 6f45729715e6c9f70f7b671d44518f340f18005b Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Mon, 29 Jan 2018 16:46:42 -0500
Subject: [PATCH 1/8] feat(bench-trail): new package to benchmark performance

---
 packages/bench-trail/README.md                |  92 +++++
 .../bench-trail/examples/array-iteration.js   |  43 +++
 .../bench-trail/examples/async-example.js     |  30 ++
 packages/bench-trail/examples/manual-tests.js |  33 ++
 packages/bench-trail/lib/index.js             |  54 +++
 packages/bench-trail/package.json             |  22 ++
 packages/bench-trail/runner.js                | 333 ++++++++++++++++++
 7 files changed, 607 insertions(+)
 create mode 100644 packages/bench-trail/README.md
 create mode 100644 packages/bench-trail/examples/array-iteration.js
 create mode 100644 packages/bench-trail/examples/async-example.js
 create mode 100644 packages/bench-trail/examples/manual-tests.js
 create mode 100644 packages/bench-trail/lib/index.js
 create mode 100644 packages/bench-trail/package.json
 create mode 100755 packages/bench-trail/runner.js

diff --git a/packages/bench-trail/README.md b/packages/bench-trail/README.md
new file mode 100644
index 00000000..72c6bb5d
--- /dev/null
+++ b/packages/bench-trail/README.md
@@ -0,0 +1,92 @@
# Bench Trail

> Runs one or multiple benchmark tests

## Install

```bash
npm install -g bench-trail
```

## Usage

```bash
bench-trail benchmarks/map-helper-vs-entity.js -i 5
```

## TL;DR

Runs one (or more) BenchmarkJs test enough times to get less ambiguous results, and includes basic testing to make sure tests are reliable.

## Why?

While running [benchmarkjs](https://benchmarkjs.com) to compare different versions of code I found out a couple of things:

- **Ambiguous results**: tests would throw different results every time I ran them, especially if the difference was minimal. If I ran the same test multiple times the numbers changed; as time went by I would get more and more operations per second on each benchmark. This would only happen if I ran the tests consecutively, and the reason for this might be related to the v8 engine warming up and optimizing the code the more I ran it. If I let it cool off for some time, the tests would go back down in operations per second.
- **Reliable Execution**: more than once I made changes to the code being tested and never noticed that the change I had made was not even executing correctly, so the results I was getting were really unreliable.

## Solution

- **Ambiguous results**: Run the benchmark more than once to get median and average results; because the test will run multiple times, the code will get optimized, and using the median we can get more reliable results.
- **Reliable Execution**: Run a simple assertion test on each suite before the actual benchmark runs; this helps us make sure our tests are executing correctly.

## API

```bash
bench-trail <file> [-i <iterations>] [-s]
```

- `-i --iterations <count>` number of iterations, defaults to 10 if not provided.
- `-s --skip-tests` if provided, it will skip the assertion tests.
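For example, both flags can be combined; the suite file name below is just an illustration:

```bash
# hypothetical suite file: run 20 iterations and skip the assertion tests
bench-trail benchmarks/my-suite.js -i 20 -s
```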
### Writing your benchmark suites

The file you provide to bench-trail should export an `array` of suites; each suite is an object in the form of:

```
{
  name: string,
  test: function,
  benchmark: function
}
```

| Property | Type | Description |
|:---|:---|:---|
| *name* | `String` | Name that describes the test you are running |
| *test* | `function` | function to run assertion test against the result of the code you want to benchmark |
| *benchmark* | `function` | function to pass to benchmarkjs Suite that actually runs the benchmark |

#### Sync vs Async

- Synchronous methods are simple methods that expect a return value.
- Asynchronous methods are a bit different from benchmarkjs async methods; bench-trail expects async methods to follow [error-first callbacks](https://nodejs.org/api/errors.html#errors_error_first_callbacks).

#### Testing

bench-trail provides a convenience method that accepts the function to execute and a value to check against the result of the code you are testing. It takes care of sync vs async depending on how you set the `async` flag.

```js
test(test:function, value:*)
```

to write your manual test see the manual test example below

## Examples

Test synchronous code [example](examples/array-iteration.js)
Test asynchronous code [example](examples/async-example.js)
Write manual test sync/asynchronous code [example](examples/manual-tests.js)


## Acknowledgements

This tool is only a wrapper of [benchmarkjs](https://benchmarkjs.com), so the credit really goes to them.

## Contributing

Please read [CONTRIBUTING.md](https://github.com/ViacomInc/data-point/blob/master/CONTRIBUTING.md) for details on our code of conduct, and the process for submitting pull requests to us.
## License

This project is licensed under the Apache License Version 2.0 - see the [LICENSE](LICENSE) file for details

diff --git a/packages/bench-trail/examples/array-iteration.js b/packages/bench-trail/examples/array-iteration.js
new file mode 100644
index 00000000..0eb1ef3d
--- /dev/null
+++ b/packages/bench-trail/examples/array-iteration.js
@@ -0,0 +1,43 @@
const { test } = require('bench-trail')

const array = Array(100).fill('foo')
const expected = array.join('').length

function forLoop () {
  let result = ''
  for (let index = 0; index < array.length; index++) {
    result = result + array[index]
  }

  const length = result.length
  result = ''
  return length
}

function whileLoop () {
  let result = ''
  let index = 0
  while (index !== array.length) {
    result = result + array[index]
    index++
  }

  const length = result.length
  result = ''
  return length
}

module.exports = [
  {
    async: false,
    name: 'while-loop',
    test: test(whileLoop, expected),
    benchmark: whileLoop
  },
  {
    async: false,
    name: 'for-loop',
    test: test(forLoop, expected),
    benchmark: forLoop
  }
]

diff --git a/packages/bench-trail/examples/async-example.js b/packages/bench-trail/examples/async-example.js
new file mode 100644
index 00000000..88e270e8
--- /dev/null
+++ b/packages/bench-trail/examples/async-example.js
@@ -0,0 +1,30 @@
const { test } = require('bench-trail')

const expected = true

function testPromise (done) {
  Promise.resolve(true).then(() => {
    done(null, true)
  })
}

function testSetTimeOut (done) {
  setTimeout(() => {
    done(null, true)
  }, 0)
}

module.exports = [
  {
    async: true,
    name: 'promise',
    test: test(testPromise, expected),
    benchmark: testPromise
  },
  {
    async: true,
    name: 'timeout',
    test: test(testSetTimeOut, expected),
    benchmark: testSetTimeOut
  }
]

diff --git a/packages/bench-trail/examples/manual-tests.js b/packages/bench-trail/examples/manual-tests.js
new file mode 100644
index 00000000..7e76f211
--- /dev/null
+++ b/packages/bench-trail/examples/manual-tests.js
@@ -0,0 +1,33 @@
const assert = require('assert')

const expected = 2

function addsNumbersSync () {
  return 1 + 1
}

function addsNumbersAsync (done) {
  Promise.resolve().then(() => {
    done(null, 1 + 1)
  })
}

module.exports = [
  {
    async: false,
    name: 'addsNumbersSync',
    test: () => assert.deepEqual(addsNumbersSync(), expected),
    benchmark: addsNumbersSync
  },
  {
    async: true,
    name: 'addsNumbersAsync',
    test: done => {
      addsNumbersAsync((e, val) => {
        assert.deepEqual(val, expected)
        done(null)
      })
    },
    benchmark: addsNumbersAsync
  }
]

diff --git a/packages/bench-trail/lib/index.js b/packages/bench-trail/lib/index.js
new file mode 100644
index 00000000..7e17dedf
--- /dev/null
+++ b/packages/bench-trail/lib/index.js
@@ -0,0 +1,54 @@
const assert = require('assert')

function testSync (method, expected) {
  return () => assert.deepEqual(method(), expected)
}

function testAsync (method, expected) {
  return done => {
    return method((err, value) => {
      if (err) {
        return done(err)
      }
      try {
        assert.deepEqual(value, expected)
        done(null, value)
      } catch (e) {
        done(e)
      }
    })
  }
}

function test (method, expected) {
  return {
    test: method,
    expected
  }
}

test.sync = testSync
test.async = testAsync

function benchmarkSync (method) {
  return method
}

function benchmarkAsync (method) {
  return deferred => {
    return method((err, value) => {
      if (err) {
        throw err
      }
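
      // the resolved value from the callback is intentionally unused:
      // deferred.resolve() only signals benchmark.js that this async cycle finished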
      deferred.resolve()
    })
  }
}

module.exports = {
  test,
  benchmark: {
    sync: benchmarkSync,
    async: benchmarkAsync
  }
}

diff --git a/packages/bench-trail/package.json b/packages/bench-trail/package.json
new file mode 100644
index 00000000..1eed5923
--- /dev/null
+++ b/packages/bench-trail/package.json
@@ -0,0 +1,22 @@
{
  "name": "bench-trail",
  "version": "2.0.0",
  "description": "Runs one or multiple benchmark tests",
  "main": "./lib/index.js",
  "license": "Apache-2.0",
  "engines": {
    "node": ">=6"
  },
  "bin": {
    "bench-trail": "runner.js"
  },
  "author": {
    "name": "Acatl Pacheco",
    "email": "acatl.pacheco@viacom.com"
  },
  "devDependencies": {
    "benchmark": "2.1.4",
    "commander": "2.x",
    "chalk": "2.x"
  }
}

diff --git a/packages/bench-trail/runner.js b/packages/bench-trail/runner.js
new file mode 100755
index 00000000..affda5e8
--- /dev/null
+++ b/packages/bench-trail/runner.js
@@ -0,0 +1,333 @@
#!/usr/bin/env node --expose-gc

const path = require('path')
const program = require('commander')
const Benchmark = require('benchmark')
const Promise = require('bluebird')
const chalk = require('chalk')

const pkg = require('./package.json')
const lib = require('./lib')

program
  .version(pkg.version)
  .usage('<file>')
  .option(
    '-i, --iterations [count]',
    'Number of iterations, defaults to 10',
    parseInt
  )
  .option('-s, --skip-tests', 'skip tests')

program.parse(process.argv)

const filePath = program.args[0]

const testSuite = require(path.resolve(filePath))

const suites = Array.isArray(testSuite) ? testSuite : [testSuite]

const iterations = program.iterations || 10

function start (suites) {
  console.log(
    'Running %s suite(s) with %s iterations each\n',
    chalk.yellow(suites.length),
    chalk.yellow(iterations)
  )
  return runTests(suites)
    .then(runBenchmarks)
    .then(reportFinal)
    .catch(err => {
      console.error(chalk.red('\nFailed to run benchmark\n'))
      console.log(err.stack)
      process.exit(1)
    })
}

function runTest (suite) {
  const isASync = !!suite.async

  if (typeof suite.test === 'function') {
    if (isASync) {
      return Promise.fromCallback(suite.test)
    }

    return suite.test()
  }

  if (typeof suite.test.test === 'function') {
    const { test, expected } = suite.test

    if (isASync) {
      return Promise.fromCallback(lib.test.async(test, expected))
    }

    return lib.test.sync(test, expected)()
  }

  throw new Error('Test was not provided or has invalid form')
}

function runTests (suites) {
  if (program.skipTests) {
    console.warn('%s Tests are skipped\n', chalk.yellow('WARNING:'))
    return Promise.resolve(suites)
  }

  console.log(chalk.white.bold('Test suite(s):'))
  return Promise.each(suites, suite => {
    return Promise.resolve(suite)
      .then(runTest)
      .then(() => {
        console.log('  %s %s', chalk.green(' ✔ '), suite.name)
      })
      .catch(err => {
        console.error(
          '%s %s Error: %s',
          chalk.red(' ✕ '),
          suite.name,
          chalk.red(err.toString())
        )
        throw err
      })
  }).return(suites)
}

function runBenchmarks (suites) {
  return Promise.reduce(
    suites,
    (acc, suite) => {
      return runGC(suite)
        .then(suite => {
          suite.memoryBefore = process.memoryUsage().heapUsed
          return runBenchmark(suite)
        })
        .then(suite => {
          suite.memoryAfter = process.memoryUsage().heapUsed
          suite.memoryEfficiency = suite.memoryAfter - suite.memoryBefore
          return suite
        })
        .then(suite => {
          acc.push(suite)
          return acc
        })
    },
    []
  )
}

function reportFinal (suites) {
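  // two suites produce a head-to-head comparison below; more than two
  // produce a list ranked by median ops/sec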
  console.log(chalk.white.bold('\nReport:'))

  if (suites.length === 2) {
    reportFasterOpsPerSec(suites)
  }

  if (suites.length > 2) {
    listBySpeed(suites)
  }

  const hzSet = suites.map(suite => suite.median)
  console.log(
    '\n Total number of operations per second: %s',
    chalk.yellow(fnumber(sum(hzSet)) + 'Hz')
  )

  return suites
}

function listBySpeed (suites) {
  console.log(chalk.white.bold('\n Fastest (median ops/sec):'))
  const sorted = suites.sort((a, b) => b.median - a.median)
  sorted.forEach((suite, index) => {
    const name = index === 0 ? chalk.yellow(suite.name) : chalk.bold(suite.name)

    console.log(' %s: %s', name, chalk.yellow(fnumber(suite.median) + 'Hz'))
  })
}

function reportFasterOpsPerSec (suites) {
  const sorted = suites.sort((a, b) => b.median - a.median)
  const first = sorted[0]
  const second = sorted[1]

  const diffMedian = (first.median - second.median) / first.median * 100

  console.log(
    ` Speed: %s was faster by %s (%s vs %s)`,
    chalk.yellow(first.name),
    chalk.white.bold(diffMedian.toFixed(2) + '%'),
    chalk.yellow(fnumber(first.median) + 'Hz'),
    chalk.yellow(fnumber(second.median) + 'Hz')
  )
}

function runBenchmark (suiteBenchmark) {
  return new Promise((resolve, reject) => {
    const suite = new Benchmark.Suite()

    const asyncLabel = suiteBenchmark.async ? 'ASYNC' : 'SYNC'
    console.log(
      '\n%s %s [%s]\n',
      chalk.white.bold('Benchmarking:'),
      chalk.bold(suiteBenchmark.name),
      asyncLabel
    )

    const isASync = !!suiteBenchmark.async
    const benchmarkMethod =
      isASync === true
        ? lib.benchmark.async(suiteBenchmark.benchmark)
        : suiteBenchmark.benchmark

    for (let index = 0; index < iterations; index++) {
      suite.add(`${index + 1} ${suiteBenchmark.name}`, {
        defer: isASync,
        fn: benchmarkMethod
      })
    }

    // add listeners
    suite
      .on('cycle', function (event) {
        console.log('', String(event.target))
      })
      .on('error', reject)
      .on('complete', function () {
        const benchmarks = Array.from(this)
        const hzSet = benchmarks
          .map(benchmark => benchmark.hz)
          .sort((a, b) => a - b)
        const hzSum = sum(hzSet)

        const average = hzSum / hzSet.length
        const median = middle(hzSet)

        console.log(
          '\n Ran %s (%s times) with an average of %s ops/sec',
          chalk.yellow(suiteBenchmark.name),
          chalk.yellow(iterations),
          chalk.yellow(fnumber(average))
        )
        console.log(' Fastest: %s ops/sec', fnumber(Math.max(...hzSet)))
        console.log(' Average: %s ops/sec', chalk.bold(fnumber(average)))
        console.log(' Median : %s ops/sec', chalk.white.bold(fnumber(median)))
        console.log(' Slowest: %s ops/sec', fnumber(Math.min(...hzSet)))

        resolve(
          Object.assign({}, suiteBenchmark, {
            average,
            median
          })
        )
      })
      // run async
      .run({ async: true })
  })
}

function fnumber (x) {
  return Math.floor(x)
    .toString()
    .replace(/\B(?=(\d{3})+(?!\d))/g, ',')
}

function sum (values) {
  return values.reduce((acc, val) => acc + val)
}

function middle (values) {
  var len = values.length
  var half = Math.floor(len / 2)

  if (len % 2) {
    return values[half]
  } else {
    return (values[half - 1] + values[half]) / 2.0
  }
}

function bytesToKb (bytes) {
  return Math.round(bytes / 1024 * 100) / 100
}

function runGC (val) {
  return Promise.resolve(val).then(r => {
    global.gc()
    return r
  })
}

function listByMemoryEfficiency (suites) {
  console.log(chalk.white.bold('\nMore memory efficient:'))
  const sorted = suites.sort((a, b) => a.memoryEfficiency - b.memoryEfficiency)
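  // ascending order: the suite that grew the heap the least ranks first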
  sorted.forEach((suite, index) => {
    const name = index === 0 ? chalk.yellow(suite.name) : chalk.bold(suite.name)

    console.log(
      ' %s: %s',
      name,
      chalk.yellow(fnumber(bytesToKb(suite.memoryEfficiency)) + 'Kb')
    )
  })
}

function reportSuiteMemory (suite) {
  const { memoryBefore, memoryAfter } = suite
  console.log(
    ' Memory: not freed %s (before %s after %s)',
    chalk.red.bold(fnumber(bytesToKb(memoryAfter - memoryBefore)) + 'Kb'),
    chalk.white.bold(fnumber(bytesToKb(memoryBefore)) + 'Kb'),
    chalk.white.bold(fnumber(bytesToKb(memoryAfter)) + 'Kb')
  )
  return suite
}

function reportMemoryEfficiency (suites) {
  const sortedSuites = suites.sort(
    (a, b) => a.memoryEfficiency - b.memoryEfficiency
  )
  const first = sortedSuites[0]
  const second = sortedSuites[1]

  const diffMemory =
    (second.memoryEfficiency - first.memoryEfficiency) /
    second.memoryEfficiency *
    100

  console.log(
    ` Memory: %s was more memory efficient by %s (%s vs %s)`,
    chalk.yellow(first.name),
    chalk.white.bold(diffMemory.toFixed(2) + '%'),
    chalk.yellow(fnumber(bytesToKb(first.memoryEfficiency)) + 'Kb'),
    chalk.yellow(fnumber(bytesToKb(second.memoryEfficiency)) + 'Kb')
  )
}

function unhandledError (err) {
  console.log('Failed Tests: ' + err.stack)
}

process.on('unhandledRejection', unhandledError)
process.on('uncaughtException', unhandledError)

start(suites)

module.exports = {
  start,
  runTests,
  runBenchmarks,
  reportFinal,
  listBySpeed,
  reportFasterOpsPerSec,
  runBenchmark,
  fnumber,
  sum,
  middle,
  bytesToKb,
  runGC,
  listByMemoryEfficiency,
  reportSuiteMemory,
  reportMemoryEfficiency
}

From 430b5f28767cef23b3ece8c5c431af2d90cf65cb Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Mon, 29 Jan 2018 17:17:29 -0500
Subject: [PATCH 2/8] feat(bench-trial): new package to test performance

---
 packages/{bench-trail => bench-trial}/README.md                   | 0
 packages/{bench-trail => bench-trial}/examples/array-iteration.js | 0
 packages/{bench-trail => bench-trial}/examples/async-example.js   | 0
 packages/{bench-trail => bench-trial}/examples/manual-tests.js    | 0
 packages/{bench-trail => bench-trial}/lib/index.js                | 0
 packages/{bench-trail => bench-trial}/package.json                | 0
 packages/{bench-trail => bench-trial}/runner.js                   | 0
 7 files changed, 0 insertions(+), 0 deletions(-)
 rename packages/{bench-trail => bench-trial}/README.md (100%)
 rename packages/{bench-trail => bench-trial}/examples/array-iteration.js (100%)
 rename packages/{bench-trail => bench-trial}/examples/async-example.js (100%)
 rename packages/{bench-trail => bench-trial}/examples/manual-tests.js (100%)
 rename packages/{bench-trail => bench-trial}/lib/index.js (100%)
 rename packages/{bench-trail => bench-trial}/package.json (100%)
 rename packages/{bench-trail => bench-trial}/runner.js (100%)

diff --git a/packages/bench-trail/README.md b/packages/bench-trial/README.md
similarity index 100%
rename from packages/bench-trail/README.md
rename to packages/bench-trial/README.md
diff --git a/packages/bench-trail/examples/array-iteration.js b/packages/bench-trial/examples/array-iteration.js
similarity index 100%
rename from packages/bench-trail/examples/array-iteration.js
rename to packages/bench-trial/examples/array-iteration.js
diff --git a/packages/bench-trail/examples/async-example.js b/packages/bench-trial/examples/async-example.js
similarity index 100%
rename from packages/bench-trail/examples/async-example.js
rename to packages/bench-trial/examples/async-example.js
diff --git a/packages/bench-trail/examples/manual-tests.js b/packages/bench-trial/examples/manual-tests.js
similarity index 100%
rename from packages/bench-trail/examples/manual-tests.js
rename to packages/bench-trial/examples/manual-tests.js
diff --git a/packages/bench-trail/lib/index.js b/packages/bench-trial/lib/index.js
similarity index 100%
rename from packages/bench-trail/lib/index.js
rename to packages/bench-trial/lib/index.js
diff --git a/packages/bench-trail/package.json b/packages/bench-trial/package.json
similarity index 100%
rename from packages/bench-trail/package.json
rename to packages/bench-trial/package.json
diff --git a/packages/bench-trail/runner.js b/packages/bench-trial/runner.js
similarity index 100%
rename from packages/bench-trail/runner.js
rename to packages/bench-trial/runner.js

From fa3d0d76cd1a22a854eb537d18e656ec40c69d57 Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Mon, 29 Jan 2018 17:28:58 -0500
Subject: [PATCH 3/8] fix(bench-trial): fix typos

---
 packages/bench-trial/README.md                   | 14 +++++++-------
 packages/bench-trial/examples/array-iteration.js |  2 +-
 packages/bench-trial/examples/async-example.js   |  2 +-
 packages/bench-trial/package.json                |  4 ++--
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/packages/bench-trial/README.md b/packages/bench-trial/README.md
index 72c6bb5d..cb2eb3d1 100644
--- a/packages/bench-trial/README.md
+++ b/packages/bench-trial/README.md
@@ -1,17 +1,17 @@
-# Bench Trail
+# Bench trial

> Runs one or multiple benchmark tests

## Install

```bash
-npm install -g bench-trail
+npm install -g bench-trial
```

## Usage

```bash
-bench-trail benchmarks/map-helper-vs-entity.js -i 5
+bench-trial benchmarks/map-helper-vs-entity.js -i 5
```

## TL;DR
@@ -33,7 +33,7 @@ While running [benchmarkjs](https://benchmarkjs.com) to compare different versions of code I found out a couple of things:
## API

```bash
-bench-trail <file> [-i <iterations>] [-s]
+bench-trial <file> [-i <iterations>] [-s]
```

- `-i --iterations <count>` number of iterations, defaults to 10 if not provided.
- `-s --skip-tests` if provided, it will skip the assertion tests.
@@ -41,7 +41,7 @@ bench-trail <file> [-i <iterations>] [-s]

### Writing your benchmark suites

-The file you provide to bench-trail should export an `array` of suites; each suite is an object in the form of:
+The file you provide to bench-trial should export an `array` of suites; each suite is an object in the form of:

```
{
@@ -60,11 +60,11 @@ The file you provide to bench-trail should export an `array` of suites; each suite is an object in the form of:
#### Sync vs Async

- Synchronous methods are simple methods that expect a return value.
-- Asynchronous methods are a bit different from benchmarkjs async methods; bench-trail expects async methods to follow [error-first callbacks](https://nodejs.org/api/errors.html#errors_error_first_callbacks).
+- Asynchronous methods are a bit different from benchmarkjs async methods; bench-trial expects async methods to follow [error-first callbacks](https://nodejs.org/api/errors.html#errors_error_first_callbacks).

#### Testing

-bench-trail provides a convenience method that accepts the function to execute and a value to check against the result of the code you are testing. It takes care of sync vs async depending on how you set the `async` flag.
+bench-trial provides a convenience method that accepts the function to execute and a value to check against the result of the code you are testing. It takes care of sync vs async depending on how you set the `async` flag.
```js
test(test:function, value:*)
```

diff --git a/packages/bench-trial/examples/array-iteration.js b/packages/bench-trial/examples/array-iteration.js
index 0eb1ef3d..8453546d 100644
--- a/packages/bench-trial/examples/array-iteration.js
+++ b/packages/bench-trial/examples/array-iteration.js
@@ -1,4 +1,4 @@
-const { test } = require('bench-trail')
+const { test } = require('bench-trial')

const array = Array(100).fill('foo')
const expected = array.join('').length
diff --git a/packages/bench-trial/examples/async-example.js b/packages/bench-trial/examples/async-example.js
index 88e270e8..70d7dfa1 100644
--- a/packages/bench-trial/examples/async-example.js
+++ b/packages/bench-trial/examples/async-example.js
@@ -1,4 +1,4 @@
-const { test } = require('bench-trail')
+const { test } = require('bench-trial')

const expected = true
diff --git a/packages/bench-trial/package.json b/packages/bench-trial/package.json
index 1eed5923..c269aa2e 100644
--- a/packages/bench-trial/package.json
+++ b/packages/bench-trial/package.json
@@ -1,5 +1,5 @@
{
-  "name": "bench-trail",
+  "name": "bench-trial",
  "version": "2.0.0",
  "description": "Runs one or multiple benchmark tests",
  "main": "./lib/index.js",
@@ -8,7 +8,7 @@
    "node": ">=6"
  },
  "bin": {
-    "bench-trail": "runner.js"
+    "bench-trial": "runner.js"
  },
  "author": {
    "name": "Acatl Pacheco",

From bc77f113018f56c6bd34c10fd5bc1568ca5957b8 Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Tue, 30 Jan 2018 09:35:09 -0500
Subject: [PATCH 4/8] docs(bench-trial): thanks + feedback

---
 packages/bench-trial/README.md |  5 +++--
 packages/bench-trial/runner.js | 12 ++++++------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/packages/bench-trial/README.md b/packages/bench-trial/README.md
index cb2eb3d1..66cffa25 100644
--- a/packages/bench-trial/README.md
+++ b/packages/bench-trial/README.md
@@ -78,10 +78,11 @@ Test synchronous code [example](examples/array-iteration.js)
Test asynchronous code [example](examples/async-example.js)
Write manual test sync/asynchronous code [example](examples/manual-tests.js)

-
## Acknowledgements

-This tool is only a wrapper of [benchmarkjs](https://benchmarkjs.com), so the credit really goes to them.
+This tool is a wrapper of [benchmarkjs](https://benchmarkjs.com), so all credit related to benchmarking itself really goes to them.
+
+Thanks to [Paul Molluzzo](https://github.com/paulmolluzzo) for coming up with the name **bench-trial**!

## Contributing

diff --git a/packages/bench-trial/runner.js b/packages/bench-trial/runner.js
index affda5e8..f6a93d05 100755
--- a/packages/bench-trial/runner.js
+++ b/packages/bench-trial/runner.js
@@ -46,10 +46,10 @@ function start (suites) {
}

function runTest (suite) {
-  const isASync = !!suite.async
+  const isAsync = !!suite.async

  if (typeof suite.test === 'function') {
-    if (isASync) {
+    if (isAsync) {
      return Promise.fromCallback(suite.test)
    }

@@ -59,7 +59,7 @@ function runTest (suite) {
  if (typeof suite.test.test === 'function') {
    const { test, expected } = suite.test

-    if (isASync) {
+    if (isAsync) {
      return Promise.fromCallback(lib.test.async(test, expected))
    }

@@ -175,15 +175,15 @@ function runBenchmark (suiteBenchmark) {
    asyncLabel
  )

-  const isASync = !!suiteBenchmark.async
+  const isAsync = !!suiteBenchmark.async
  const benchmarkMethod =
-    isASync === true
+    isAsync === true
      ? lib.benchmark.async(suiteBenchmark.benchmark)
      : suiteBenchmark.benchmark

  for (let index = 0; index < iterations; index++) {
    suite.add(`${index + 1} ${suiteBenchmark.name}`, {
-      defer: isASync,
+      defer: isAsync,
      fn: benchmarkMethod
    })
  }

From 9ae18abb309db39dd8ee1f4e1fa86a4ac71142a8 Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Wed, 31 Jan 2018 09:20:22 -0500
Subject: [PATCH 5/8] docs(bench-trial): updates wording

---
 packages/bench-trial/README.md | 13 +++++++------
 packages/bench-trial/runner.js |  4 ++--
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/packages/bench-trial/README.md b/packages/bench-trial/README.md
index 66cffa25..bafd3f13 100644
--- a/packages/bench-trial/README.md
+++ b/packages/bench-trial/README.md
@@ -22,13 +22,14 @@ Runs one (or more) BenchmarkJs test enough times to get less ambiguous results, and includes basic testing to make sure tests are reliable.

While running [benchmarkjs](https://benchmarkjs.com) to compare different versions of code I found out a couple of things:

-- **Ambiguous results**: tests would throw different results every time I ran them, especially if the difference was minimal. If I ran the same test multiple times the numbers changed; as time went by I would get more and more operations per second on each benchmark. This would only happen if I ran the tests consecutively, and the reason for this might be related to the v8 engine warming up and optimizing the code the more I ran it. If I let it cool off for some time, the tests would go back down in operations per second.
-- **Reliable Execution**: more than once I made changes to the code being tested and never noticed that the change I had made was not even executing correctly, so the results I was getting were really unreliable.
+- **Consistency**: I noticed that the same benchmark tests were returning different results every time they executed. If they were re-run consecutively, I would get more operations per second on each benchmark. I believe the reason may be related to the v8 engine warming up and optimizing the code the more it ran, since if I let it "cool off" for some time, the operations per second for each test would decrease. These ambiguous results meant having to repeat tests to ensure some consistency.
+- **Reliable Execution**: Occasionally I made changes to the benchmarked code and would overlook that it was not executing correctly, further compounding the issue of making the results unreliable.

## Solution

-- **Ambiguous results**: Run the benchmark more than once to get median and average results; because the test will run multiple times, the code will get optimized, and using the median we can get more reliable results.
-- **Reliable Execution**: Run a simple assertion test on each suite before the actual benchmark runs; this helps us make sure our tests are executing correctly.
+- **Consistency**: By running benchmark tests more than once, we can get median and average results and get a bigger picture with less fluctuation. Because the tests will run multiple times in succession, the code will get optimized by the engine, and we can use the median time as a more consistent and stable metric.
+
+- **Tests for reliable execution**: By running simple assertion tests on each suite before the actual benchmark runs, we can be sure our tests are executing correctly.
## API

```bash
bench-trial <file> [-i <iterations>] [-s]
```

- `-i --iterations <count>` number of iterations, defaults to 10 if not provided.
- `-s --skip-tests` if provided, it will skip the assertion tests.

### Writing your benchmark suites

-The file you provide to bench-trial should export an `array` of suites; each suite is an object in the form of:
+The file provided to **bench-trial** should export an `array` of test suites; each test suite is an object in the form of:

```
{
@@ -70,7 +71,7 @@ bench-trial provides a convenience method that accepts the function to execute and a value to check against the result of the code you are testing. It takes care of sync vs async depending on how you set the `async` flag.
test(test:function, value:*)
```

-to write your manual test see the manual test example below
+To write your manual test see the manual test example below

## Examples

diff --git a/packages/bench-trial/runner.js b/packages/bench-trial/runner.js
index f6a93d05..0c5f2289 100755
--- a/packages/bench-trial/runner.js
+++ b/packages/bench-trial/runner.js
@@ -238,8 +238,8 @@ function sum (values) {
}

function middle (values) {
-  var len = values.length
-  var half = Math.floor(len / 2)
+  const len = values.length
+  const half = Math.floor(len / 2)

  if (len % 2) {
    return values[half]

From 18be79e3d30853024f2cb9ff29d796d904a77bdd Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Wed, 31 Jan 2018 09:24:33 -0500
Subject: [PATCH 6/8] build(bench-trial): changes to use dependencies

---
 packages/bench-trial/package.json |  4 ++--
 yarn.lock                         | 12 ++++++------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/packages/bench-trial/package.json b/packages/bench-trial/package.json
index c269aa2e..b1eab265 100644
--- a/packages/bench-trial/package.json
+++ b/packages/bench-trial/package.json
@@ -14,8 +14,8 @@
    "name": "Acatl Pacheco",
    "email": "acatl.pacheco@viacom.com"
  },
-  "devDependencies": {
-    "benchmark": "2.1.4",
+  "dependencies": {
+    "benchmark": "2.x",
    "commander": "2.x",
    "chalk": "2.x"
  }
}

diff --git a/yarn.lock b/yarn.lock
index 739b992c..0eb6a574 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1120,7 +1120,7 @@ bcrypt-pbkdf@^1.0.0:
  dependencies:
    tweetnacl "^0.14.3"

-benchmark@2.1.4:
+benchmark@2.1.4, benchmark@2.x:
  version "2.1.4"
  resolved "https://registry.yarnpkg.com/benchmark/-/benchmark-2.1.4.tgz#09f3de31c916425d498cc2ee565a0ebf3c2a5629"
  dependencies:
@@ -1304,7 +1304,7 @@ chalk@1.1.3, chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3:
    strip-ansi "^3.0.0"
    supports-color "^2.0.0"

-chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.3.0:
+chalk@2.x, chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.3.0:
  version "2.3.0"
  resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.3.0.tgz#b5ea48efc9c1793dccc9b4767c93914d3f2d52ba"
  dependencies:
@@ -1437,14 +1437,14 @@ command-join@^2.0.0:
  version "2.0.0"
  resolved "https://registry.yarnpkg.com/command-join/-/command-join-2.0.0.tgz#52e8b984f4872d952ff1bdc8b98397d27c7144cf"

+commander@2.x, commander@^2.5.0:
+  version "2.13.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.13.0.tgz#6964bca67685df7c1f1430c584f07d7597885b9c"
+
commander@^2.11.0, commander@^2.9.0:
  version "2.12.2"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.12.2.tgz#0f5946c427ed9ec0d91a46bb9def53e54650e555"

-commander@^2.5.0:
-  version "2.13.0"
-  resolved "https://registry.yarnpkg.com/commander/-/commander-2.13.0.tgz#6964bca67685df7c1f1430c584f07d7597885b9c"
-
commitizen@2.9.6, commitizen@latest:
  version "2.9.6"
  resolved "https://registry.yarnpkg.com/commitizen/-/commitizen-2.9.6.tgz#c0d00535ef264da7f63737edfda4228983fa2291"

From 7b23cecbf0ee1fd0829de65a67167771ae9e17fb Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Wed, 31 Jan 2018 09:54:52 -0500
Subject: [PATCH 7/8] build(bench-trial): update to explicit versions
---
 packages/bench-trial/package.json |  6 +++---
 yarn.lock                         | 12 ++++++------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/packages/bench-trial/package.json b/packages/bench-trial/package.json
index b1eab265..1f2a4f1d 100644
--- a/packages/bench-trial/package.json
+++ b/packages/bench-trial/package.json
@@ -15,8 +15,8 @@
    "email": "acatl.pacheco@viacom.com"
  },
  "dependencies": {
-    "benchmark": "2.x",
-    "commander": "2.x",
-    "chalk": "2.x"
+    "benchmark": "^2.1.4",
+    "commander": "^2.13.0",
+    "chalk": "^2.3.0"
  }
}

diff --git a/yarn.lock b/yarn.lock
index 0eb6a574..e1b714d8 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1120,7 +1120,7 @@ bcrypt-pbkdf@^1.0.0:
  dependencies:
    tweetnacl "^0.14.3"

-benchmark@2.1.4, benchmark@2.x:
+benchmark@2.1.4, benchmark@^2.1.4:
  version "2.1.4"
  resolved "https://registry.yarnpkg.com/benchmark/-/benchmark-2.1.4.tgz#09f3de31c916425d498cc2ee565a0ebf3c2a5629"
  dependencies:
@@ -1304,7 +1304,7 @@ chalk@1.1.3, chalk@^1.0.0, chalk@^1.1.0, chalk@^1.1.1, chalk@^1.1.3:
    strip-ansi "^3.0.0"
    supports-color "^2.0.0"

-chalk@2.x, chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.3.0:
+chalk@^2.0.0, chalk@^2.0.1, chalk@^2.1.0, chalk@^2.3.0:
  version "2.3.0"
  resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.3.0.tgz#b5ea48efc9c1793dccc9b4767c93914d3f2d52ba"
  dependencies:
@@ -1437,14 +1437,14 @@ command-join@^2.0.0:
  version "2.0.0"
  resolved "https://registry.yarnpkg.com/command-join/-/command-join-2.0.0.tgz#52e8b984f4872d952ff1bdc8b98397d27c7144cf"

-commander@2.x, commander@^2.5.0:
-  version "2.13.0"
-  resolved "https://registry.yarnpkg.com/commander/-/commander-2.13.0.tgz#6964bca67685df7c1f1430c584f07d7597885b9c"
-
commander@^2.11.0, commander@^2.9.0:
  version "2.12.2"
  resolved "https://registry.yarnpkg.com/commander/-/commander-2.12.2.tgz#0f5946c427ed9ec0d91a46bb9def53e54650e555"

+commander@^2.13.0, commander@^2.5.0:
+  version "2.13.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.13.0.tgz#6964bca67685df7c1f1430c584f07d7597885b9c"
+
commitizen@2.9.6, commitizen@latest:
  version "2.9.6"
  resolved "https://registry.yarnpkg.com/commitizen/-/commitizen-2.9.6.tgz#c0d00535ef264da7f63737edfda4228983fa2291"

From 5a89cd07705f4e93a30ad3fa4852237a0c567a7a Mon Sep 17 00:00:00 2001
From: Acatl Pacheco <acatl.pacheco@viacom.com>
Date: Wed, 31 Jan 2018 10:00:46 -0500
Subject: [PATCH 8/8] docs(bench-trial): wording

---
 packages/bench-trial/README.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/packages/bench-trial/README.md b/packages/bench-trial/README.md
index bafd3f13..d4ac74d6 100644
--- a/packages/bench-trial/README.md
+++ b/packages/bench-trial/README.md
@@ -22,14 +22,14 @@ Runs one (or more) BenchmarkJs test enough times to get less ambiguous results, and includes basic testing to make sure tests are reliable.

While running [benchmarkjs](https://benchmarkjs.com) to compare different versions of code I found out a couple of things:

-- **Consistency**: I noticed that the same benchmark tests were returning different results every time they executed. If they were re-run consecutively, I would get more operations per second on each benchmark. I believe the reason may be related to the v8 engine warming up and optimizing the code the more it ran, since if I let it "cool off" for some time, the operations per second for each test would decrease. These ambiguous results meant having to repeat tests to ensure some consistency.
-- **Reliable Execution**: Occasionally I made changes to the benchmarked code and would overlook that it was not executing correctly, further compounding the issue of making the results unreliable.
+- **Ambiguous results**: I noticed that the same benchmark tests were returning different results every time they executed. If they were re-run consecutively, I would get more operations per second on each benchmark. I believe the reason may be related to the v8 engine warming up and optimizing the code the more it ran, since if I let it "cool off" for some time, the operations per second for each test would decrease. These ambiguous results meant having to repeat tests to ensure some consistency.
+- **Unreliable execution**: Occasionally I made changes to the benchmarked code and would overlook that it was not executing correctly, further compounding the issue of making the results unreliable.

## Solution

- **Consistency**: By running benchmark tests more than once, we can get median and average results and get a bigger picture with less fluctuation. Because the tests will run multiple times in succession, the code will get optimized by the engine, and we can use the median time as a more consistent and stable metric.

-- **Tests for reliable execution**: By running simple assertion tests on each suite before the actual benchmark runs, we can be sure our tests are executing correctly.
+- **Reliable execution**: By running simple assertion tests on each suite before the actual benchmark runs, we can be sure our tests are executing correctly.

@@ -75,9 +75,9 @@ To write your manual test see the manual test example below

## Examples

-Test synchronous code [example](examples/array-iteration.js)
-Test asynchronous code [example](examples/async-example.js)
-Write manual test sync/asynchronous code [example](examples/manual-tests.js)
+- Test synchronous code [example](examples/array-iteration.js)
+- Test asynchronous code [example](examples/async-example.js)
+- Write manual test sync/asynchronous code [example](examples/manual-tests.js)

## Acknowledgements
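
As a quick reference for the suite API introduced in this series, a minimal suite file might look like the sketch below. The file name, input data, and expected value are illustrative; `test` is the convenience helper the package exports from `lib/index.js`:

```js
// minimal-suite.js: a sketch of a bench-trial suite file, assuming
// bench-trial is installed and its test() helper is used for assertions
const { test } = require('bench-trial')

const input = [1, 2, 3, 4]
const expected = 10

// synchronous candidate: sum the array with reduce
function sumReduce () {
  return input.reduce((acc, n) => acc + n, 0)
}

// asynchronous candidate: same sum, reported through an error-first callback
function sumAsync (done) {
  setImmediate(() => done(null, sumReduce()))
}

module.exports = [
  {
    async: false,
    name: 'sum-reduce',
    test: test(sumReduce, expected), // asserts the result equals 10 before benchmarking
    benchmark: sumReduce
  },
  {
    async: true,
    name: 'sum-async',
    test: test(sumAsync, expected), // async variant follows error-first callbacks
    benchmark: sumAsync
  }
]
```

Running it with `bench-trial minimal-suite.js -i 5` would assert that both functions produce 10 before benchmarking each of them five times.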