DEV-6326 Allow custom functions to be benchmarked (#239)
JosVerburg committed Apr 23, 2020
1 parent 46e6cf3 commit dc1a59e
Showing 19 changed files with 713 additions and 15 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,5 +1,6 @@
/coverage
/node_modules
/performance/lib
/test/assets/QT3TS
/test/assets/XQUTS
/test/assets/runnablePerformanceTestNames.csv
29 changes: 18 additions & 11 deletions CONTRIBUTING.md
@@ -153,7 +153,6 @@ FontoXPath contains different test sets:
|The QT3 XQueryX tests|`npm run qt3testsxqueryx`|
|The XQUTS tests|`npm run xqutstests`|
|The XQUTS XQueryX tests|`npm run xqutstestsxqueryx`|
|The QT3 performance tests|`npm run performance`|

They all run in Node. By running the tests with the `--inspect` flag,
they can be debugged by the browser: `npm run test -- --inspect
@@ -185,16 +184,24 @@ If you are adding a new feature, don't forget to edit the file
`test/runnableTestSets.csv`. This file disables tests for features we
have not yet implemented.

To check the performance of FontoXPath we pick a random subset of the
qt3tests, as running them all would take too long (hours). This random
subset is not checked in but can be generated using
`npm run performance -- --regenerate [<number-of-tests>]`, which creates
and populates `test/runnablePerformanceTestNames.csv`. You can manually
edit this file to run specific tests. By storing these in a file you can
run the same set even when switching between different commits. With the
generated file present you can run the tests using `npm run performance`,
which runs a benchmark for each qt3 test using
[benchmarkjs](https://benchmarkjs.com/).
### Running benchmarks

FontoXPath has two ways to run benchmarks.

The first runs benchmarks over scenarios defined in JavaScript, located in the `/performance`
directory. Run them with `npm run performance` to get the results in the console, or start a server
that hosts them with `npm run performance-server` so you can measure the performance in different
browsers. Some of these benchmarks are set up as a comparison, which indicates the performance
overhead of FontoXPath. Note that some tests use assets from the QT3TS; see the steps in
[Setting up a development environment](#setting-up-a-development-environment).

The second checks the performance of FontoXPath with the qt3tests. We pick a random subset of the
qt3tests, as running them all would take too long (hours). This random subset is not checked in but
can be generated using `npm run qt3performance -- --regenerate [<number-of-tests>]`, which creates
and populates `test/runnablePerformanceTestNames.csv`. You can manually edit this file to run
specific tests. By storing these in a file you can run the same set even when switching between
different commits. With the generated file present you can run the tests using
`npm run qt3performance`, which runs a benchmark for each qt3 test using
[benchmarkjs](https://benchmarkjs.com/).
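
As a rough, hypothetical sketch of what such a comparison scenario could look like, the example
below pits a FontoXPath expression against a plain DOM check on the same slimdom document. The
`register` entry point, the `BenchmarkCollection` import path, and the `fontoxpath`/`slimdom`
package imports are illustrative assumptions; the actual wiring of the scenarios under
`/performance` is not shown in this excerpt.

```ts
import { evaluateXPathToBoolean } from 'fontoxpath';
import * as slimdom from 'slimdom';

// Hypothetical import path; the registry class is added elsewhere in this commit.
import BenchmarkCollection from './benchmarkRunner/BenchmarkCollection';

// Hypothetical entry point: assume the runner passes a BenchmarkCollection
// instance on which the scenario registers itself.
export default function register(benchmarkCollection: BenchmarkCollection): void {
	// Both benchmarks run against the same document; compareBenchmarks takes a
	// single setup/teardown pair for exactly this reason.
	const document = new slimdom.Document();
	document.appendChild(document.createElement('root'));

	benchmarkCollection.compareBenchmarks(
		'check for a root element',
		undefined, // setup: nothing to prepare beyond the shared document above
		undefined, // teardown
		{
			name: 'fontoxpath',
			test: () => evaluateXPathToBoolean('/root', document),
		},
		{
			name: 'plain DOM',
			test: () =>
				document.documentElement !== null &&
				document.documentElement.nodeName === 'root',
		}
	);
}
```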

### Building

170 changes: 167 additions & 3 deletions package-lock.json

9 changes: 8 additions & 1 deletion package.json
@@ -18,7 +18,9 @@
		"coverage": "nyc report --reporter=text-lcov | coveralls",
		"integrationtests": "ts-mocha --require test/testhook.js \"test/specs/parsing/**/*.ts\" -p test/tsconfig.json",
		"prepare": "npm run build",
		"performance": "ts-node -P test/tsconfig.json -r tsconfig-paths/register test/qt3testsBenchmark.ts",
		"performance": "ts-node -P ./performance/tsconfig.json ./performance/runBenchmarks.ts",
		"performance-server": "rimraf ./performance/lib && tsc -p ./performance/web.tsconfig.json && concurrently -k -p \"[{name}]\" -n \"tsc,rollup,server\" \"tsc -p ./performance/web.tsconfig.json -w\" \"rollup -c ./performance/rollup.config.js -w \" \"ts-node -P ./performance/tsconfig.json ./performance/server.ts\"",
		"qt3performance": "ts-node -P test/tsconfig.json -r tsconfig-paths/register test/qt3testsBenchmark.ts",
		"qt3tests": "ts-mocha --paths -p test/tsconfig.json test/qt3tests.ts",
		"qt3testsxqueryx": "ts-mocha --paths -p test/tsconfig.json test/qt3testsXQueryX.ts",
		"start": "concurrently -k -p \"[{name}]\" -n \"TypeScript,Node\" \"tsc -p ./demo/tsconfig.json -w\" \"ts-node ./demo/server.ts --dist\"",
@@ -61,9 +63,12 @@
	"homepage": "https://github.com/FontoXML/fontoxpath",
	"devDependencies": {
		"@microsoft/api-extractor": "^7.7.10",
		"@rollup/plugin-commonjs": "^11.1.0",
		"@rollup/plugin-node-resolve": "^7.1.3",
		"@tscc/tscc": "^0.4.8",
		"@types/benchmark": "^1.0.31",
		"@types/chai": "^4.2.11",
		"@types/glob": "^7.1.1",
		"@types/mocha": "^7.0.2",
		"@types/node": "^13.5.0",
		"@types/node-static": "^0.7.3",
@@ -74,11 +79,13 @@
		"coveralls": "^3.0.11",
		"cross-env": "^7.0.2",
		"fs-extra": "^9.0.0",
		"glob": "^7.1.6",
		"mocha": "^7.1.1",
		"node-static": "^0.7.11",
		"nyc": "^15.0.0",
		"pegjs": "^0.10.0",
		"prettier": "^2.0.2",
		"rollup": "^2.6.1",
		"sinon": "^9.0.1",
		"sinon-chai": "^3.5.0",
		"slimdom": "^2.3.2",
55 changes: 55 additions & 0 deletions performance/benchmarkRunner/BenchmarkCollection.ts
@@ -0,0 +1,55 @@
import Benchmark from 'benchmark';

type testFunction = () => void;
type setupFunction = () => void | Promise<void>;
type teardownFunction = () => void | Promise<void>;

export default abstract class BenchmarkCollection {
	protected readonly _benchmarks: {
		benchmark: Benchmark;
		setup?: setupFunction;
		teardown?: teardownFunction;
	}[] = [];

	protected readonly _comparisons: {
		benchmarks: Benchmark[];
		name: string;
		setup?: setupFunction;
		teardown?: teardownFunction;
	}[] = [];

	public addBenchmark(
		name: string,
		test: testFunction,
		setup?: setupFunction,
		teardown?: teardownFunction
	): void {
		this._benchmarks.push({
			benchmark: new Benchmark(name, test),
			// We do not use the setup and teardown which is offered within the API of benchmarkjs
			// as several attempts to get this working did not yield any successful results.
			setup,
			teardown,
		});
	}

	public compareBenchmarks(
		name: string,
		setup?: setupFunction,
		teardown?: teardownFunction,
		...benchmarks: {
			name: string;
			test: testFunction;
		}[]
	): void {
		// We do not use the setup and teardown which is offered within the API of benchmarkjs
		// as several attempts to get this working did not yield any successful results. We also
		// allow only 1 setup and teardown as all functions which compare with one another should
		// use the same data to test with.
		const comparison = { name, benchmarks: [], setup, teardown };
		for (const benchmark of benchmarks) {
			comparison.benchmarks.push(new Benchmark(benchmark.name, benchmark.test));
		}
		this._comparisons.push(comparison);
	}
}
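
For reference, here is a minimal usage sketch of this class. `ExampleCollection` and its `runAll`
method are hypothetical and not part of this commit; since `BenchmarkCollection` declares no
abstract members in this file, a trivial subclass is enough for illustration, and setup/teardown
are invoked by hand around `Benchmark#run`, mirroring the comments above.

```ts
import BenchmarkCollection from './BenchmarkCollection';

// Hypothetical subclass; the real runner added elsewhere in this commit is not
// shown in this excerpt.
class ExampleCollection extends BenchmarkCollection {
	public async runAll(): Promise<void> {
		for (const { benchmark, setup, teardown } of this._benchmarks) {
			if (setup) {
				await setup();
			}
			// Benchmark#run samples the test function and gathers statistics;
			// 'complete' fires once the run has finished.
			benchmark.on('complete', () => console.log(String(benchmark))).run();
			if (teardown) {
				await teardown();
			}
		}
	}
}

const collection = new ExampleCollection();

collection.addBenchmark('build an array of 1000 numbers', () => {
	const values: number[] = [];
	for (let i = 0; i < 1000; i++) {
		values.push(i);
	}
});

collection.runAll();
```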
