Add benchmark/performance tests #834

Merged (7 commits, merged Jun 23, 2022)
35 changes: 26 additions & 9 deletions .github/workflows/pr.yaml
@@ -18,19 +18,15 @@ jobs:
           cache: 'npm'
 
       - name: Cache Node modules
-        uses: actions/cache@v2
-        env:
-          cache-name: cache-node-modules
+        uses: actions/cache@v3
+        id: npm-cache
         with:
           # npm cache files are stored in `~/.npm` on Linux/macOS
           path: ~/.npm
-          key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/package-lock.json') }}
+          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
           restore-keys: |
-            ${{ runner.os }}-build-${{ env.cache-name }}-
-            ${{ runner.os }}-build-
-            ${{ runner.os }}-
+            ${{ runner.os }}-node-
 
-      - name: "Install dependencies"
+      - name: Install dependencies
         run: |
           npm install
           sudo npx playwright install-deps
@@ -39,3 +35,24 @@ jobs:
         run: npm run lint
       - name: Test
         run: npm test
+
+      - name: Benchmark
+        run: npm run benchmark | tee benchmark.txt
+
+      - name: Download previous benchmark data
+        uses: actions/cache@v3
+        with:
+          path: ./cache
+          key: ${{ runner.os }}-benchmark
+
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          tool: 'benchmarkjs'
+          output-file-path: benchmark.txt
+          external-data-json-path: ./cache/benchmark-data.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          alert-threshold: '150%'
+          comment-on-alert: true
+          fail-on-alert: true
+          alert-comment-cc-users: '@Stuk'
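
Note: with tool: 'benchmarkjs', the github-action-benchmark step parses the per-cycle summary lines that Benchmark.js prints (the String(event.target) output in test/benchmark/benchmark.js below). An illustrative line, with hypothetical numbers:

    nodebuffer generateAsync x 11.5 ops/sec ±1.82% (33 runs sampled)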
24 changes: 24 additions & 0 deletions package-lock.json

Some generated files are not rendered by default.

6 changes: 5 additions & 1 deletion package.json
@@ -6,7 +6,10 @@
   "scripts": {
     "test": "npm run test-node && npm run test-browser && tsc",
     "test-node": "qunit --require ./test/helpers/test-utils.js --require ./test/helpers/node-test-utils.js test/asserts/",
-    "test-browser": "grunt build && node test/run.js",
+    "test-browser": "grunt build && node test/run.js --test",
+    "benchmark": "npm run benchmark-node && npm run benchmark-browser",
+    "benchmark-node": "node test/benchmark/node.js",
+    "benchmark-browser": "node test/run.js --benchmark",
     "lint": "eslint ."
   },
   "contributors": [
@@ -39,6 +42,7 @@
     "inflate"
   ],
   "devDependencies": {
+    "benchmark": "^2.1.4",
     "browserify": "~13.0.0",
     "eslint": "^8.18.0",
     "grunt": "~0.4.1",
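
With these scripts in place, the benchmarks can be run locally as well as in CI, for example:

    npm run benchmark            # Node suite, then the browser suite
    npm run benchmark-node       # Benchmark.js under Node only
    npm run benchmark-browser    # Playwright-driven browser run via test/run.js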
8 changes: 8 additions & 0 deletions test/benchmark/.eslintrc.js
@@ -0,0 +1,8 @@
"use strict";

module.exports = {
    globals: {
        // Added by index.html and node.js
        Benchmark: false,
    },
};
39 changes: 39 additions & 0 deletions test/benchmark/benchmark.js
@@ -0,0 +1,39 @@
"use strict";

(function (root, factory) {
    if (typeof module === "object" && module.exports) {
        module.exports = factory();
    } else {
        root.benchmark = factory();
    }
}(typeof self !== "undefined" ? self : this, function () {
    return function (type) {
        return new Promise(resolve => {
            const suite = new Benchmark.Suite();

            suite
                .add(`${type} generateAsync`, {
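                    // Deferred benchmark: Benchmark.js waits for deferred.resolve()
                    // below rather than timing the function call synchronously.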
                    defer: true,
                    async fn(deferred) {
                        const zip = new JSZip();

                        for (let i = 0; i < 50; i++) {
                            zip.file("file_" + i, "R0lGODdhBQAFAIACAAAAAP/eACwAAAAABQAFAAACCIwPkWerClIBADs=", { base64: true, date: new Date(1234123491011) });
                        }

                        await zip.generateAsync({ type });
                        deferred.resolve();
                    }
                })
                .on("cycle", event => {
                    // Log each completed benchmark; String(event.target) is the formatted summary line
                    console.log(String(event.target));
                })
                .on("complete", () => {
                    console.log("Benchmark complete");
                    resolve();
                })
                .run({ "async": true });
        });
    };
}));
16 changes: 16 additions & 0 deletions test/benchmark/index.html
@@ -0,0 +1,16 @@
<!DOCTYPE html>
<html>
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
    <title>JSZip Benchmark</title>
</head>
<body>
    <script src="../../node_modules/benchmark/node_modules/lodash/lodash.js"></script>
    <script src="../../node_modules/benchmark/benchmark.js"></script>
    <script src="../../dist/jszip.js"></script>
    <script src="./benchmark.js"></script>
    <script>
        benchmark("arraybuffer");
    </script>
</body>
</html>
7 changes: 7 additions & 0 deletions test/benchmark/node.js
@@ -0,0 +1,7 @@
"use strict";

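// Provide the browser-style globals that test/benchmark/benchmark.js expects
// (the same names index.html supplies via script tags).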
globalThis.Benchmark = require("benchmark");
globalThis.JSZip = require("../../lib/index");

const benchmark = require("./benchmark");
benchmark("nodebuffer");
90 changes: 66 additions & 24 deletions test/run.js
@@ -23,52 +23,94 @@ const createServer = require("http-server").createServer;
  * @param {string} browserType
  * @returns {Promise<[string, Results]>}
  */
-async function run(browserType) {
+async function runBrowser(browserType, waitFor, file) {
     console.log("Starting", browserType);
     const browser = await playwright[browserType].launch();
     const context = await browser.newContext();
     const page = await context.newPage();
 
-    await page.goto("http://127.0.0.1:8080/test/index.html?hidepassed");
-
-    let result;
-    do {
-        result = await page.evaluate(() => {
-            return window.global_test_results;
-        });
-    } while (!result);
+    await page.goto(`http://127.0.0.1:8080/test/${file}`);
+    const result = await waitFor(page);
 
     console.log("Closing", browserType);
     await browser.close();
 
     return [browserType, result];
 }
 
-async function main() {
+async function runBrowsers(waitFor, file) {
     const browsersTypes = ["chromium", "firefox", "webkit"];
 
     const server = createServer({root: path.join(__dirname, "..")});
     await new Promise(resolve => server.listen(8080, "127.0.0.1", resolve));
     console.log("Server started");
 
     try {
-        const results = await Promise.all(browsersTypes.map(run));
-
-        let failures = false;
-        for (const result of results) {
-            console.log(...result);
-            failures = failures || result[1].failed > 0;
-        }
-
-        if (failures) {
-            console.log("Tests failed");
-            process.exit(1);
-        } else {
-            console.log("Tests passed!");
-        }
+        const results = await Promise.all(browsersTypes.map(b => runBrowser(b, waitFor, file)));
+        return results;
     } finally {
         server.close();
     }
 }
 
-main();
+async function waitForTests(page) {
+    let result;
+    do {
+        result = await page.evaluate(() => {
+            return window.global_test_results;
+        });
+    } while (!result);
+    return result;
+}
+
+async function runTests() {
+    const results = await runBrowsers(waitForTests, "index.html?hidepassed");
+
+    let failures = false;
+    for (const result of results) {
+        console.log(...result);
+        failures = failures || result[1].failed > 0;
+    }
+
+    if (failures) {
+        console.log("Tests failed");
+        process.exit(1);
+    } else {
+        console.log("Tests passed!");
+    }
+}
+
+async function waitForBenchmark(page) {
+    return new Promise(resolve => {
+        const logs = [];
+
+        page.on("console", async message => {
+            if (message.text() === "Benchmark complete") {
+                resolve(logs);
+            } else {
+                logs.push(message.text());
+            }
+        });
+    });
+}
+
+async function runBenchmark() {
+    const results = await runBrowsers(waitForBenchmark, "benchmark/index.html");
+
+    for (const [browser, logs] of results) {
+        for (const log of logs) {
+            console.log(browser, log);
+        }
+    }
+}
+
+switch (process.argv[2]) {
+case "--test":
+    runTests();
+    break;
+case "--benchmark":
+    runBenchmark();
+    break;
+default:
+    throw new Error(`Unknown argument: ${process.argv[2]}`);
+}
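
For reference, test/run.js is now always invoked with an explicit mode, matching the package.json scripts above:

    node test/run.js --test         # browser unit tests (npm run test-browser)
    node test/run.js --benchmark    # browser benchmark run (npm run benchmark-browser)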