diff --git a/.eslintrc.yml b/.eslintrc.yml index 34c77cabe4..4310cee1bc 100644 --- a/.eslintrc.yml +++ b/.eslintrc.yml @@ -16,17 +16,22 @@ rules: - safe overrides: - files: - - scripts/**/*.js - - package-scripts.js - - karma.conf.js - - .wallaby.js - - .eleventy.js - - bin/* - - lib/cli/**/*.js - - test/node-unit/**/*.js - - test/integration/options/watch.spec.js - - test/integration/helpers.js - - lib/growl.js + - 'scripts/**/*.js' + - 'package-scripts.js' + - 'karma.conf.js' + - '.wallaby.js' + - '.eleventy.js' + - 'bin/*' + - 'lib/cli/**/*.js' + - 'test/node-unit/**/*.js' + - 'test/integration/options/watch.spec.js' + - 'test/integration/helpers.js' + - 'lib/growl.js' + - 'lib/buffered-runner.js' + - 'lib/worker.js' + - 'lib/reporters/buffered.js' + - 'lib/serializer.js' + - 'test/reporters/buffered.spec.js' parserOptions: ecmaVersion: 2018 env: diff --git a/.mocharc.yml b/.mocharc.yml index fc4c97339c..ee7c05072e 100644 --- a/.mocharc.yml +++ b/.mocharc.yml @@ -4,4 +4,5 @@ global: - okGlobalA,okGlobalB - okGlobalC - callback* -timeout: 300 +timeout: 1000 +parallel: true diff --git a/.travis.yml b/.travis.yml index a3874504dd..53629a3b72 100644 --- a/.travis.yml +++ b/.travis.yml @@ -38,6 +38,9 @@ jobs: include: - script: COVERAGE=1 npm start test.node after_success: npm start coveralls + - script: MOCHA_PARALLEL=0 npm start test.node.unit + + name: 'Latest Node.js (unit tests in serial mode)' - &node script: npm start test.node diff --git a/bin/mocha b/bin/mocha index 9c63463790..2628093d3c 100755 --- a/bin/mocha +++ b/bin/mocha @@ -126,8 +126,23 @@ if (Object.keys(nodeArgs).length) { // terminate children. process.on('SIGINT', () => { - proc.kill('SIGINT'); // calls runner.abort() - proc.kill('SIGTERM'); // if that didn't work, we're probably in an infinite loop, so make it die. + // XXX: a previous comment said this would abort the runner, but I can't see that it does + // anything with the default runner. + debug('main process caught SIGINT'); + proc.kill('SIGINT'); + // if running in parallel mode, we will have a proper SIGINT handler, so the below won't + // be needed. + if (!args.parallel || args.jobs < 2) { + // win32 does not support SIGTERM, so use next best thing. + if (require('os').platform() === 'win32') { + proc.kill('SIGKILL'); + } else { + // using SIGKILL won't cleanly close the output streams, which can result + // in cut-off text or a befouled terminal. 
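
// A minimal standalone sketch of the shutdown order used in this handler,
// assuming `proc` is the spawned child and `opts` mirrors the parsed
// `parallel`/`jobs` flags (the helper name is illustrative): SIGINT first so
// the child can abort; escalate only in serial mode, because the parallel
// runner installs its own SIGINT handler; fall back to SIGKILL on win32,
// which has no SIGTERM.
const terminateChild = (proc, opts) => {
  proc.kill('SIGINT');
  if (!opts.parallel || opts.jobs < 2) {
    proc.kill(process.platform === 'win32' ? 'SIGKILL' : 'SIGTERM');
  }
};
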
+ debug('sending SIGTERM to child process'); + proc.kill('SIGTERM'); + } + } }); } else { require('../lib/cli/cli').main(unparse(mochaArgs, {alias: aliases})); diff --git a/karma.conf.js b/karma.conf.js index 9337e3d3b8..86975e9d48 100644 --- a/karma.conf.js +++ b/karma.conf.js @@ -37,6 +37,10 @@ module.exports = config => { .ignore('./lib/esm-utils.js') .ignore('path') .ignore('supports-color') + .ignore('./lib/buffered-runner.js') + .ignore('./lib/reporters/buffered.js') + .ignore('./lib/serializer.js') + .ignore('./lib/worker.js') .on('bundled', (err, content) => { if (err) { throw err; diff --git a/lib/buffered-runner.js b/lib/buffered-runner.js new file mode 100644 index 0000000000..56ab2c7272 --- /dev/null +++ b/lib/buffered-runner.js @@ -0,0 +1,272 @@ +'use strict'; + +const allSettled = require('promise.allsettled'); +const os = require('os'); +const Runner = require('./runner'); +const {EVENT_RUN_BEGIN, EVENT_RUN_END} = Runner.constants; +const debug = require('debug')('mocha:parallel:buffered-runner'); +const workerpool = require('workerpool'); +const {deserialize} = require('./serializer'); +const WORKER_PATH = require.resolve('./worker.js'); +const {setInterval, clearInterval} = global; +const {createMap, warn} = require('./utils'); +const debugStats = pool => { + const {totalWorkers, busyWorkers, idleWorkers, pendingTasks} = pool.stats(); + debug( + '%d/%d busy workers; %d idle; %d tasks queued', + busyWorkers, + totalWorkers, + idleWorkers, + pendingTasks + ); +}; + +/** + * The interval at which we will display stats for worker processes in debug mode + */ +const DEBUG_STATS_INTERVAL = 5000; + +const ABORTED = 'ABORTED'; +const IDLE = 'IDLE'; +const ABORTING = 'ABORTING'; +const RUNNING = 'RUNNING'; +const BAILING = 'BAILING'; +const BAILED = 'BAILED'; +const COMPLETE = 'COMPLETE'; + +const states = createMap({ + [IDLE]: new Set([RUNNING, ABORTING]), + [RUNNING]: new Set([COMPLETE, BAILING, ABORTING]), + [COMPLETE]: new Set(), + [ABORTED]: new Set(), + [ABORTING]: new Set([ABORTED]), + [BAILING]: new Set([BAILED, ABORTING]), + [BAILED]: new Set([COMPLETE, ABORTING]) +}); + +/** + * This `Runner` delegates tests runs to worker threads. Does not execute any + * {@link Runnable}s by itself! + */ +class BufferedRunner extends Runner { + constructor(...args) { + super(...args); + + let state = IDLE; + Object.defineProperty(this, '_state', { + get() { + return state; + }, + set(newState) { + if (states[state].has(newState)) { + state = newState; + } else { + throw new Error(`invalid state transition: ${state} => ${newState}`); + } + } + }); + + this.once('EVENT_RUN_END', () => { + this._state = COMPLETE; + }); + } + + /** + * Runs Mocha tests by creating a thread pool, then delegating work to the + * worker threads. + * + * Each worker receives one file, and as workers become available, they take a + * file from the queue and run it. The worker thread execution is treated like + * an RPC--it returns a `Promise` containing serialized information about the + * run. The information is processed as it's received, and emitted to a + * {@link Reporter}, which is likely listening for these events. + * + * @todo handle delayed runs? + * @param {Function} callback - Called with an exit code corresponding to + * number of test failures. + * @param {{files: string[], options: Options}} opts - Files to run and + * command-line options, respectively. + */ + run(callback, {files, options} = {}) { + /** + * Listener on `Process.SIGINT` which tries to cleanly terminate the worker pool. 
+ */ + let sigIntListener; + // This function should _not_ return a `Promise`; its parent (`Runner#run`) + // returns this instance, so this should do the same. However, we want to make + // use of `async`/`await`, so we use this IIFE. + + (async () => { + /** + * This is an interval that outputs stats about the worker pool every so often + */ + let debugInterval; + + /** + * @type {WorkerPool} + */ + let pool; + + try { + const cpuCount = os.cpus().length; + const maxJobs = cpuCount - 1; + const jobs = Math.max(1, Math.min(options.jobs || maxJobs, maxJobs)); + if (maxJobs < 2) { + warn( + `(Mocha) not enough CPU cores available (${cpuCount}) to run multiple jobs; avoid --parallel on this machine` + ); + } else if (options.jobs && options.jobs > maxJobs) { + warn( + `(Mocha) ${options.jobs} concurrent jobs requested, but only enough cores available for ${maxJobs}` + ); + } + debug( + 'run(): starting worker pool of size %d, using node args: %s', + jobs, + process.execArgv.join(' ') + ); + pool = workerpool.pool(WORKER_PATH, { + workerType: 'process', + maxWorkers: jobs, + forkOpts: {execArgv: process.execArgv} + }); + + sigIntListener = async () => { + if (this._state !== ABORTING) { + debug('run(): caught a SIGINT'); + this._state = ABORTING; + + try { + debug('run(): shutting down %d (max) workers', jobs); + await pool.terminate(true); + } catch (err) { + console.error( + `Error while attempting to force-terminate worker pool: ${err}` + ); + } finally { + process.nextTick(() => { + debug('run(): imminent death'); + this._state = ABORTED; + process.kill(process.pid, 'SIGINT'); + }); + } + } + }; + + process.once('SIGINT', sigIntListener); + + // the "pool proxy" object is essentially just syntactic sugar to call a + // worker's procedure as one would a regular function. + const poolProxy = await pool.proxy(); + + debugInterval = setInterval( + () => debugStats(pool), + DEBUG_STATS_INTERVAL + ).unref(); + + // this is set for uncaught exception handling in `Runner#uncaught` + this.started = true; + this._state = RUNNING; + + this.emit(EVENT_RUN_BEGIN); + + const results = await allSettled( + files.map(async file => { + debug('run(): enqueueing test file %s', file); + try { + const result = await poolProxy.run(file, options); + if (this._state === BAILED) { + // short-circuit after a graceful bail + return; + } + const {failureCount, events} = deserialize(result); + debug( + 'run(): completed run of file %s; %d failures / %d events', + file, + failureCount, + events.length + ); + this.failures += failureCount; // can this ever be non-numeric? + /** + * If we set this, then we encountered a "bail" flag, and will + * terminate the pool once all events have been emitted. 
+ */ + let event = events.shift(); + while (event) { + this.emit(event.eventName, event.data, event.error); + if ( + this._state !== BAILING && + event.data && + event.data._bail && + (failureCount || event.error) + ) { + debug('run(): nonzero failure count & found bail flag'); + // we need to let the events complete for this file, as the worker + // should run any cleanup hooks + this._state = BAILING; + } + event = events.shift(); + } + if (this._state === BAILING) { + debug('run(): terminating pool due to "bail" flag'); + this._state = BAILED; + await pool.terminate(); + } + } catch (err) { + if (this._state === BAILED || this._state === ABORTING) { + debug( + 'run(): worker pool terminated with intent; skipping file %s', + file + ); + } else { + // this is an uncaught exception + debug('run(): encountered uncaught exception: %O', err); + if (this.allowUncaught) { + // still have to clean up + this._state = ABORTING; + await pool.terminate(true); + } + throw err; + } + } finally { + debug('run(): done running file %s', file); + } + }) + ); + + // note that pool may already be terminated due to --bail + await pool.terminate(); + + results + .filter(({status}) => status === 'rejected') + .forEach(({reason}) => { + if (this.allowUncaught) { + // yep, just the first one. + throw reason; + } + // "rejected" will correspond to uncaught exceptions. + // unlike the serial runner, the parallel runner can always recover. + this.uncaught(reason); + }); + + if (this._state === ABORTING) { + return; + } + this.emit(EVENT_RUN_END); + debug('run(): completing with failure count %d', this.failures); + callback(this.failures); + } catch (err) { + process.nextTick(() => { + debug('run(): throwing uncaught exception'); + throw err; + }); + } finally { + clearInterval(debugInterval); + process.removeListener('SIGINT', sigIntListener); + } + })(); + return this; + } +} + +module.exports = BufferedRunner; diff --git a/lib/cli/collect-files.js b/lib/cli/collect-files.js index 61d54ac4b3..37c236d1f5 100644 --- a/lib/cli/collect-files.js +++ b/lib/cli/collect-files.js @@ -16,13 +16,7 @@ const utils = require('../utils'); /** * Smash together an array of test files in the correct order - * @param {Object} opts - Options - * @param {string[]} opts.extension - File extensions to use - * @param {string[]} opts.spec - Files, dirs, globs to run - * @param {string[]} opts.ignore - Files, dirs, globs to ignore - * @param {string[]} opts.file - List of additional files to include - * @param {boolean} opts.recursive - Find files recursively - * @param {boolean} opts.sort - Sort test files + * @param {FileCollectionOptions} [opts] - Options * @returns {string[]} List of files to test * @private */ @@ -83,3 +77,14 @@ module.exports = ({ignore, extension, file, recursive, sort, spec} = {}) => { return files; }; + +/** + * An object to configure how Mocha gathers test files + * @typedef {Object} FileCollectionOptions + * @property {string[]} extension - File extensions to use + * @property {string[]} spec - Files, dirs, globs to run + * @property {string[]} ignore - Files, dirs, globs to ignore + * @property {string[]} file - List of additional files to include + * @property {boolean} recursive - Find files recursively + * @property {boolean} sort - Sort test files + */ diff --git a/lib/cli/run-helpers.js b/lib/cli/run-helpers.js index 72823c48f6..f55c93587e 100644 --- a/lib/cli/run-helpers.js +++ b/lib/cli/run-helpers.js @@ -10,7 +10,7 @@ const fs = require('fs'); const path = require('path'); const debug = 
require('debug')('mocha:cli:run:helpers'); -const watchRun = require('./watch-run'); +const {watchRun, watchParallelRun} = require('./watch-run'); const collectFiles = require('./collect-files'); const cwd = (exports.cwd = process.cwd()); @@ -109,24 +109,52 @@ const singleRun = async (mocha, {exit}, fileCollectParams) => { }; /** - * Actually run tests + * Collect files and run tests (using `BufferedRunner`). + * + * This is `async` for consistency. + * * @param {Mocha} mocha - Mocha instance - * @param {Object} opts - Command line options + * @param {Options} options - Command line options + * @param {Object} fileCollectParams - Parameters that control test + * file collection. See `lib/cli/collect-files.js`. + * @returns {Promise} + * @ignore + * @private + */ +const parallelRun = async (mocha, options, fileCollectParams) => { + const files = collectFiles(fileCollectParams); + debug( + 'executing %d test file(s) across %d concurrent jobs', + files.length, + options.jobs + ); + mocha.files = files; + + // note that we DO NOT load any files here; this is handled by the worker + return mocha.run(options.exit ? exitMocha : exitMochaLater); +}; + +/** + * Actually run tests. Delegates to one of four different functions: + * - `singleRun`: run tests in serial & exit + * - `watchRun`: run tests in serial, rerunning as files change + * - `parallelRun`: run tests in parallel & exit + * - `watchParallelRun`: run tests in parallel, rerunning as files change + * @param {Mocha} mocha - Mocha instance + * @param {Options} opts - Command line options * @private - * @returns {Promise} + * @returns {Promise} */ exports.runMocha = async (mocha, options) => { const { watch = false, extension = [], - exit = false, ignore = [], file = [], + parallel = false, recursive = false, sort = false, - spec = [], - watchFiles, - watchIgnore + spec = [] } = options; const fileCollectParams = { @@ -138,11 +166,14 @@ exports.runMocha = async (mocha, options) => { spec }; + let run; if (watch) { - watchRun(mocha, {watchFiles, watchIgnore}, fileCollectParams); + run = parallel ? watchParallelRun : watchRun; } else { - await singleRun(mocha, {exit}, fileCollectParams); + run = parallel ? 
parallelRun : singleRun; } + + return run(mocha, options, fileCollectParams); }; /** diff --git a/lib/cli/run-option-metadata.js b/lib/cli/run-option-metadata.js index 4648d9fbfe..da3b7d995d 100644 --- a/lib/cli/run-option-metadata.js +++ b/lib/cli/run-option-metadata.js @@ -42,11 +42,12 @@ exports.types = { 'list-interfaces', 'list-reporters', 'no-colors', + 'parallel', 'recursive', 'sort', 'watch' ], - number: ['retries'], + number: ['retries', 'jobs'], string: [ 'config', 'fgrep', @@ -75,7 +76,9 @@ exports.aliases = { growl: ['G'], ignore: ['exclude'], invert: ['i'], + jobs: ['j'], 'no-colors': ['C'], + parallel: ['p'], reporter: ['R'], 'reporter-option': ['reporter-options', 'O'], require: ['r'], diff --git a/lib/cli/run.js b/lib/cli/run.js index d024cbb0f2..634ea39e41 100644 --- a/lib/cli/run.js +++ b/lib/cli/run.js @@ -24,6 +24,7 @@ const {ONE_AND_DONES, ONE_AND_DONE_ARGS} = require('./one-and-dones'); const debug = require('debug')('mocha:cli:run'); const defaults = require('../mocharc'); const {types, aliases} = require('./run-option-metadata'); +const coreCount = require('os').cpus().length; /** * Logical option groups @@ -150,6 +151,14 @@ exports.builder = yargs => description: 'Inverts --grep and --fgrep matches', group: GROUPS.FILTERS }, + jobs: { + description: + 'Number of concurrent jobs for --parallel; use 1 to run in serial', + defaultDescription: '(number of CPU cores - 1)', + requiresArg: true, + group: GROUPS.RULES, + default: Math.max(2, coreCount - 1) + }, 'list-interfaces': { conflicts: Array.from(ONE_AND_DONE_ARGS), description: 'List built-in user interfaces & exit' @@ -169,6 +178,10 @@ exports.builder = yargs => normalize: true, requiresArg: true }, + parallel: { + description: 'Run tests in parallel', + group: GROUPS.RULES + }, recursive: { description: 'Look for tests in subdirectories', group: GROUPS.FILES @@ -271,6 +284,40 @@ exports.builder = yargs => ); } + if (argv.parallel) { + // yargs.conflicts() can't deal with `--file foo.js --no-parallel`, either + if (argv.file) { + throw createUnsupportedError( + '--parallel runs test files in a non-deterministic order, and is mutually exclusive with --file' + ); + } + + // or this + if (argv.sort) { + throw createUnsupportedError( + '--parallel runs test files in a non-deterministic order, and is mutually exclusive with --sort' + ); + } + + if (argv.reporter === 'progress') { + throw createUnsupportedError( + '--reporter=progress is mutually exclusive with --parallel' + ); + } + + if (argv.reporter === 'markdown') { + throw createUnsupportedError( + '--reporter=markdown is mutually exclusive with --parallel' + ); + } + + if (argv.reporter === 'json-stream') { + throw createUnsupportedError( + '--reporter=json-stream is mutually exclusive with --parallel' + ); + } + } + if (argv.compilers) { throw createUnsupportedError( `--compilers is DEPRECATED and no longer supported. diff --git a/lib/cli/watch-run.js b/lib/cli/watch-run.js index b35a906959..d77740dd93 100644 --- a/lib/cli/watch-run.js +++ b/lib/cli/watch-run.js @@ -1,5 +1,6 @@ 'use strict'; +const debug = require('debug')('mocha:cli:watch'); const path = require('path'); const chokidar = require('chokidar'); const Context = require('../context'); @@ -12,6 +13,42 @@ const collectFiles = require('./collect-files'); * @private */ +/** + * Run Mocha in parallel "watch" mode + * @param {Mocha} mocha - Mocha instance + * @param {Object} opts - Options + * @param {string[]} [opts.watchFiles] - List of paths and patterns to + * watch. 
If not provided all files with an extension included in + * `fileColletionParams.extension` are watched. See first argument of + * `chokidar.watch`. + * @param {string[]} opts.watchIgnore - List of paths and patterns to + * exclude from watching. See `ignored` option of `chokidar`. + * @param {FileCollectionOptions} fileCollectParams - Parameters that control test + * @private + */ +exports.watchParallelRun = ( + mocha, + {watchFiles, watchIgnore}, + fileCollectParams +) => { + debug('creating parallel watcher'); + return createWatcher(mocha, { + watchFiles, + watchIgnore, + beforeRun({mocha}) { + mocha.files = collectFiles(fileCollectParams); + // in parallel mode, the main Mocha process doesn't actually load the + // files. this flag prevents `mocha.run()` from autoloading. + mocha.lazyLoadFiles = true; + return mocha; + }, + afterRun({watcher}) { + blastCache(watcher); + }, + fileCollectParams + }); +}; + /** * Run Mocha in "watch" mode * @param {Mocha} mocha - Mocha instance @@ -22,27 +59,88 @@ const collectFiles = require('./collect-files'); * `chokidar.watch`. * @param {string[]} opts.watchIgnore - List of paths and patterns to * exclude from watching. See `ignored` option of `chokidar`. - * @param {Object} fileCollectParams - Parameters that control test + * @param {FileCollectionOptions} fileCollectParams - Parameters that control test * file collection. See `lib/cli/collect-files.js`. - * @param {string[]} fileCollectParams.extension - List of extensions - * to watch if `opts.watchFiles` is not given. * @private */ -module.exports = (mocha, {watchFiles, watchIgnore}, fileCollectParams) => { +exports.watchRun = (mocha, {watchFiles, watchIgnore}, fileCollectParams) => { + debug('creating serial watcher'); + // list of all test files + + return createWatcher(mocha, { + watchFiles, + watchIgnore, + beforeRun({mocha}) { + mocha.unloadFiles(); + + // I don't know why we're cloning the root suite. + const rootSuite = mocha.suite.clone(); + + // this `require` is needed because the require cache has been cleared. the dynamic + // exports set via the below call to `mocha.ui()` won't work properly if a + // test depends on this module (see `required-tokens.spec.js`). + const Mocha = require('../mocha'); + + // ... and now that we've gotten a new module, we need to use it again due + // to `mocha.ui()` call + const newMocha = new Mocha(mocha.options); + // don't know why this is needed + newMocha.suite = rootSuite; + // nor this + newMocha.suite.ctx = new Context(); + + // reset the list of files + newMocha.files = collectFiles(fileCollectParams); + + // because we've swapped out the root suite (see the `run` inner function + // in `createRerunner`), we need to call `mocha.ui()` again to set up the context/globals. + newMocha.ui(newMocha.options.ui); + + return newMocha; + }, + afterRun({watcher}) { + blastCache(watcher); + }, + fileCollectParams + }); +}; + +/** + * Bootstraps a chokidar watcher. Handles keyboard input & signals + * @param {Mocha} mocha - Mocha instance + * @param {Object} opts + * @param {BeforeWatchRun} [opts.beforeRun] - Function to call before + * `mocha.run()` + * @param {AfterWatchRun} [opts.afterRun] - Function to call after `mocha.run()` + * @param {string[]} [opts.watchFiles] - List of paths and patterns to watch. If + * not provided all files with an extension included in + * `fileColletionParams.extension` are watched. See first argument of + * `chokidar.watch`. + * @param {string[]} [opts.watchIgnore] - List of paths and patterns to exclude + * from watching. 
See `ignored` option of `chokidar`. + * @param {FileCollectionOptions} opts.fileCollectParams - List of extensions to watch if `opts.watchFiles` is not given. + * @returns {FSWatcher} + * @ignore + * @private + */ +const createWatcher = ( + mocha, + {watchFiles, watchIgnore, beforeRun, afterRun, fileCollectParams} +) => { if (!watchFiles) { watchFiles = fileCollectParams.extension.map(ext => `**/*.${ext}`); } + debug('ignoring files matching: %s', watchIgnore); + const watcher = chokidar.watch(watchFiles, { ignored: watchIgnore, ignoreInitial: true }); - const rerunner = createRerunner(mocha, () => { - getWatchedFiles(watcher).forEach(file => { - delete require.cache[file]; - }); - mocha.files = collectFiles(fileCollectParams); + const rerunner = createRerunner(mocha, watcher, { + beforeRun, + afterRun }); watcher.on('ready', () => { @@ -53,7 +151,6 @@ module.exports = (mocha, {watchFiles, watchIgnore}, fileCollectParams) => { rerunner.scheduleRun(); }); - console.log(); hideCursor(); process.on('exit', () => { showCursor(); @@ -74,36 +171,43 @@ module.exports = (mocha, {watchFiles, watchIgnore}, fileCollectParams) => { .toLowerCase(); if (str === 'rs') rerunner.scheduleRun(); }); + + return watcher; }; /** - * Create an object that allows you to rerun tests on the mocha - * instance. `beforeRun` is called everytime before `mocha.run()` is - * called. + * Create an object that allows you to rerun tests on the mocha instance. * * @param {Mocha} mocha - Mocha instance - * @param {function} beforeRun - Called just before `mocha.run()` + * @param {FSWatcher} watcher - chokidar `FSWatcher` instance + * @param {Object} [opts] - Options! + * @param {BeforeWatchRun} [opts.beforeRun] - Function to call before `mocha.run()` + * @param {AfterWatchRun} [opts.afterRun] - Function to call after `mocha.run()` + * @returns {Rerunner} + * @ignore + * @private */ -const createRerunner = (mocha, beforeRun) => { +const createRerunner = (mocha, watcher, {beforeRun, afterRun} = {}) => { // Set to a `Runner` when mocha is running. Set to `null` when mocha is not // running. let runner = null; + // true if a file has changed during a test run let rerunScheduled = false; const run = () => { - try { - beforeRun(); - resetMocha(mocha); - runner = mocha.run(() => { - runner = null; - if (rerunScheduled) { - rerun(); - } - }); - } catch (e) { - console.log(e.stack); - } + mocha = beforeRun ? beforeRun({mocha, watcher}) : mocha; + + runner = mocha.run(() => { + debug('finished watch run'); + runner = null; + afterRun && afterRun({mocha, watcher}); + if (rerunScheduled) { + rerun(); + } else { + debug('waiting for changes...'); + } + }); }; const scheduleRun = () => { @@ -136,32 +240,18 @@ const createRerunner = (mocha, beforeRun) => { * * @param watcher - Instance of a chokidar watcher * @return {string[]} - List of absolute paths + * @ignore + * @private */ const getWatchedFiles = watcher => { const watchedDirs = watcher.getWatched(); - let watchedFiles = []; - Object.keys(watchedDirs).forEach(dir => { - watchedFiles = watchedFiles.concat( - watchedDirs[dir].map(file => path.join(dir, file)) - ); - }); - return watchedFiles; -}; - -/** - * Reset the internal state of the mocha instance so that tests can be rerun. - * - * @param {Mocha} mocha - Mocha instance - * @private - */ -const resetMocha = mocha => { - mocha.unloadFiles(); - mocha.suite = mocha.suite.clone(); - mocha.suite.ctx = new Context(); - // Registers a callback on `mocha.suite` that wires new context to the DSL - // (e.g. 
`describe`) that is exposed as globals when the test files are - // reloaded. - mocha.ui(mocha.options.ui); + return Object.keys(watchedDirs).reduce( + (acc, dir) => [ + ...acc, + ...watchedDirs[dir].map(file => path.join(dir, file)) + ], + [] + ); }; /** @@ -189,3 +279,43 @@ const showCursor = () => { const eraseLine = () => { process.stdout.write('\u001b[2K'); }; + +/** + * Blast all of the watched files out of `require.cache` + * @param {FSWatcher} watcher - chokidar FSWatcher + * @ignore + * @private + */ +const blastCache = watcher => { + const files = getWatchedFiles(watcher); + files.forEach(file => { + delete require.cache[file]; + }); + debug('deleted %d file(s) from the require cache', files.length); +}; + +/** + * Callback to be run before `mocha.run()` is called. + * Optionally, it can return a new `Mocha` instance. + * @callback BeforeWatchRun + * @private + * @param {{mocha: Mocha, watcher: FSWatcher}} options + * @returns {Mocha} + */ + +/** + * Callback to be run after `mocha.run()` completes. Typically used to clear + * require cache. + * @callback AfterWatchRun + * @private + * @param {{mocha: Mocha, watcher: FSWatcher}} options + * @returns {void} + */ + +/** + * Object containing run control methods + * @typedef {Object} Rerunner + * @private + * @property {Function} run - Calls `mocha.run()` + * @property {Function} scheduleRun - Schedules another call to `run` + */ diff --git a/lib/hook.js b/lib/hook.js index 71440d23d0..4541775c5b 100644 --- a/lib/hook.js +++ b/lib/hook.js @@ -44,3 +44,26 @@ Hook.prototype.error = function(err) { this._error = err; }; + +/** + * Returns an object suitable for IPC. + * Functions are represented by keys beginning with `$$`. + * @returns {Object} + */ +Hook.prototype.serialize = function serialize() { + return { + $$isPending: this.isPending(), + $$titlePath: this.titlePath(), + ctx: { + currentTest: { + title: this.ctx && this.ctx.currentTest && this.ctx.currentTest.title + } + }, + parent: { + root: this.parent.root, + title: this.parent.title + }, + title: this.title, + type: this.type + }; +}; diff --git a/lib/mocha.js b/lib/mocha.js index 017daa1e2c..6b5561567c 100644 --- a/lib/mocha.js +++ b/lib/mocha.js @@ -90,6 +90,8 @@ exports.Test = require('./test'); * @param {number} [options.slow] - Slow threshold value. * @param {number|string} [options.timeout] - Timeout threshold value. * @param {string} [options.ui] - Interface name. 
+ * @param {boolean} [options.parallel] - Run jobs in parallel + * @param {number} [options.jobs] - Max number of worker processes for parallel runs */ function Mocha(options) { options = utils.assign({}, mocharc, options || {}); @@ -136,6 +138,14 @@ function Mocha(options) { this[opt](); } }, this); + + if (options.parallel && options.jobs > 1) { + this._runner = require('./buffered-runner'); + this.lazyLoadFiles = true; + } else { + this._runner = exports.Runner; + this.lazyLoadFiles = false; + } } /** @@ -186,20 +196,20 @@ Mocha.prototype.addFile = function(file) { * // Use XUnit reporter and direct its output to file * mocha.reporter('xunit', { output: '/path/to/testspec.xunit.xml' }); */ -Mocha.prototype.reporter = function(reporter, reporterOptions) { - if (typeof reporter === 'function') { - this._reporter = reporter; +Mocha.prototype.reporter = function(reporterName, reporterOptions) { + if (typeof reporterName === 'function') { + this._reporter = reporterName; } else { - reporter = reporter || 'spec'; - var _reporter; + reporterName = reporterName || 'spec'; + var reporter; // Try to load a built-in reporter. - if (builtinReporters[reporter]) { - _reporter = builtinReporters[reporter]; + if (builtinReporters[reporterName]) { + reporter = builtinReporters[reporterName]; } // Try to load reporters from process.cwd() and node_modules - if (!_reporter) { + if (!reporter) { try { - _reporter = require(reporter); + reporter = require(reporterName); } catch (err) { if ( err.code !== 'MODULE_NOT_FOUND' || @@ -207,31 +217,31 @@ Mocha.prototype.reporter = function(reporter, reporterOptions) { ) { // Try to load reporters from a path (absolute or relative) try { - _reporter = require(path.resolve(process.cwd(), reporter)); + reporter = require(path.resolve(process.cwd(), reporterName)); } catch (_err) { _err.code !== 'MODULE_NOT_FOUND' || _err.message.indexOf('Cannot find module') !== -1 - ? console.warn(sQuote(reporter) + ' reporter not found') + ? console.warn(sQuote(reporterName) + ' reporter not found') : console.warn( - sQuote(reporter) + + sQuote(reporterName) + ' reporter blew up with error:\n' + err.stack ); } } else { console.warn( - sQuote(reporter) + ' reporter blew up with error:\n' + err.stack + sQuote(reporterName) + ' reporter blew up with error:\n' + err.stack ); } } } - if (!_reporter) { + if (!reporter) { throw createInvalidReporterError( - 'invalid reporter ' + sQuote(reporter), - reporter + 'invalid reporter ' + sQuote(reporterName), + reporterName ); } - this._reporter = _reporter; + this._reporter = reporter; } this.options.reporterOption = reporterOptions; // alias option name is used in public reporters xunit/tap/progress @@ -340,7 +350,7 @@ Mocha.prototype.loadFiles = function(fn) { Mocha.prototype.loadFilesAsync = function() { var self = this; var suite = this.suite; - this.loadAsync = true; + this.lazyLoadFiles = true; if (!esmUtils) { return new Promise(function(resolve) { @@ -825,13 +835,13 @@ Object.defineProperty(Mocha.prototype, 'version', { * mocha.run(failures => process.exitCode = failures ? 
1 : 0); */ Mocha.prototype.run = function(fn) { - if (this.files.length && !this.loadAsync) { + if (this.files.length && !this.lazyLoadFiles) { this.loadFiles(); } var suite = this.suite; var options = this.options; options.files = this.files; - var runner = new exports.Runner(suite, options.delay); + var runner = new this._runner(suite, options.delay); createStatsCollector(runner); var reporter = new this._reporter(runner, options); runner.checkLeaks = options.checkLeaks === true; @@ -864,5 +874,5 @@ Mocha.prototype.run = function(fn) { } } - return runner.run(done); + return runner.run(done, {files: this.files, options: options}); }; diff --git a/lib/reporters/buffered.js b/lib/reporters/buffered.js new file mode 100644 index 0000000000..1bbca61d36 --- /dev/null +++ b/lib/reporters/buffered.js @@ -0,0 +1,128 @@ +'use strict'; +/** + * @module Buffered + */ +/** + * Module dependencies. + */ + +const { + EVENT_SUITE_BEGIN, + EVENT_SUITE_END, + EVENT_TEST_FAIL, + EVENT_TEST_PASS, + EVENT_TEST_PENDING, + EVENT_TEST_BEGIN, + EVENT_TEST_END, + EVENT_TEST_RETRY, + EVENT_DELAY_BEGIN, + EVENT_DELAY_END, + EVENT_HOOK_BEGIN, + EVENT_HOOK_END, + EVENT_RUN_END +} = require('../runner').constants; +const {SerializableEvent, SerializableWorkerResult} = require('../serializer'); +const debug = require('debug')('mocha:reporters:buffered'); +const Base = require('./base'); + +/** + * List of events to listen to; these will be buffered and sent + * when `Mocha#run` is complete (via {@link Buffered#done}). + */ +const EVENT_NAMES = [ + EVENT_SUITE_BEGIN, + EVENT_SUITE_END, + EVENT_TEST_BEGIN, + EVENT_TEST_PENDING, + EVENT_TEST_FAIL, + EVENT_TEST_PASS, + EVENT_TEST_RETRY, + EVENT_TEST_END, + EVENT_HOOK_BEGIN, + EVENT_HOOK_END +]; + +/** + * Like {@link EVENT_NAMES}, except we expect these events to only be emitted + * by the `Runner` once. + */ +const ONCE_EVENT_NAMES = [EVENT_DELAY_BEGIN, EVENT_DELAY_END]; + +/** + * The `Buffered` reporter is for use by concurrent runs. Instead of outputting + * to `STDOUT`, etc., it retains a list of events it receives and hands these + * off to the callback passed into {@link Mocha#run}. That callback will then + * return the data to the main process. + */ +class Buffered extends Base { + /** + * Listens for {@link Runner} events and retains them in an `events` instance prop. + * @param {Runner} runner + */ + constructor(runner, opts) { + super(runner, opts); + + /** + * Retained list of events emitted from the {@link Runner} instance. + * @type {BufferedEvent[]} + * @memberOf Buffered + */ + const events = (this.events = []); + + /** + * mapping of event names to listener functions we've created, + * so we can cleanly _remove_ them from the runner once it's completed. + */ + const listeners = new Map(); + + /** + * Creates a listener for event `eventName` and adds it to the `listeners` + * map. This is a defensive measure, so that we don't a) leak memory or b) + * remove _other_ listeners that may not be associated with this reporter. 
+ * @param {string} eventName - Event name + */ + const createListener = eventName => + listeners + .set(eventName, (runnable, err) => { + events.push(SerializableEvent.create(eventName, runnable, err)); + }) + .get(eventName); + + EVENT_NAMES.forEach(evt => { + runner.on(evt, createListener(evt)); + }); + ONCE_EVENT_NAMES.forEach(evt => { + runner.once(evt, createListener(evt)); + }); + + runner.once(EVENT_RUN_END, () => { + debug('received EVENT_RUN_END'); + listeners.forEach((listener, evt) => { + runner.removeListener(evt, listener); + listeners.delete(evt); + }); + }); + } + + /** + * Calls the {@link Mocha#run} callback (`callback`) with the test failure + * count and the array of {@link BufferedEvent} objects. Resets the array. + * @param {number} failures - Number of failed tests + * @param {Function} callback - The callback passed to {@link Mocha#run}. + */ + done(failures, callback) { + callback(SerializableWorkerResult.create(this.events, failures)); + this.events = []; // defensive + } +} + +/** + * Serializable event data from a `Runner`. Keys of the `data` property + * beginning with `__` will be converted into a function which returns the value + * upon deserialization. + * @typedef {Object} BufferedEvent + * @property {string} name - Event name + * @property {object} data - Event parameters + */ + +module.exports = Buffered; diff --git a/lib/reporters/landing.js b/lib/reporters/landing.js index a6af946c42..ef4917c216 100644 --- a/lib/reporters/landing.js +++ b/lib/reporters/landing.js @@ -56,11 +56,12 @@ function Landing(runner, options) { var self = this; var width = (Base.window.width * 0.75) | 0; - var total = runner.total; var stream = process.stdout; + var plane = color('plane', '✈'); var crashed = -1; var n = 0; + var total = 0; function runway() { var buf = Array(width).join('-'); @@ -74,8 +75,7 @@ function Landing(runner, options) { runner.on(EVENT_TEST_END, function(test) { // check if the plane crashed - var col = crashed === -1 ? ((width * ++n) / total) | 0 : crashed; - + var col = crashed === -1 ? ((width * ++n) / ++total) | 0 : crashed; // show the crash if (test.state === STATE_FAILED) { plane = color('plane crash', '✈'); diff --git a/lib/reporters/tap.js b/lib/reporters/tap.js index 12257a745f..aa79fb19f1 100644 --- a/lib/reporters/tap.js +++ b/lib/reporters/tap.js @@ -50,9 +50,7 @@ function TAP(runner, options) { this._producer = createProducer(tapVersion); runner.once(EVENT_RUN_BEGIN, function() { - var ntests = runner.grepTotal(runner.suite); self._producer.writeVersion(); - self._producer.writePlan(ntests); }); runner.on(EVENT_TEST_END, function() { @@ -204,6 +202,7 @@ TAPProducer.prototype.writeEpilogue = function(stats) { println('# pass ' + stats.passes); // :TBD: Why are we not showing pending results? println('# fail ' + stats.failures); + this.writePlan(stats.passes + stats.failures + stats.pending); }; /** diff --git a/lib/runner.js b/lib/runner.js index c60e562a81..1636d53f3e 100644 --- a/lib/runner.js +++ b/lib/runner.js @@ -870,10 +870,11 @@ Runner.prototype.uncaughtEnd = function uncaughtEnd(err) { * * @public * @memberof Runner - * @param {Function} fn + * @param {Function} fn - Callback when finished + * @param {{files: string[], options: Options}} [opts] - For subclasses * @return {Runner} Runner instance. 
*/ -Runner.prototype.run = function(fn) { +Runner.prototype.run = function(fn, opts) { var self = this; var rootSuite = this.suite; diff --git a/lib/serializer.js b/lib/serializer.js new file mode 100644 index 0000000000..db2166a415 --- /dev/null +++ b/lib/serializer.js @@ -0,0 +1,392 @@ +'use strict'; + +const {type} = require('./utils'); +const {createInvalidArgumentTypeError} = require('./errors'); +const debug = require('debug')('mocha:serializer'); + +const SERIALIZABLE_RESULT_NAME = 'SerializableWorkerResult'; +const SERIALIZABLE_TYPES = new Set(['object', 'array', 'function', 'error']); + +/** + * The serializable result of a test file run from a worker. + */ +class SerializableWorkerResult { + /** + * Creates instance props; of note, the `__type` prop. + * + * Note that the failure count is _redundant_ and could be derived from the + * list of events; but since we're already doing the work, might as well use + * it. + * @param {SerializableEvent[]} [events=[]] - Events to eventually serialize + * @param {number} [failureCount=0] - Failure count + */ + constructor(events = [], failureCount = 0) { + /** + * The number of failures in this run + * @type {number} + */ + this.failureCount = failureCount; + /** + * All relevant events emitted from the {@link Runner}. + * @type {SerializableEvent[]} + */ + this.events = events; + + /** + * Symbol-like value needed to distinguish when attempting to deserialize + * this object (once it's been received over IPC). + * @type {Readonly<"SerializableWorkerResult">} + */ + Object.defineProperty(this, '__type', { + value: SERIALIZABLE_RESULT_NAME, + enumerable: true, + writable: false + }); + } + + /** + * Instantiates a new {@link SerializableWorkerResult}. + * @param {...any} args - Args to constructor + * @returns {SerilizableWorkerResult} + */ + static create(...args) { + return new SerializableWorkerResult(...args); + } + + /** + * Serializes each {@link SerializableEvent} in our `events` prop; + * makes this object read-only. + * @returns {Readonly} + */ + serialize() { + this.events.forEach(event => { + event.serialize(); + }); + return Object.freeze(this); + } + + /** + * Deserializes a {@link SerializedWorkerResult} into something reporters can + * use; calls {@link SerializableEvent.deserialize} on each item in its + * `events` prop. + * @param {SerializedWorkerResult} obj + * @returns {SerializedWorkerResult} + */ + static deserialize(obj) { + obj.events.forEach(event => { + SerializableEvent.deserialize(event); + }); + return obj; + } + + /** + * Returns `true` if this is a {@link SerializedWorkerResult} or a + * {@link SerializableWorkerResult}. + * @param {*} value - A value to check + * @returns {boolean} If true, it's deserializable + */ + static isSerializedWorkerResult(value) { + return ( + value instanceof SerializableWorkerResult || + (type(value) === 'object' && value.__type === SERIALIZABLE_RESULT_NAME) + ); + } +} + +/** + * Represents an event, emitted by a {@link Runner}, which is to be transmitted + * over IPC. + * + * Due to the contents of the event data, it's not possible to send them + * verbatim. When received by the main process--and handled by reporters--these + * objects are expected to contain {@link Runnable} instances. This class + * provides facilities to perform the translation via serialization and + * deserialization. + */ +class SerializableEvent { + /** + * Constructs a `SerializableEvent`, throwing if we receive unexpected data. 
+ * + * Practically, events emitted from `Runner` have a minumum of zero (0) + * arguments-- (for example, {@link Runnable.constants.EVENT_RUN_BEGIN}) and a + * maximum of two (2) (for example, + * {@link Runnable.constants.EVENT_TEST_FAIL}, where the second argument is an + * `Error`). The first argument, if present, is a {@link Runnable}. This + * constructor's arguments adhere to this convention. + * @param {string} eventName - A non-empty event name. + * @param {any} [originalValue] - Some data. Corresponds to extra arguments + * passed to `EventEmitter#emit`. + * @param {Error} [originalError] - An error, if there's an error. + * @throws If `eventName` is empty, or `originalValue` is a non-object. + */ + constructor(eventName, originalValue, originalError) { + if (!eventName) { + throw new Error('expected a non-empty `eventName` string argument'); + } + /** + * The event name. + * @memberof SerializableEvent + */ + this.eventName = eventName; + const originalValueType = type(originalValue); + if (originalValueType !== 'object' && originalValueType !== 'undefined') { + throw new Error( + `expected object, received [${originalValueType}]: ${originalValue}` + ); + } + /** + * An error, if present. + * @memberof SerializableEvent + */ + Object.defineProperty(this, 'originalError', { + value: originalError, + enumerable: false + }); + + /** + * The raw value. + * + * We don't want this value sent via IPC; making it non-enumerable will do that. + * + * @memberof SerializableEvent + */ + Object.defineProperty(this, 'originalValue', { + value: originalValue, + enumerable: false + }); + } + + /** + * In case you hated using `new` (I do). + * + * @param {...any} args - Args for {@link SerializableEvent#constructor}. + * @returns {SerializableEvent} A new `SerializableEvent` + */ + static create(...args) { + return new SerializableEvent(...args); + } + + /** + * Used internally by {@link SerilizableEvent#serialize}. + * @ignore + * @param {Array} pairs - List of parent/key tuples to process; modified in-place. This JSDoc type is an approximation + * @param {object} parent - Some parent object + * @param {string} key - Key to inspect + * @param {WeakSet} seenObjects - For avoiding circular references + */ + static _serialize(pairs, parent, key, seenObjects) { + let value = parent[key]; + if (seenObjects.has(value)) { + parent[key] = Object.create(null); + return; + } + if (type(value) === 'error' || value instanceof Error) { + // we need to reference the stack prop b/c it's lazily-loaded. + // `__type` is necessary for deserialization to create an `Error` later. + // `message` is apparently not enumerable, so we must handle it specifically. + value = Object.assign(Object.create(null), value, { + stack: value.stack, + message: value.message, + __type: 'Error' + }); + parent[key] = value; + // after this, the result of type(value) will be `object`, and we'll throw + // whatever other junk is in the original error into the new `value`. + } + switch (type(value)) { + case 'object': + if (type(value.serialize) === 'function') { + parent[key] = value.serialize(); + } else { + // by adding props to the `pairs` array, we will process it further + pairs.push( + ...Object.keys(value) + .filter(key => SERIALIZABLE_TYPES.has(type(value[key]))) + .map(key => [value, key]) + ); + } + break; + case 'function': + // we _may_ want to dig in to functions for some assertion libraries + // that might put a usable property on a function. + // for now, just zap it. 
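
// Illustrative round trip for the Error convention handled just above (a
// sketch using the same `__type: 'Error'` marker; it mirrors
// `_deserializeError` further down, and the helper names are illustrative):
// flatten an Error into a plain object that survives IPC, then revive it
// into a real Error on the other side.
const flattenError = err =>
  Object.assign(Object.create(null), err, {
    message: err.message,
    stack: err.stack,
    __type: 'Error'
  });
const reviveError = obj => {
  const error = new Error(obj.message);
  error.stack = obj.stack;
  Object.assign(error, obj);
  delete error.__type;
  return error;
};
// reviveError(JSON.parse(JSON.stringify(flattenError(new Error('boom'))))).message === 'boom'
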
+ delete parent[key]; + break; + case 'array': + pairs.push( + ...value + .filter(value => SERIALIZABLE_TYPES.has(type(value))) + .map((value, index) => [value, index]) + ); + break; + } + } + + /** + * Modifies this object *in place* (for theoretical memory consumption & + * performance reasons); serializes `SerializableEvent#originalValue` (placing + * the result in `SerializableEvent#data`) and `SerializableEvent#error`. + * Freezes this object. The result is an object that can be transmitted over + * IPC. + * If this quickly becomes unmaintainable, we will want to move towards immutable + * objects post-haste. + */ + serialize() { + // given a parent object and a key, inspect the value and decide whether + // to replace it, remove it, or add it to our `pairs` array to further process. + // this is recursion in loop form. + const originalValue = this.originalValue; + const result = Object.assign(Object.create(null), { + data: + type(originalValue) === 'object' && + type(originalValue.serialize) === 'function' + ? originalValue.serialize() + : originalValue, + error: this.originalError + }); + + const pairs = Object.keys(result).map(key => [result, key]); + const seenObjects = new WeakSet(); + + let pair; + while ((pair = pairs.shift())) { + SerializableEvent._serialize(pairs, ...pair, seenObjects); + seenObjects.add(pair[0]); + } + + this.data = result.data; + this.error = result.error; + + return Object.freeze(this); + } + + /** + * Used internally by {@link SerializableEvent.deserialize}; creates an `Error` + * from an `Error`-like (serialized) object + * @ignore + * @param {Object} value - An Error-like value + * @returns {Error} Real error + */ + static _deserializeError(value) { + const error = new Error(value.message); + error.stack = value.stack; + Object.assign(error, value); + delete error.__type; + return error; + } + + /** + * Used internally by {@link SerializableEvent.deserialize}; recursively + * deserializes an object in-place. + * @param {object|Array} parent - Some object or array + * @param {string|number} key - Some prop name or array index within `parent` + */ + static _deserializeObject(parent, key) { + if (key === '__proto__') { + delete parent[key]; + return; + } + const value = parent[key]; + // keys beginning with `$$` are converted into functions returning the value + // and renamed, stripping the `$$` prefix. + // functions defined this way cannot be array members! + if (type(key) === 'string' && key.startsWith('$$')) { + const newKey = key.slice(2); + parent[newKey] = () => value; + delete parent[key]; + key = newKey; + } + if (type(value) === 'array') { + value.forEach((_, idx) => { + SerializableEvent._deserializeObject(value, idx); + }); + } else if (type(value) === 'object') { + if (value.__type === 'Error') { + parent[key] = SerializableEvent._deserializeError(value); + } else { + Object.keys(value).forEach(key => { + SerializableEvent._deserializeObject(value, key); + }); + } + } + } + + /** + * Deserialize value returned from a worker into something more useful. + * Does not return the same object. 
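 * @example
 * // Illustrative round trip, assuming a runnable-like object exposing
 * // `serialize()`: a `$$`-prefixed key comes back as a function.
 * const evt = SerializableEvent.create(
 *   'pass',
 *   {serialize: () => ({$$fullTitle: 'suite test'})}
 * ).serialize();
 * const restored = SerializableEvent.deserialize(JSON.parse(JSON.stringify(evt)));
 * restored.data.fullTitle(); // => 'suite test'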
+ * @todo - do this in a loop instead of with recursion (if necessary) + * @param {SerializedEvent} obj - Object returned from worker + * @returns {SerializedEvent} Deserialized result + */ + static deserialize(obj) { + if (!obj) { + throw createInvalidArgumentTypeError('Expected value', obj); + } + + obj = Object.assign(Object.create(null), obj); + + if (obj.data) { + Object.keys(obj.data).forEach(key => { + SerializableEvent._deserializeObject(obj.data, key); + }); + } + + if (obj.error) { + obj.error = SerializableEvent._deserializeError(obj.error); + } + + return obj; + } +} + +/** + * "Serializes" a value for transmission over IPC as a message. + * + * If value is an object and has a `serialize()` method, call that method; otherwise return the object and hope for the best. + * + * @param {*} [value] - A value to serialize + */ +exports.serialize = function serialize(value) { + const result = + type(value) === 'object' && type(value.serialize) === 'function' + ? value.serialize() + : value; + debug('serialized: %O', result); + return result; +}; + +/** + * "Deserializes" a "message" received over IPC. + * + * This could be expanded with other objects that need deserialization, + * but at present time we only care about {@link SerializableWorkerResult} objects. + * + * @param {*} [value] - A "message" to deserialize + */ +exports.deserialize = function deserialize(value) { + const result = SerializableWorkerResult.isSerializedWorkerResult(value) + ? SerializableWorkerResult.deserialize(value) + : value; + debug('deserialized: %O', result); + return result; +}; + +exports.SerializableEvent = SerializableEvent; +exports.SerializableWorkerResult = SerializableWorkerResult; + +/** + * The result of calling `SerializableEvent.serialize`, as received + * by the deserializer. + * @typedef {Object} SerializedEvent + * @property {object?} data - Optional serialized data + * @property {object?} error - Optional serialized `Error` + */ + +/** + * The result of calling `SerializableWorkerResult.serialize` as received + * by the deserializer. + * @typedef {Object} SerializedWorkerResult + * @property {number} failureCount - Number of failures + * @property {SerializedEvent[]} events - Serialized events + * @property {"SerializedWorkerResult"} __type - Symbol-like to denote the type of object this is + */ diff --git a/lib/suite.js b/lib/suite.js index 191d946b50..a8301cc809 100644 --- a/lib/suite.js +++ b/lib/suite.js @@ -549,6 +549,21 @@ Suite.prototype.cleanReferences = function cleanReferences() { } }; +/** + * Returns an object suitable for IPC. + * Functions are represented by keys beginning with `$$`. + * @returns {Object} + */ +Suite.prototype.serialize = function serialize() { + return { + _bail: this._bail, + $$fullTitle: this.fullTitle(), + $$isPending: this.isPending(), + root: this.root, + title: this.title + }; +}; + var constants = utils.defineConstants( /** * {@link Suite}-related constants. diff --git a/lib/test.js b/lib/test.js index 65122b260c..e40d4adb02 100644 --- a/lib/test.js +++ b/lib/test.js @@ -62,3 +62,29 @@ Test.prototype.clone = function() { test.ctx = this.ctx; return test; }; + +/** + * Returns an object suitable for IPC. + * Functions are represented by keys beginning with `$$`. 
+ * @returns {Object} + */ +Test.prototype.serialize = function serialize() { + return { + $$currentRetry: this._currentRetry, + $$fullTitle: this.fullTitle(), + $$isPending: this.pending, + $$retriedTest: this._retriedTest || null, + $$slow: this._slow, + $$titlePath: this.titlePath(), + body: this.body, + duration: this.duration, + err: this.err, + parent: { + $$fullTitle: this.parent.fullTitle() + }, + speed: this.speed, + state: this.state, + title: this.title, + type: this.type + }; +}; diff --git a/lib/worker.js b/lib/worker.js new file mode 100644 index 0000000000..32cfaa246d --- /dev/null +++ b/lib/worker.js @@ -0,0 +1,114 @@ +'use strict'; + +const {createInvalidArgumentTypeError} = require('./errors'); +const workerpool = require('workerpool'); +const Mocha = require('./mocha'); +const { + handleRequires, + validatePlugin, + loadRootHooks +} = require('./cli/run-helpers'); +const debug = require('debug')(`mocha:parallel:worker:${process.pid}`); +const {serialize} = require('./serializer'); +const {setInterval, clearInterval} = global; + +const BUFFERED_REPORTER_PATH = require.resolve('./reporters/buffered'); + +let rootHooks; + +if (workerpool.isMainThread) { + throw new Error( + 'This script is intended to be run as a worker (by the `workerpool` package).' + ); +} + +/** + * Initializes some stuff on the first call to {@link run}. + * + * Handles `--require` and `--ui`. Does _not_ handle `--reporter`, + * as only the `Buffered` reporter is used. + * + * **This function only runs once per worker**; it overwrites itself with a no-op + * before returning. + * + * @param {Options} argv - Command-line options + */ +let bootstrap = async argv => { + const rawRootHooks = handleRequires(argv.require); + rootHooks = await loadRootHooks(rawRootHooks); + validatePlugin(argv, 'ui', Mocha.interfaces); + bootstrap = () => {}; +}; + +/** + * Runs a single test file in a worker thread. + * @param {string} filepath - Filepath of test file + * @param {Options} [argv] - Parsed command-line options object + * @returns {Promise<{failures: number, events: BufferedEvent[]}>} - Test + * failure count and list of events. + */ +async function run(filepath, argv = {ui: 'bdd'}) { + if (!filepath) { + throw createInvalidArgumentTypeError( + 'Expected a non-empty "filepath" argument', + 'file', + 'string' + ); + } + + debug('run(): running test file %s', filepath); + + const opts = Object.assign(argv, { + // workers only use the `Buffered` reporter. + reporter: BUFFERED_REPORTER_PATH, + // if this was true, it would cause infinite recursion. + parallel: false + }); + + await bootstrap(opts); + + opts.rootHooks = rootHooks; + + const mocha = new Mocha(opts).addFile(filepath); + + try { + await mocha.loadFilesAsync(); + } catch (err) { + debug('run(): could not load file %s: %s', filepath, err); + throw err; + } + + return new Promise((resolve, reject) => { + const t = setInterval(() => { + debug('run(): still running %s...', filepath); + }, 5000).unref(); + mocha.run(result => { + // Runner adds these; if we don't remove them, we'll get a leak. + process.removeAllListeners('uncaughtException'); + + try { + const serialized = serialize(result); + debug( + 'run(): completed run with %d test failures; returning to main process', + typeof result.failures === 'number' ? result.failures : 0 + ); + resolve(serialized); + } catch (err) { + // TODO: figure out exactly what the sad path looks like here. 
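
// A sketch of the calling side (based on the BufferedRunner code above; the
// function name is illustrative, not an export of this module): the main
// process creates a workerpool around this file, proxies `run`, and
// deserializes each worker's result back into `{failureCount, events}` for
// its reporter.
const sketchRunFileInWorker = async (testFile, mochaOpts) => {
  const pool = workerpool.pool(require.resolve('./worker.js'), {
    workerType: 'process',
    maxWorkers: 2
  });
  try {
    const proxy = await pool.proxy();
    const raw = await proxy.run(testFile, mochaOpts);
    return require('./serializer').deserialize(raw); // {failureCount, events}
  } finally {
    await pool.terminate();
  }
};
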
+ // rejection should only happen if an error is "unrecoverable" + debug('run(): serialization failed; rejecting: %O', err); + reject(err); + } finally { + clearInterval(t); + } + }); + }); +} + +// this registers the `run` function. +workerpool.worker({run}); + +debug('started worker process'); + +// for testing +exports.run = run; diff --git a/package-scripts.js b/package-scripts.js index 5e0b1736ef..cdb2556d16 100644 --- a/package-scripts.js +++ b/package-scripts.js @@ -10,10 +10,19 @@ const path = require('path'); * @returns {string} Command string to be executed by nps. */ function test(testName, mochaParams) { - const coverageCommand = `nyc --no-clean --report-dir coverage/reports/${testName}`; + let coverageCommand = `nyc --no-clean --report-dir coverage/reports/${testName}`; const mochaCommand = `node ${path.join('bin', 'mocha')}`; // Include 'node' and path.join for Windows compatibility - if (process.env.CI && !/^only-/.test(testName)) { - mochaParams += ' --forbid-only'; + if (process.env.CI) { + // suppress coverage summaries in CI to reduce noise + coverageCommand += ' --reporter=json'; + if (!/^only-/.test(testName)) { + mochaParams += ' --forbid-only'; + } + mochaParams += ' --color'; + } + // this may _actually_ be supported in the future + if (process.env.MOCHA_PARALLEL === '0') { + mochaParams += ' --no-parallel'; } return `${ process.env.COVERAGE ? coverageCommand : '' @@ -23,7 +32,7 @@ function test(testName, mochaParams) { module.exports = { scripts: { build: { - script: `browserify -e browser-entry.js --plugin ./scripts/dedefine --ignore './lib/cli/*.js' --ignore "./lib/esm-utils.js" --ignore 'chokidar' --ignore 'fs' --ignore 'glob' --ignore 'path' --ignore 'supports-color' -o mocha.js`, + script: `browserify -e browser-entry.js --plugin ./scripts/dedefine --ignore './lib/cli/*.js' --ignore "./lib/esm-utils.js" --ignore 'chokidar' --ignore 'fs' --ignore 'glob' --ignore 'path' --ignore 'supports-color' --ignore './lib/buffered-runner.js' --ignore './lib/serializer.js' --ignore './lib/reporters/buffered.js' --ignore './lib/worker.js' -o mocha.js`, description: 'Build browser bundle' }, lint: { diff --git a/package.json b/package.json index d2a133a380..f7439b13ed 100644 --- a/package.json +++ b/package.json @@ -58,10 +58,12 @@ "minimatch": "3.0.4", "ms": "2.1.2", "object.assign": "4.1.0", + "promise.allsettled": "1.0.2", "strip-json-comments": "3.0.1", "supports-color": "7.1.0", "which": "2.0.2", "wide-align": "1.1.3", + "workerpool": "5.0.4", "yargs": "13.3.2", "yargs-parser": "13.1.2", "yargs-unparser": "1.6.0" @@ -120,8 +122,8 @@ "svgo": "^1.3.2", "through2": "^3.0.1", "to-vfile": "^6.1.0", - "unexpected": "^11.13.0", - "unexpected-eventemitter": "^1.1.3", + "unexpected": "^11.14.0", + "unexpected-eventemitter": "^2.1.0", "unexpected-sinon": "^10.11.2", "uslug": "^1.0.4", "watchify": "^3.11.1" @@ -149,7 +151,11 @@ "fs": false, "glob": false, "path": false, - "supports-color": false + "supports-color": false, + "./lib/serializer.js": false, + "./lib/reporters/buffered.js": false, + "./lib/buffered-reporter.js": false, + "./lib/worker.js": false }, "prettier": { "singleQuote": true, diff --git a/test/integration/fixtures/options/jobs/fail-in-parallel.fixture.js b/test/integration/fixtures/options/jobs/fail-in-parallel.fixture.js new file mode 100644 index 0000000000..4601ed330c --- /dev/null +++ b/test/integration/fixtures/options/jobs/fail-in-parallel.fixture.js @@ -0,0 +1,7 @@ +'use strict'; + +it('should fail if in a worker', function() { + if 
(/worker\.js$/.test(require.main.filename)) { + throw new Error('in worker!'); + } +}); \ No newline at end of file diff --git a/test/integration/fixtures/options/parallel/bail.fixture.js b/test/integration/fixtures/options/parallel/bail.fixture.js new file mode 100644 index 0000000000..77692d56d1 --- /dev/null +++ b/test/integration/fixtures/options/parallel/bail.fixture.js @@ -0,0 +1,9 @@ +describe('some suite', function() { + this.bail(true); + + it('should bail', function() { + throw new Error(); + }); + + it('will not get run', function() {}); +}); diff --git a/test/integration/fixtures/options/parallel/retries-a.fixture.js b/test/integration/fixtures/options/parallel/retries-a.fixture.js new file mode 100644 index 0000000000..e5d37ca561 --- /dev/null +++ b/test/integration/fixtures/options/parallel/retries-a.fixture.js @@ -0,0 +1,5 @@ +describe('retry suite A', function() { + it('should pass', function() { + + }); +}); \ No newline at end of file diff --git a/test/integration/fixtures/options/parallel/retries-b.fixture.js b/test/integration/fixtures/options/parallel/retries-b.fixture.js new file mode 100644 index 0000000000..c1f1d3f7ba --- /dev/null +++ b/test/integration/fixtures/options/parallel/retries-b.fixture.js @@ -0,0 +1,8 @@ +describe('retry suite B', function() { + let count = 0; + it('should retry', function() { + this.retries(3); + console.log(`count: ${++count}`); + throw new Error('failure'); + }); +}); \ No newline at end of file diff --git a/test/integration/fixtures/options/parallel/syntax-err.fixture.js b/test/integration/fixtures/options/parallel/syntax-err.fixture.js new file mode 100644 index 0000000000..8dd2b0f93b --- /dev/null +++ b/test/integration/fixtures/options/parallel/syntax-err.fixture.js @@ -0,0 +1 @@ +var foo = \ No newline at end of file diff --git a/test/integration/fixtures/options/parallel/test-a.fixture.js b/test/integration/fixtures/options/parallel/test-a.fixture.js new file mode 100644 index 0000000000..43f53bbda8 --- /dev/null +++ b/test/integration/fixtures/options/parallel/test-a.fixture.js @@ -0,0 +1,3 @@ +describe('a', function() { + it('should pass', function() {}); +}); diff --git a/test/integration/fixtures/options/parallel/test-b.fixture.js b/test/integration/fixtures/options/parallel/test-b.fixture.js new file mode 100644 index 0000000000..8e6437a56a --- /dev/null +++ b/test/integration/fixtures/options/parallel/test-b.fixture.js @@ -0,0 +1,3 @@ +describe('b', function() { + it('should be pending'); +}); diff --git a/test/integration/fixtures/options/parallel/test-c.fixture.js b/test/integration/fixtures/options/parallel/test-c.fixture.js new file mode 100644 index 0000000000..d06b6a3ee6 --- /dev/null +++ b/test/integration/fixtures/options/parallel/test-c.fixture.js @@ -0,0 +1,5 @@ +describe('c', function() { + it('should fail', function() { + throw new Error('failure'); + }); +}); diff --git a/test/integration/fixtures/options/parallel/test-d.fixture.js b/test/integration/fixtures/options/parallel/test-d.fixture.js new file mode 100644 index 0000000000..ee19d54594 --- /dev/null +++ b/test/integration/fixtures/options/parallel/test-d.fixture.js @@ -0,0 +1,7 @@ +describe('d', function() { + it('should pass, then fail', function() { + process.nextTick(function() { + throw new Error('uncaught!!'); + }); + }); +}); diff --git a/test/integration/fixtures/options/parallel/uncaught.fixture.js b/test/integration/fixtures/options/parallel/uncaught.fixture.js new file mode 100644 index 0000000000..bca1610ab7 --- /dev/null +++ 
b/test/integration/fixtures/options/parallel/uncaught.fixture.js @@ -0,0 +1,7 @@ +'use strict'; + +it('throws an uncaught exception', function (done) { + process.nextTick(function () { + throw new Error('existential isolation!!'); + }); +}); diff --git a/test/integration/helpers.js b/test/integration/helpers.js index 6cdf7e93cf..e08740bb20 100644 --- a/test/integration/helpers.js +++ b/test/integration/helpers.js @@ -178,11 +178,29 @@ function toJSONRunResult(result) { /** * Creates arguments loading a default fixture if none provided * + * - The `--no-color` arg is always used (color output complicates testing `STDOUT`) + * - Unless `--bail` or `--no-bail` is set, use `--no-bail`. This enables using + * `--bail` (if desired) from the command-line when running our integration + * test suites without stepping on the toes of subprocesses. + * - Unless `--parallel` or `--no-parallel` is set, use `--no-parallel`. We + * assume the test suite is _already_ running in parallel--and there's no point + * in trying to run a single test fixture in parallel. + * - The {@link DEFAULT_FIXTURE} file is used if no arguments are provided. + * * @param {string[]|*} [args] - Arguments to `spawn` * @returns string[] */ function defaultArgs(args) { - return !args || !args.length ? ['--file', DEFAULT_FIXTURE] : args; + var newArgs = (!args || !args.length ? [DEFAULT_FIXTURE] : args).concat([ + '--no-color' + ]); + if (!newArgs.some(arg => /--(no-)?bail/.test(arg))) { + newArgs.push('--no-bail'); + } + if (!newArgs.some(arg => /--(no-)?parallel/.test(arg))) { + newArgs.push('--no-parallel'); + } + return newArgs; } function invokeMocha(args, fn, opts) { diff --git a/test/integration/options/jobs.spec.js b/test/integration/options/jobs.spec.js new file mode 100644 index 0000000000..832d825298 --- /dev/null +++ b/test/integration/options/jobs.spec.js @@ -0,0 +1,34 @@ +'use strict'; + +var helpers = require('../helpers'); +var runMochaAsync = helpers.runMochaAsync; + +describe('--jobs', function() { + describe('when set to a number less than 2', function() { + it('should run tests in serial', function() { + return expect( + runMochaAsync( + 'options/jobs/fail-in-parallel', + ['--parallel', '--jobs', '1'], + 'pipe' + ), + 'when fulfilled', + 'to have passed' + ); + }); + }); + + describe('when set to a number greater than 1', function() { + it('should run tests in parallel', function() { + return expect( + runMochaAsync( + 'options/jobs/fail-in-parallel', + ['--parallel', '--jobs', '2'], + 'pipe' + ), + 'when fulfilled', + 'to have failed' + ); + }); + }); +}); diff --git a/test/integration/options/parallel.spec.js b/test/integration/options/parallel.spec.js new file mode 100644 index 0000000000..d860e35429 --- /dev/null +++ b/test/integration/options/parallel.spec.js @@ -0,0 +1,442 @@ +'use strict'; +var Mocha = require('../../../lib/mocha'); +var path = require('path'); +var helpers = require('../helpers'); +var runMochaAsync = helpers.runMochaAsync; +var invokeMochaAsync = helpers.invokeMochaAsync; +var getSummary = helpers.getSummary; +var utils = require('../../../lib/utils'); + +function compareReporters(reporter) { + this.timeout(Math.max(this.timeout(), 5000)); + return runMochaAsync(path.join('options', 'parallel', 'test-a.fixture.js'), [ + '--reporter', + reporter, + '--no-parallel' + ]).then(function(expected) { + expected.output = expected.output.replace(/\d+ms/g, /100ms/); + return runMochaAsync( + path.join('options', 'parallel', 'test-a.fixture.js'), + ['--reporter', reporter, '--parallel'] + 
).then(function(actual) { + actual.output = actual.output.replace(/\d+ms/g, /100ms/); + return [actual, expected]; + }); + }); +} + +function runGenericReporterTest(reporter) { + return compareReporters.call(this, reporter).then(function(result) { + var expected = result.shift(); + var actual = result.shift(); + return expect(actual, 'to satisfy', { + passing: expected.passing, + failing: expected.failing, + pending: expected.pending, + code: expected.code, + output: expected.output + }); + }); +} + +describe('--parallel', function() { + describe('when a test has a syntax error', function() { + describe('when there is only a single test file', function() { + it('should fail gracefully', function() { + return expect( + runMochaAsync('options/parallel/syntax-err', ['--parallel']), + 'when fulfilled', + 'to have failed with output', + /SyntaxError/ + ); + }); + }); + + describe('when there are multiple test files', function() { + it('should fail gracefully', function() { + return expect( + invokeMochaAsync( + [ + require.resolve( + '../fixtures/options/parallel/syntax-err.fixture.js' + ), + '--parallel' + ], + 'pipe' + )[1], + 'when fulfilled', + 'to have failed' + ); + }); + }); + }); + + describe('when used with CJS tests', function() { + it('should have the same result as with --no-parallel', function() { + this.timeout(Math.max(this.timeout(), 5000)); + return runMochaAsync( + path.join('options', 'parallel', 'test-*.fixture.js'), + ['--no-parallel'] + ).then(function(expected) { + return expect( + runMochaAsync(path.join('options', 'parallel', 'test-*.fixture.js'), [ + '--parallel' + ]), + 'to be fulfilled with value satisfying', + { + passing: expected.passing, + failing: expected.failing, + pending: expected.pending, + code: expected.code + } + ); + }); + }); + }); + + describe('when used with ESM tests', function() { + var esmArgs = + Number(process.versions.node.split('.')[0]) >= 13 + ? 
[] + : ['--experimental-modules']; + + before(function() { + if (!utils.supportsEsModules()) this.skip(); + }); + + it('should have the same result as with --no-parallel', function() { + this.timeout(Math.min(this.timeout(), 5000)); + + var args = [ + path.join(__dirname, '..', 'fixtures', 'esm', '*.fixture.mjs') + ].concat(esmArgs); + return invokeMochaAsync(args.concat('--no-parallel'))[1].then(function( + expected + ) { + var expectedSummary = getSummary(expected); + return invokeMochaAsync(args.concat('--parallel'))[1].then(function( + actual + ) { + var actualSummary = getSummary(actual); + expect(actualSummary, 'to satisfy', { + pending: expectedSummary.pending, + passing: expectedSummary.passing, + failing: expectedSummary.failing + }); + }); + }); + }); + }); + + describe('when used with --retries', function() { + it('should retry tests appropriately', function() { + return expect( + runMochaAsync( + path.join('options', 'parallel', 'retries-*.fixture.js'), + ['--parallel'] + ), + 'when fulfilled', + 'to have failed' + ) + .and('when fulfilled', 'to have passed test count', 1) + .and('when fulfilled', 'to have pending test count', 0) + .and('when fulfilled', 'to have failed test count', 1) + .and('when fulfilled', 'to contain output', /count: 3/); + }); + }); + + describe('when used with --allow-uncaught', function() { + it('should bubble up an exception', function() { + return expect( + invokeMochaAsync( + [ + require.resolve('../fixtures/options/parallel/uncaught.fixture.js'), + '--parallel', + '--allow-uncaught' + ], + 'pipe' + )[1], + 'when fulfilled', + 'to contain output', + /Error: existential isolation/i + ).and('when fulfilled', 'to have exit code', 1); + }); + }); + + describe('when used with --file', function() { + it('should error out', function() { + return expect( + invokeMochaAsync( + [ + '--file', + path.join('options', 'parallel', 'test-a.fixture.js'), + '--parallel' + ], + 'pipe' + )[1], + 'when fulfilled', + 'to contain output', + /mutually exclusive with --file/ + ); + }); + }); + + describe('when used with --sort', function() { + it('should error out', function() { + return expect( + invokeMochaAsync( + [ + '--sort', + path.join('options', 'parallel', 'test-*.fixture.js'), + '--parallel' + ], + 'pipe' + )[1], + 'when fulfilled', + 'to contain output', + /mutually exclusive with --sort/ + ); + }); + }); + + describe('when used with --bail', function() { + it('should skip some tests', function() { + return runMochaAsync( + path.join('options', 'parallel', 'test-*.fixture.js'), + ['--parallel', '--bail'] + ).then(function(result) { + // we don't know _exactly_ how many tests will be skipped here + // due to the --bail, but the number of tests completed should be + // less than the total, which is 5. 
+ return expect( + result.passing + result.pending + result.failing, + 'to be less than', + 5 + ); + }); + }); + + it('should fail', function() { + return expect( + runMochaAsync(path.join('options', 'parallel', 'test-*.fixture.js'), [ + '--parallel', + '--bail' + ]), + 'when fulfilled', + 'to have failed' + ); + }); + }); + + describe('when encountering a "bail" in context', function() { + it('should skip some tests', function() { + return runMochaAsync('options/parallel/bail', ['--parallel']).then( + function(result) { + return expect( + result.passing + result.pending + result.failing, + 'to be less than', + 2 + ); + } + ); + }); + + it('should fail', function() { + return expect( + runMochaAsync('options/parallel/bail', ['--parallel', '--bail']), + 'when fulfilled', + 'to have failed' + ); + }); + }); + + describe('reporter equivalence', function() { + // each reporter name is duplicated; one is in all lower-case + // 'base' is abstract, 'html' is browser-only, others are incompatible + var DENY = ['progress', 'base', 'html', 'markdown', 'json-stream']; + Object.keys(Mocha.reporters) + .filter(function(name) { + return /^[a-z]/.test(name) && DENY.indexOf(name) === -1; + }) + .forEach(function(reporter) { + describe( + 'when multiple test files run with --reporter=' + reporter, + function() { + it('should have the same result as when run with --no-parallel', function() { + // note that the output may not be in the same order, as running file + // order is non-deterministic in parallel mode + this.timeout(Math.max(this.timeout(), 5000)); + return runMochaAsync( + path.join('options', 'parallel', 'test-*.fixture.js'), + ['--reporter', reporter, '--no-parallel'] + ).then(function(expected) { + return expect( + runMochaAsync( + path.join('options', 'parallel', 'test-*.fixture.js'), + ['--reporter', reporter, '--parallel'] + ), + 'to be fulfilled with value satisfying', + { + passing: expected.passing, + failing: expected.failing, + pending: expected.pending, + code: expected.code + } + ); + }); + }); + } + ); + }); + }); + + describe('when a single test file is run with --reporter=dot', function() { + it('should have the same output as when run with --no-parallel', function() { + return runGenericReporterTest.call(this, 'dot'); + }); + }); + + describe('when a single test file is run with --reporter=doc', function() { + it('should have the same output as when run with --no-parallel', function() { + return runGenericReporterTest.call(this, 'doc'); + }); + }); + + describe('when a single test file is run with --reporter=tap', function() { + it('should have the same output as when run with --no-parallel', function() { + return runGenericReporterTest.call(this, 'tap'); + }); + }); + + describe('when a single test file is run with --reporter=list', function() { + it('should have the same output as when run with --no-parallel', function() { + return runGenericReporterTest.call(this, 'list'); + }); + }); + + describe('when a single test file is run with --reporter=min', function() { + it('should have the same output as when run with --no-parallel', function() { + return runGenericReporterTest.call(this, 'min'); + }); + }); + + describe('when a single test file is run with --reporter=spec', function() { + it('should have the same output as when run with --no-parallel', function() { + return runGenericReporterTest.call(this, 'spec'); + }); + }); + + describe('when used with --reporter=nyan', function() { + it('should have the same output as when run with --no-parallel', function() { + return 
runGenericReporterTest.call(this, 'nyan'); + }); + }); + + describe('when a single test file is run with --reporter=landing', function() { + it('should have the same output as when run with --no-parallel', function() { + return runGenericReporterTest.call(this, 'landing'); + }); + }); + + describe('when a single test file is run with --reporter=progress', function() { + it('should fail due to incompatibility', function() { + return expect( + invokeMochaAsync( + [ + require.resolve('../fixtures/options/parallel/test-a.fixture.js'), + '--reporter=progress', + '--parallel' + ], + 'pipe' + )[1], + 'when fulfilled', + 'to have failed' + ).and('when fulfilled', 'to contain output', /mutually exclusive/); + }); + }); + + describe('when a single test file is run with --reporter=markdown', function() { + it('should fail due to incompatibility', function() { + return expect( + invokeMochaAsync( + [ + require.resolve('../fixtures/options/parallel/test-a.fixture.js'), + '--reporter=markdown', + '--parallel' + ], + 'pipe' + )[1], + 'when fulfilled', + 'to have failed' + ).and('when fulfilled', 'to contain output', /mutually exclusive/); + }); + }); + + describe('when a single test file is run with --reporter=json-stream', function() { + it('should fail due to incompatibility', function() { + return expect( + invokeMochaAsync( + [ + require.resolve('../fixtures/options/parallel/test-a.fixture.js'), + '--reporter=json-stream', + '--parallel' + ], + 'pipe' + )[1], + 'when fulfilled', + 'to have failed' + ).and('when fulfilled', 'to contain output', /mutually exclusive/); + }); + }); + + describe('when a single test file is run with --reporter=json', function() { + it('should have the same output as when run with --no-parallel', function() { + // this one has some timings/durations that we can safely ignore + return compareReporters.call(this, 'json').then(function(result) { + var expected = result.shift(); + expected.output = JSON.parse(expected.output); + var actual = result.shift(); + actual.output = JSON.parse(actual.output); + return expect(actual, 'to satisfy', { + passing: expected.passing, + failing: expected.failing, + pending: expected.pending, + code: expected.code, + output: { + stats: { + suites: expected.output.stats.suites, + tests: expected.output.stats.tests, + passes: expected.output.stats.passes, + pending: expected.output.stats.pending, + failures: expected.output.stats.failures + }, + tests: expected.tests + } + }); + }); + }); + }); + + describe('when a single test file is run with --reporter=xunit', function() { + it('should have the same output as when run with --no-parallel', function() { + // durations need replacing + return compareReporters.call(this, 'xunit').then(function(result) { + var expected = result.shift(); + expected.output = expected.output + .replace(/time=".+?"/g, 'time="0.5"') + .replace(/timestamp=".+?"/g, 'timestamp="some-timestamp'); + var actual = result.shift(); + actual.output = actual.output + .replace(/time=".+?"/g, 'time="0.5"') + .replace(/timestamp=".+?"/g, 'timestamp="some-timestamp'); + return expect(actual, 'to satisfy', { + passing: expected.passing, + failing: expected.failing, + pending: expected.pending, + code: expected.code, + output: expected.output + }); + }); + }); + }); +}); diff --git a/test/node-unit/buffered-runner.spec.js b/test/node-unit/buffered-runner.spec.js new file mode 100644 index 0000000000..0330c62241 --- /dev/null +++ b/test/node-unit/buffered-runner.spec.js @@ -0,0 +1,596 @@ +'use strict'; + +const { + EVENT_RUN_BEGIN, + 
EVENT_TEST_PASS, + EVENT_TEST_FAIL, + EVENT_SUITE_END, + EVENT_SUITE_BEGIN +} = require('../../lib/runner').constants; +const rewiremock = require('rewiremock/node'); +const BUFFERED_RUNNER_PATH = require.resolve('../../lib/buffered-runner.js'); +const Suite = require('../../lib/suite'); +const {createSandbox} = require('sinon'); + +describe('buffered-runner', function() { + describe('BufferedRunner', function() { + let sandbox; + let pool; + let run; + let terminate; + let BufferedRunner; + let suite; + let warn; + let cpuCount; + + beforeEach(function() { + sandbox = createSandbox(); + cpuCount = 1; + suite = new Suite('a root suite', {}, true); + warn = sandbox.stub(); + + // tests will want to further define the behavior of these. + run = sandbox.stub(); + terminate = sandbox.stub(); + + pool = sandbox.stub().returns({ + proxy: sandbox.stub().resolves({ + run + }), + terminate, + stats: sandbox.stub().returns({}) + }); + BufferedRunner = rewiremock.proxy(BUFFERED_RUNNER_PATH, r => ({ + workerpool: { + pool + }, + os: { + cpus: sandbox.stub().callsFake(() => new Array(cpuCount)) + }, + [require.resolve('../../lib/utils')]: r.with({warn}).callThrough() + })); + }); + + describe('instance method', function() { + describe('run', function() { + let runner; + + beforeEach(function() { + runner = new BufferedRunner(suite); + }); + + // the purpose of this is to ensure that--despite using `Promise`s + // internally--`BufferedRunner#run` does not return a `Promise`. + it('should be chainable', function(done) { + expect(runner.run(done, {files: [], options: {}}), 'to be', runner); + }); + + it('should emit `EVENT_RUN_BEGIN`', async function() { + return expect( + () => + new Promise(resolve => { + runner.run(resolve, {files: [], options: {}}); + }), + 'to emit from', + runner, + EVENT_RUN_BEGIN + ); + }); + + describe('when a worker fails', function() { + it('should recover', function(done) { + const options = {}; + run.withArgs('some-file.js', options).rejects(new Error('whoops')); + run.withArgs('some-other-file.js', options).resolves({ + failureCount: 0, + events: [ + { + eventName: EVENT_TEST_PASS, + data: { + title: 'some test' + } + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite' + } + } + ] + }); + + runner.run( + () => { + expect(terminate, 'to have calls satisfying', [{args: []}]); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options + } + ); + }); + + it('should delegate to Runner#uncaught', function(done) { + const options = {}; + sandbox.spy(runner, 'uncaught'); + const err = new Error('whoops'); + run.withArgs('some-file.js', options).rejects(new Error('whoops')); + run.withArgs('some-other-file.js', options).resolves({ + failureCount: 0, + events: [ + { + eventName: EVENT_TEST_PASS, + data: { + title: 'some test' + } + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite' + } + } + ] + }); + + runner.run( + () => { + expect(runner.uncaught, 'to have a call satisfying', [err]); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options + } + ); + }); + }); + + describe('when not provided a max job count', function() { + it('should use a max job count based on CPU cores', function(done) { + runner.run( + () => { + expect(pool, 'to have a call satisfying', { + args: [ + expect.it('to be a', 'string'), + { + maxWorkers: Math.max(cpuCount - 1, 1) + } + ] + }); + done(); + }, + {files: [], options: {}} + ); + }); + }); + + describe('when provided a max job count', function() { + beforeEach(function() { + 
cpuCount = 8; + }); + + it('should use the provided max count', function(done) { + runner.run( + () => { + expect(pool, 'to have a call satisfying', { + args: [ + expect.it('to be a', 'string'), + { + maxWorkers: 4 + } + ] + }); + done(); + }, + { + files: [], + options: { + jobs: 4 + } + } + ); + }); + + describe('when the max job count exceeds the CPU count', function() { + it('should warn', function(done) { + run.resolves({failureCount: 0, events: []}); + runner.run( + () => { + expect(warn, 'to have a call satisfying', [ + /only enough cores available/ + ]); + done(); + }, + { + files: [], + options: {jobs: 16} + } + ); + }); + }); + + describe('when there are not enough CPU cores', function() { + beforeEach(function() { + cpuCount = 2; + }); + + it('should warn', function(done) { + run.resolves({failureCount: 0, events: []}); + runner.run( + () => { + expect(warn, 'to have a call satisfying', [ + /avoid --parallel on this machine/ + ]); + done(); + }, + { + files: [], + options: {jobs: 4} + } + ); + }); + }); + }); + + describe('when suite should bail', function() { + describe('when no event contains an error', function() { + it('should not force-terminate', function(done) { + run.resolves({ + failureCount: 0, + events: [ + { + eventName: EVENT_SUITE_BEGIN, + data: { + title: 'some suite', + _bail: true + } + }, + { + eventName: EVENT_TEST_PASS, + data: { + title: 'some test' + } + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite', + _bail: true + } + } + ] + }); + + runner.run( + () => { + expect(terminate, 'to have a call satisfying', { + args: [] + }).and('was called once'); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options: {} + } + ); + }); + }); + + describe('when an event contains an error and has positive failures', function() { + describe('when subsequent files have not yet been run', function() { + it('should cleanly terminate the thread pool', function(done) { + const options = {}; + const err = { + __type: 'Error', + message: 'oh no' + }; + run.withArgs('some-file.js', options).resolves({ + failureCount: 1, + events: [ + { + eventName: EVENT_SUITE_BEGIN, + data: { + title: 'some suite', + _bail: true + } + }, + { + eventName: EVENT_TEST_FAIL, + data: { + title: 'some test' + }, + error: err + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite', + _bail: true + } + } + ] + }); + run.withArgs('some-other-file.js', options).rejects(); + + runner.run( + () => { + expect(terminate, 'to have calls satisfying', [ + {args: []}, // this is the pool force-terminating + {args: []} // this will always be called, and will do nothing due to the previous call + ]).and('was called twice'); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options + } + ); + }); + }); + describe('when subsequent files already started running', function() { + it('should cleanly terminate the thread pool', function(done) { + const options = {}; + const err = { + __type: 'Error', + message: 'oh no' + }; + run.withArgs('some-file.js', options).resolves({ + failureCount: 1, + events: [ + { + eventName: EVENT_SUITE_BEGIN, + data: { + title: 'some suite', + _bail: true + } + }, + { + eventName: EVENT_TEST_FAIL, + data: { + title: 'some test' + }, + error: err + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite', + _bail: true + } + } + ] + }); + run.withArgs('some-other-file.js', options).resolves({ + failureCount: 0, + events: [ + { + eventName: EVENT_SUITE_BEGIN, + data: { + title: 'some suite' + } + }, + { + 
eventName: EVENT_TEST_PASS, + data: { + title: 'some test' + } + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite' + } + } + ] + }); + + runner.run( + () => { + expect(terminate, 'to have calls satisfying', [ + {args: []}, // this is the pool force-terminating + {args: []} // this will always be called, and will do nothing due to the previous call + ]).and('was called twice'); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options + } + ); + }); + }); + }); + }); + + describe('when a suite has a bail flag', function() { + describe('when no event contains an error', function() { + it('should not force-terminate', function(done) { + run.resolves({ + failureCount: 0, + events: [ + { + eventName: EVENT_TEST_PASS, + data: { + title: 'some test' + } + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite', + _bail: true + } + } + ] + }); + + runner.run( + () => { + expect(terminate, 'to have a call satisfying', { + args: [] + }).and('was called once'); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options: {} + } + ); + }); + }); + describe('when an event contains an error and has positive failures', function() { + describe('when subsequent files have not yet been run', function() { + it('should cleanly terminate the thread pool', function(done) { + const options = {}; + const err = { + __type: 'Error', + message: 'oh no' + }; + run.withArgs('some-file.js', options).resolves({ + failureCount: 1, + events: [ + { + eventName: EVENT_TEST_FAIL, + data: { + title: 'some test' + }, + error: err + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite', + _bail: true + } + } + ] + }); + run.withArgs('some-other-file.js', options).rejects(); + + runner.run( + () => { + expect(terminate, 'to have calls satisfying', [ + {args: []}, // this is the pool force-terminating + {args: []} // this will always be called, and will do nothing due to the previous call + ]).and('was called twice'); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options + } + ); + }); + }); + + describe('when subsequent files already started running', function() { + it('should cleanly terminate the thread pool', function(done) { + const options = {}; + const err = { + __type: 'Error', + message: 'oh no' + }; + run.withArgs('some-file.js', options).resolves({ + failureCount: 1, + events: [ + { + eventName: EVENT_TEST_FAIL, + data: { + title: 'some test' + }, + error: err + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite', + _bail: true + } + } + ] + }); + run.withArgs('some-other-file.js', options).resolves({ + failureCount: 0, + events: [ + { + eventName: EVENT_TEST_PASS, + data: { + title: 'some test' + } + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite' + } + } + ] + }); + + runner.run( + () => { + expect(terminate, 'to have calls satisfying', [ + {args: []}, // this is the pool force-terminating + {args: []} // this will always be called, and will do nothing due to the previous call + ]).and('was called twice'); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options + } + ); + }); + }); + + describe('when subsequent files have not yet been run', function() { + it('should cleanly terminate the thread pool', function(done) { + const options = {}; + const err = { + __type: 'Error', + message: 'oh no' + }; + run.withArgs('some-file.js', options).resolves({ + failureCount: 1, + events: [ + { + eventName: EVENT_TEST_FAIL, + data: { + title: 'some test' + }, + 
error: err + }, + { + eventName: EVENT_SUITE_END, + data: { + title: 'some suite', + _bail: true + } + } + ] + }); + run.withArgs('some-other-file.js', options).rejects(); + + runner.run( + () => { + expect(terminate, 'to have calls satisfying', [ + {args: []}, // this is the pool force-terminating + {args: []} // this will always be called, and will do nothing due to the previous call + ]).and('was called twice'); + done(); + }, + { + files: ['some-file.js', 'some-other-file.js'], + options + } + ); + }); + }); + }); + }); + }); + }); + }); +}); diff --git a/test/node-unit/serializer.spec.js b/test/node-unit/serializer.spec.js new file mode 100644 index 0000000000..437f133a4e --- /dev/null +++ b/test/node-unit/serializer.spec.js @@ -0,0 +1,555 @@ +'use strict'; + +const {createSandbox} = require('sinon'); +const { + serialize, + deserialize, + SerializableEvent, + SerializableWorkerResult +} = require('../../lib/serializer'); + +describe('serializer', function() { + let sandbox; + + beforeEach(function() { + sandbox = createSandbox(); + }); + + afterEach(function() { + sandbox.restore(); + }); + + describe('function', function() { + describe('serialize', function() { + describe('when passed a non-object value', function() { + it('should return the value', function() { + expect(serialize('knees & toes'), 'to be', 'knees & toes'); + }); + }); + + describe('when passed an object value', function() { + describe('w/o a `serialize` method', function() { + it('should return the value', function() { + const obj = {}; + expect(serialize(obj), 'to be', obj); + }); + }); + + describe('having a `serialize` method', function() { + it('should return the result of the `serialize` method', function() { + const serializedObj = {foo: 'bar'}; + const obj = {serialize: sandbox.stub().returns(serializedObj)}; + expect(serialize(obj), 'to be', serializedObj); + }); + }); + }); + + describe('when not passed anything', function() { + it('should return `undefined`', function() { + expect(serialize(), 'to be undefined'); + }); + }); + }); + + describe('deserialize', function() { + describe('when passed nothing', function() { + it('should return `undefined`', function() { + expect(deserialize(), 'to be undefined'); + }); + }); + + describe('when passed a non-object value', function() { + it('should return the value', function() { + expect(deserialize(500), 'to be', 500); + }); + }); + + describe('when passed an object value which is not a SerializedWorkerResult', function() { + it('should return the value', function() { + const obj = {}; + expect(deserialize(obj), 'to be', obj); + }); + }); + + describe('when passed a SerializedWorkerResult object', function() { + // note that SerializedWorkerResult is an interface (typedef), not a class. 
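Since that typedef never exists as a class at runtime, here is a minimal sketch, not part of the diff, of the plain object a worker actually sends back over IPC. It is inferred from `Test#serialize` earlier in this diff and from the shapes asserted throughout this spec; the exact `__type` marker string and all field values are assumptions for illustration.

// Illustrative only: an approximate SerializedWorkerResult as received by the main process.
const exampleResult = {
  __type: 'SerializableWorkerResult', // assumed marker value; the check below only looks for a `__type` prop
  failures: 1,
  events: [
    {
      eventName: 'fail', // i.e. EVENT_TEST_FAIL
      data: {
        title: 'some test',
        $$fullTitle: 'some suite some test' // `$$`-prefixed props become accessor functions after deserialization
      },
      error: {message: 'oh no', stack: 'Error: oh no', __type: 'Error'} // errors travel as tagged plain objects
    }
  ]
};

`deserialize()` recognizes such an object via its `__type` prop and rebuilds `Error` instances and the `$$`-derived accessors, as the assertions below exercise.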
+ + it('should return the result of `SerializableWorkerResult.deserialize` called on the value', function() { + const obj = Object.assign({}, SerializableWorkerResult.create()); + sandbox + .stub(SerializableWorkerResult, 'deserialize') + .returns('butts'); + deserialize(obj); + expect( + SerializableWorkerResult.deserialize, + 'to have a call satisfying', + { + args: [obj], + returned: 'butts' + } + ); + }); + }); + }); + }); + + describe('SerializableEvent', function() { + describe('constructor', function() { + describe('when called without `eventName`', function() { + it('should throw', function() { + expect( + () => new SerializableEvent(), + 'to throw', + /expected a non-empty `eventName`/ + ); + }); + }); + + describe('when called with a non-object `rawObject`', function() { + it('should throw', function() { + expect( + () => new SerializableEvent('blub', 'glug'), + 'to throw', + /expected object, received \[string\]/ + ); + }); + }); + }); + + describe('instance method', function() { + describe('serialize', function() { + it('should mutate the instance in-place', function() { + const evt = SerializableEvent.create('foo'); + expect(evt.serialize(), 'to be', evt); + }); + + it('should freeze the instance', function() { + expect( + Object.isFrozen(SerializableEvent.create('foo').serialize()), + 'to be true' + ); + }); + + describe('when passed an object with a `serialize` method', function() { + it('should call the `serialize` method', function() { + const obj = { + serialize: sandbox.stub() + }; + SerializableEvent.create('some-event', obj).serialize(); + expect(obj.serialize, 'was called once'); + }); + }); + + describe('when passed an object containing an object with a `serialize` method', function() { + it('should call the `serialize` method', function() { + const stub = sandbox.stub(); + const obj = { + nested: { + serialize: stub + } + }; + SerializableEvent.create('some-event', obj).serialize(); + expect(stub, 'was called once'); + }); + }); + + describe('when passed an object containing a non-`serialize` method', function() { + it('should remove the method', function() { + const obj = { + func: () => {} + }; + + expect( + SerializableEvent.create('some-event', obj).serialize(), + 'to satisfy', + { + data: expect.it('not to have property', 'func') + } + ); + }); + }); + + describe('when passed an object containing an array', function() { + it('should serialize the array', function() { + const obj = { + list: [{herp: 'derp'}, {bing: 'bong'}] + }; + expect( + SerializableEvent.create('some-event', obj).serialize(), + 'to satisfy', + {data: {list: [{herp: 'derp'}, {bing: 'bong'}]}} + ); + }); + }); + + describe('when passed an error', function() { + it('should serialize the error', function() { + const obj = {}; + const err = new Error('monkeypants'); + expect( + SerializableEvent.create('some-event', obj, err).serialize(), + 'to satisfy', + { + eventName: 'some-event', + error: { + message: 'monkeypants', + stack: /^Error: monkeypants/, + __type: 'Error' + }, + data: obj + } + ); + }); + + it('should retain own props', function() { + const obj = {}; + const err = new Error('monkeypants'); + err.code = 'MONKEY'; + expect( + SerializableEvent.create('some-event', obj, err).serialize(), + 'to satisfy', + { + eventName: 'some-event', + error: { + code: 'MONKEY', + message: 'monkeypants', + stack: /^Error: monkeypants/, + __type: 'Error' + }, + data: obj + } + ); + }); + + it('should not retain not-own props', function() { + const obj = {}; + const err = new Error('monkeypants'); + // 
eslint-disable-next-line no-proto + err.__proto__.code = 'MONKEY'; + expect( + SerializableEvent.create('some-event', obj, err).serialize(), + 'to satisfy', + { + eventName: 'some-event', + error: { + message: 'monkeypants', + stack: /^Error: monkeypants/, + __type: 'Error' + }, + data: obj + } + ); + }); + }); + + describe('when passed an object containing a top-level prop with an Error value', function() { + it('should serialize the Error', function() { + const obj = { + monkeyError: new Error('pantsmonkey') + }; + const evt = SerializableEvent.create('some-event', obj); + expect(evt.serialize(), 'to satisfy', { + eventName: 'some-event', + data: { + monkeyError: { + message: 'pantsmonkey', + stack: /^Error: pantsmonkey/, + __type: 'Error' + } + } + }); + }); + }); + describe('when passed an object containing a nested prop with an Error value', function() { + it('should serialize the Error', function() { + const obj = { + nestedObj: { + monkeyError: new Error('pantsmonkey') + } + }; + const evt = SerializableEvent.create('some-event', obj); + expect(evt.serialize(), 'to satisfy', { + eventName: 'some-event', + data: { + nestedObj: { + monkeyError: { + message: 'pantsmonkey', + stack: /^Error: pantsmonkey/, + __type: 'Error' + } + } + } + }); + }); + }); + }); + }); + + describe('static method', function() { + describe('deserialize', function() { + describe('when passed a falsy parameter', function() { + it('should throw "invalid arg type" error', function() { + expect(SerializableEvent.deserialize, 'to throw', { + code: 'ERR_MOCHA_INVALID_ARG_TYPE' + }); + }); + }); + + it('should return a new object w/ null prototype', function() { + const obj = {bob: 'bob'}; + expect(SerializableEvent.deserialize(obj), 'to satisfy', obj) + .and('not to equal', obj) + .and('not to have property', 'constructor'); + }); + + describe('when passed value contains `data` prop', function() { + it('should ignore __proto__', function() { + const obj = { + data: Object.create(null) + }; + // eslint-disable-next-line no-proto + obj.data.__proto__ = {peaches: 'prunes'}; + + const expected = Object.assign(Object.create(null), { + data: Object.create(null) + }); + expect(SerializableEvent.deserialize(obj), 'to equal', expected); + }); + + describe('when `data` prop contains a nested serialized Error prop', function() { + it('should create an Error instance from the nested serialized Error prop', function() { + const message = 'problems!'; + const stack = 'problem instructions'; + const code = 'EIEIO'; + const expected = Object.assign(Object.create(null), { + data: { + whoops: Object.assign(new Error(message), { + stack, + code + }) + } + }); + + expect( + SerializableEvent.deserialize({ + data: { + whoops: { + message, + stack, + code, + __type: 'Error' + } + } + }), + 'to equal', + expected + ); + }); + }); + }); + + describe('when passed value contains an `error` prop', function() { + it('should create an Error instance from the prop', function() { + const message = 'problems!'; + const stack = 'problem instructions'; + const code = 'EIEIO'; + const expected = Object.assign(Object.create(null), { + error: Object.assign(new Error(message), { + stack, + code + }) + }); + + expect( + SerializableEvent.deserialize({ + error: { + message, + stack, + code, + __type: 'Error' + } + }), + 'to equal', + expected + ); + }); + }); + + describe('when passed value data contains a prop beginning with "$$"', function() { + let result; + + beforeEach(function() { + result = SerializableEvent.deserialize({data: {$$foo: 'bar'}}); + 
}); + it('should create a new prop having a function value', function() { + expect(result, 'to satisfy', { + data: { + foo: expect.it('to be a function') + } + }); + }); + + it('should create a new prop returning the original value', function() { + expect(result.data.foo(), 'to equal', 'bar'); + }); + + it('should remove the prop with the "$$" prefix', function() { + expect(result, 'not to have property', '$$foo'); + }); + }); + + describe('when the value data contains a prop with an array value', function() { + beforeEach(function() { + sandbox.spy(SerializableEvent, '_deserializeObject'); + }); + + it('should deserialize each prop', function() { + const obj = {data: {foo: [{bar: 'baz'}]}}; + SerializableEvent.deserialize(obj); + expect( + SerializableEvent._deserializeObject, + 'to have a call satisfying', + { + args: [obj.data.foo, 0] + } + ); + }); + }); + }); + + describe('create', function() { + it('should instantiate a SerializableEvent', function() { + expect( + SerializableEvent.create('some-event'), + 'to be a', + SerializableEvent + ); + }); + }); + }); + }); + + describe('SerializableWorkerResult', function() { + describe('static method', function() { + describe('create', function() { + it('should return a new SerializableWorkerResult instance', function() { + expect( + SerializableWorkerResult.create(), + 'to be a', + SerializableWorkerResult + ); + }); + }); + + describe('isSerializedWorkerResult', function() { + describe('when passed an instance', function() { + it('should return `true`', function() { + expect( + SerializableWorkerResult.isSerializedWorkerResult( + new SerializableWorkerResult() + ), + 'to be true' + ); + }); + }); + + describe('when passed an object with an appropriate `__type` prop', function() { + it('should return `true`', function() { + // this is the most likely use-case, as the object is transmitted over IPC + // and loses its prototype + const original = new SerializableWorkerResult(); + const clone = Object.assign({}, original); + expect( + SerializableWorkerResult.isSerializedWorkerResult(clone), + 'to be true' + ); + }); + }); + + describe('when passed an object without an appropriate `__type` prop', function() { + it('should return `false`', function() { + expect( + SerializableWorkerResult.isSerializedWorkerResult({ + mister: 'mister' + }), + 'to be false' + ); + }); + }); + }); + + describe('deserialize', function() { + beforeEach(function() { + sandbox.stub(SerializableEvent, 'deserialize'); + }); + + it('should call SerializableEvent#deserialize on each item in its `events` prop', function() { + const result = Object.assign( + {}, + SerializableWorkerResult.create([ + {eventName: 'foo'}, + {eventName: 'bar'} + ]) + ); + SerializableWorkerResult.deserialize(result); + expect(SerializableEvent.deserialize, 'to have calls satisfying', [ + {args: [{eventName: 'foo'}]}, + {args: [{eventName: 'bar'}]} + ]); + }); + + it('should return the deserialized value', function() { + const result = Object.assign( + {}, + SerializableWorkerResult.create([ + {eventName: 'foo'}, + {eventName: 'bar'} + ]) + ); + expect( + SerializableWorkerResult.deserialize(result), + 'to equal', + result + ); + }); + }); + }); + + describe('instance method', function() { + describe('serialize', function() { + it('should return a read-only value', function() { + expect( + Object.isFrozen(SerializableWorkerResult.create().serialize()), + 'to be true' + ); + }); + + it('should call `SerializableEvent#serialize` of each of its events', function() { + 
sandbox.spy(SerializableEvent.prototype, 'serialize'); + const events = [ + SerializableEvent.create('foo'), + SerializableEvent.create('bar') + ]; + SerializableWorkerResult.create(events).serialize(); + expect( + SerializableEvent.prototype.serialize, + 'to have calls satisfying', + [{thisValue: events[0]}, {thisValue: events[1]}] + ); + }); + }); + }); + describe('constructor', function() { + // the following two tests should be combined into one, but not sure how to express + // as a single assertion + + it('should add a readonly `__type` prop', function() { + expect( + new SerializableWorkerResult(), + 'to have readonly property', + '__type' + ); + }); + }); + }); +}); diff --git a/test/node-unit/worker.spec.js b/test/node-unit/worker.spec.js new file mode 100644 index 0000000000..94c3ceef18 --- /dev/null +++ b/test/node-unit/worker.spec.js @@ -0,0 +1,189 @@ +'use strict'; + +const {SerializableWorkerResult} = require('../../lib/serializer'); +const rewiremock = require('rewiremock/node'); +const {createSandbox} = require('sinon'); + +const WORKER_PATH = require.resolve('../../lib/worker.js'); + +describe('worker', function() { + let worker; + let workerpoolWorker; + let sandbox; + + beforeEach(function() { + sandbox = createSandbox(); + workerpoolWorker = sandbox.stub(); + sandbox.spy(process, 'removeAllListeners'); + }); + + describe('when run as main "thread"', function() { + it('should throw', function() { + expect(() => { + rewiremock.proxy(WORKER_PATH, { + workerpool: { + isMainThread: true, + worker: workerpoolWorker + } + }); + }, 'to throw'); + }); + }); + + describe('when run as "worker thread"', function() { + class MockMocha {} + let serializer; + let runHelpers; + + beforeEach(function() { + MockMocha.prototype.addFile = sandbox.stub().returnsThis(); + MockMocha.prototype.loadFilesAsync = sandbox.stub(); + MockMocha.prototype.run = sandbox.stub(); + MockMocha.interfaces = { + bdd: sandbox.stub() + }; + + serializer = { + serialize: sandbox.stub() + }; + + runHelpers = { + handleRequires: sandbox.stub(), + validatePlugin: sandbox.stub(), + loadRootHooks: sandbox.stub().resolves() + }; + + worker = rewiremock.proxy(WORKER_PATH, { + workerpool: { + isMainThread: false, + worker: workerpoolWorker + }, + '../../lib/mocha': MockMocha, + '../../lib/serializer': serializer, + '../../lib/cli/run-helpers': runHelpers + }); + }); + + it('should register itself with workerpool', function() { + expect(workerpoolWorker, 'to have a call satisfying', [ + {run: worker.run} + ]); + }); + + describe('function', function() { + describe('run', function() { + describe('when called without arguments', function() { + it('should reject', async function() { + return expect(worker.run, 'to be rejected with error satisfying', { + code: 'ERR_MOCHA_INVALID_ARG_TYPE' + }); + }); + }); + + describe('when called with empty "filepath" argument', function() { + it('should reject', async function() { + return expect( + () => worker.run(''), + 'to be rejected with error satisfying', + { + code: 'ERR_MOCHA_INVALID_ARG_TYPE' + } + ); + }); + }); + + describe('when the file at "filepath" argument is unloadable', function() { + it('should reject', async function() { + MockMocha.prototype.loadFilesAsync.rejects(); + return expect( + () => worker.run('some-non-existent-file.js'), + 'to be rejected' + ); + }); + }); + + describe('when the file at "filepath" is loadable', function() { + let result; + beforeEach(function() { + result = SerializableWorkerResult.create(); + + 
MockMocha.prototype.loadFilesAsync.resolves(); + MockMocha.prototype.run.yields(result); + }); + + it('should handle "--require"', async function() { + await worker.run('some-file.js', {require: 'foo'}); + expect(runHelpers.handleRequires, 'to have a call satisfying', [ + 'foo' + ]).and('was called once'); + }); + + it('should handle "--ui"', async function() { + const argv = {}; + await worker.run('some-file.js', argv); + + expect(runHelpers.validatePlugin, 'to have a call satisfying', [ + argv, + 'ui', + MockMocha.interfaces + ]).and('was called once'); + }); + + it('should call Mocha#run', async function() { + await worker.run('some-file.js'); + expect(MockMocha.prototype.run, 'was called once'); + }); + + it('should remove all uncaughtException listeners', async function() { + await worker.run('some-file.js'); + expect(process.removeAllListeners, 'to have a call satisfying', [ + 'uncaughtException' + ]); + }); + + describe('when serialization succeeds', function() { + beforeEach(function() { + serializer.serialize.returnsArg(0); + }); + + it('should resolve with a SerializedWorkerResult', async function() { + return expect( + worker.run('some-file.js'), + 'to be fulfilled with', + result + ); + }); + }); + + describe('when serialization fails', function() { + beforeEach(function() { + serializer.serialize.throws(); + }); + + it('should reject', async function() { + return expect(worker.run('some-file.js'), 'to be rejected'); + }); + }); + + describe('when run twice', function() { + it('should initialize only once', async function() { + await worker.run('some-file.js'); + await worker.run('some-other-file.js'); + + expect(runHelpers, 'to satisfy', { + handleRequires: expect.it('was called once'), + validatePlugin: expect.it('was called once') + }); + }); + }); + }); + }); + }); + }); + + afterEach(function() { + sandbox.restore(); + // this is needed due to `require.cache` getting dumped in watch mode + process.removeAllListeners('beforeExit'); + }); +}); diff --git a/test/reporters/buffered.spec.js b/test/reporters/buffered.spec.js new file mode 100644 index 0000000000..981b939fd8 --- /dev/null +++ b/test/reporters/buffered.spec.js @@ -0,0 +1,230 @@ +'use strict'; + +// this reporter does not actually output anything to the terminal, so we +// need to test it differently. 
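Because the reporter writes nothing to the terminal, a minimal usage sketch may help orient the assertions that follow. It assumes only the contract exercised by this spec (a `Runner`-like event emitter passed to the constructor, buffered events, and `done(failures, callback)` delivering a SerializableWorkerResult); the require paths and event payloads are illustrative.

const {EventEmitter} = require('events');
const Buffered = require('../../lib/reporters/buffered'); // illustrative path

const runner = new EventEmitter(); // stand-in for a real Runner
const reporter = new Buffered(runner);

// nothing is printed; the event is captured as a SerializableEvent instead.
runner.emit('test end', {title: 'some test'}); // i.e. EVENT_TEST_END

// `done()` hands the buffered events plus the failure count to its callback
// as a SerializableWorkerResult, which the worker then serializes.
reporter.done(0, result => {
  console.log(result.failures, result.events.length);
});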
+ +const { + EVENT_SUITE_BEGIN, + EVENT_SUITE_END, + EVENT_TEST_FAIL, + EVENT_TEST_PASS, + EVENT_TEST_PENDING, + EVENT_TEST_BEGIN, + EVENT_TEST_END, + EVENT_TEST_RETRY, + EVENT_DELAY_BEGIN, + EVENT_DELAY_END, + EVENT_HOOK_BEGIN, + EVENT_HOOK_END, + EVENT_RUN_END +} = require('../../lib/runner').constants; +const {EventEmitter} = require('events'); +const {createSandbox} = require('sinon'); +const rewiremock = require('rewiremock/node'); + +describe('Buffered', function() { + let sandbox; + let runner; + let Buffered; + + beforeEach(function() { + sandbox = createSandbox(); + runner = new EventEmitter(); + Buffered = rewiremock.proxy( + require.resolve('../../lib/reporters/buffered.js'), + { + '../../lib/serializer': { + SerializableEvent: { + create: (eventName, runnable, err) => ({ + eventName, + data: runnable, + error: err, + __type: 'MockSerializableEvent' + }) + }, + SerializableWorkerResult: { + create: (events, failures) => ({ + events, + failures, + __type: 'MockSerializableWorkerResult' + }) + } + }, + '../../lib/reporters/base': class MockBase {} + } + ); + }); + + afterEach(function() { + sandbox.restore(); + }); + + describe('constructor', function() { + it('should listen for Runner events', function() { + // EventEmitter#once calls thru to EventEmitter#on, which + // befouls our assertion below. + sandbox.stub(runner, 'once'); + sandbox.stub(runner, 'on'); + // eslint-disable-next-line no-new + new Buffered(runner); + expect(runner.on, 'to have calls satisfying', [ + // via Buffered + [EVENT_SUITE_BEGIN, expect.it('to be a function')], + [EVENT_SUITE_END, expect.it('to be a function')], + [EVENT_TEST_BEGIN, expect.it('to be a function')], + [EVENT_TEST_PENDING, expect.it('to be a function')], + [EVENT_TEST_FAIL, expect.it('to be a function')], + [EVENT_TEST_PASS, expect.it('to be a function')], + [EVENT_TEST_RETRY, expect.it('to be a function')], + [EVENT_TEST_END, expect.it('to be a function')], + [EVENT_HOOK_BEGIN, expect.it('to be a function')], + [EVENT_HOOK_END, expect.it('to be a function')] + ]); + }); + + it('should listen for Runner events expecting to occur once', function() { + sandbox.stub(runner, 'once'); + // eslint-disable-next-line no-new + new Buffered(runner); + expect(runner.once, 'to have calls satisfying', [ + [EVENT_DELAY_BEGIN, expect.it('to be a function')], + [EVENT_DELAY_END, expect.it('to be a function')], + [EVENT_RUN_END, expect.it('to be a function')] + ]); + }); + }); + + describe('event', function() { + let reporter; + + beforeEach(function() { + reporter = new Buffered(runner); + }); + + describe('on EVENT_RUN_END', function() { + it('should remove all listeners', function() { + runner.emit(EVENT_RUN_END); + expect(runner.listeners(), 'to be empty'); + }); + }); + + describe('on any other event listened for', function() { + it('should populate its `events` array with SerializableEvents', function() { + const suite = { + title: 'some suite' + }; + const test = { + title: 'some test' + }; + runner.emit(EVENT_SUITE_BEGIN, suite); + runner.emit(EVENT_TEST_BEGIN, test); + runner.emit(EVENT_TEST_PASS, test); + runner.emit(EVENT_TEST_END, test); + runner.emit(EVENT_SUITE_END, suite); + expect(reporter.events, 'to equal', [ + { + eventName: EVENT_SUITE_BEGIN, + data: suite, + __type: 'MockSerializableEvent' + }, + { + eventName: EVENT_TEST_BEGIN, + data: test, + __type: 'MockSerializableEvent' + }, + { + eventName: EVENT_TEST_PASS, + data: test, + __type: 'MockSerializableEvent' + }, + { + eventName: EVENT_TEST_END, + data: test, + __type: 
'MockSerializableEvent' + }, + { + eventName: EVENT_SUITE_END, + data: suite, + __type: 'MockSerializableEvent' + } + ]); + }); + }); + }); + + describe('instance method', function() { + let reporter; + + beforeEach(function() { + reporter = new Buffered(runner); + }); + + describe('done', function() { + it('should execute its callback with a SerializableWorkerResult', function() { + const suite = { + title: 'some suite' + }; + const test = { + title: 'some test' + }; + runner.emit(EVENT_SUITE_BEGIN, suite); + runner.emit(EVENT_TEST_BEGIN, test); + runner.emit(EVENT_TEST_PASS, test); + runner.emit(EVENT_TEST_END, test); + runner.emit(EVENT_SUITE_END, suite); + const cb = sandbox.stub(); + reporter.done(0, cb); + expect(cb, 'to have a call satisfying', [ + { + events: [ + { + eventName: EVENT_SUITE_BEGIN, + data: suite, + __type: 'MockSerializableEvent' + }, + { + eventName: EVENT_TEST_BEGIN, + data: test, + __type: 'MockSerializableEvent' + }, + { + eventName: EVENT_TEST_PASS, + data: test, + __type: 'MockSerializableEvent' + }, + { + eventName: EVENT_TEST_END, + data: test, + __type: 'MockSerializableEvent' + }, + { + eventName: EVENT_SUITE_END, + data: suite, + __type: 'MockSerializableEvent' + } + ], + failures: 0, + __type: 'MockSerializableWorkerResult' + } + ]); + }); + + it('should reset its `events` prop', function() { + const suite = { + title: 'some suite' + }; + const test = { + title: 'some test' + }; + runner.emit(EVENT_SUITE_BEGIN, suite); + runner.emit(EVENT_TEST_BEGIN, test); + runner.emit(EVENT_TEST_PASS, test); + runner.emit(EVENT_TEST_END, test); + runner.emit(EVENT_SUITE_END, suite); + const cb = sandbox.stub(); + reporter.done(0, cb); + expect(reporter.events, 'to be empty'); + }); + }); + }); +}); diff --git a/test/reporters/tap.spec.js b/test/reporters/tap.spec.js index f3bfe8d473..01294e0430 100644 --- a/test/reporters/tap.spec.js +++ b/test/reporters/tap.spec.js @@ -40,29 +40,17 @@ describe('TAP reporter', function() { describe('event handlers', function() { describe("on 'start' event", function() { var expectedSuite = 'some suite'; - var expectedTotal = 10; - var expectedString; var stdout = []; before(function() { var runner = createMockRunner('start', EVENT_RUN_BEGIN); runner.suite = expectedSuite; - runner.grepTotal = function(string) { - expectedString = string; - return expectedTotal; - }; stdout = runReporter({}, runner, options); }); it('should not write a TAP specification version', function() { expect(stdout, 'not to contain', 'TAP version'); }); - - it('should write the number of tests that it plans to run', function() { - var expectedArray = ['1..' 
+ expectedTotal + '\n']; - expect(stdout, 'to equal', expectedArray); - expect(expectedString, 'to be', expectedSuite); - }); }); describe("on 'pending' event", function() { @@ -78,7 +66,6 @@ describe('TAP reporter', function() { test ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -102,7 +89,6 @@ describe('TAP reporter', function() { test ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -141,7 +127,6 @@ describe('TAP reporter', function() { } }; runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -171,7 +156,6 @@ describe('TAP reporter', function() { error ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -209,7 +193,6 @@ describe('TAP reporter', function() { } }; runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -245,7 +228,6 @@ describe('TAP reporter', function() { } }; runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -271,11 +253,10 @@ describe('TAP reporter', function() { test ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); - it('should write total tests, passes, and failures', function() { + it('should write total tests, passes, failures, & plan', function() { var numberOfPasses = 1; var numberOfFails = 1; var totalTests = numberOfPasses + numberOfFails; @@ -284,7 +265,8 @@ describe('TAP reporter', function() { 'not ok ' + numberOfFails + ' ' + expectedTitle + '\n', '# tests ' + totalTests + '\n', '# pass ' + numberOfPasses + '\n', - '# fail ' + numberOfFails + '\n' + '# fail ' + numberOfFails + '\n', + '1..' + totalTests + '\n' ]; expect(stdout, 'to equal', expectedArray); }); @@ -302,17 +284,11 @@ describe('TAP reporter', function() { describe('event handlers', function() { describe("on 'start' event", function() { var expectedSuite = 'some suite'; - var expectedTotal = 10; - var expectedString; var stdout; before(function() { var runner = createMockRunner('start', EVENT_RUN_BEGIN); runner.suite = expectedSuite; - runner.grepTotal = function(string) { - expectedString = string; - return expectedTotal; - }; stdout = runReporter({}, runner, options); }); @@ -321,12 +297,6 @@ describe('TAP reporter', function() { var expectedFirstLine = 'TAP version ' + tapVersion + '\n'; expect(stdout[0], 'to equal', expectedFirstLine); }); - - it('should write the number of tests that it plans to run', function() { - var expectedSecondLine = '1..' 
+ expectedTotal + '\n'; - expect(stdout[1], 'to equal', expectedSecondLine); - expect(expectedString, 'to be', expectedSuite); - }); }); describe("on 'pending' event", function() { @@ -342,7 +312,6 @@ describe('TAP reporter', function() { test ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -366,7 +335,6 @@ describe('TAP reporter', function() { test ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -405,7 +373,6 @@ describe('TAP reporter', function() { } }; runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -438,7 +405,6 @@ describe('TAP reporter', function() { error ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -479,7 +445,6 @@ describe('TAP reporter', function() { } }; runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -519,7 +484,6 @@ describe('TAP reporter', function() { } }; runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); @@ -545,11 +509,10 @@ describe('TAP reporter', function() { test ); runner.suite = ''; - runner.grepTotal = noop; stdout = runReporter({}, runner, options); }); - it('should write total tests, passes, and failures', function() { + it('should write total tests, passes, failures & plan', function() { var numberOfPasses = 1; var numberOfFails = 1; var totalTests = numberOfPasses + numberOfFails; @@ -558,7 +521,8 @@ describe('TAP reporter', function() { 'not ok ' + numberOfFails + ' ' + expectedTitle + '\n', '# tests ' + totalTests + '\n', '# pass ' + numberOfPasses + '\n', - '# fail ' + numberOfFails + '\n' + '# fail ' + numberOfFails + '\n', + '1..' + totalTests + '\n' ]; expect(stdout, 'to equal', expectedArray); });
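Taken together, the revised TAP assertions (in both blocks above) drop the `Runner#grepTotal`-based plan at the start of the stream and instead expect the plan after the summary comments. A small reference sketch of the epilogue these expectations describe, with placeholder titles:

// Reference only -- the stream shape the updated expectations assert.
var expectedTapOutput = [
  'ok 1 some passing test\n',
  'not ok 1 some failing test\n',
  '# tests 2\n',
  '# pass 1\n',
  '# fail 1\n',
  '1..2\n' // the plan is now written last, once the total is actually known
].join('');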