From f82ce59d710e8b5cc30b9a9b07a9da3e8e715768 Mon Sep 17 00:00:00 2001 From: David Himmelstrup Date: Tue, 23 Aug 2022 15:25:17 +0200 Subject: [PATCH] Remove critcmp code (it belongs in cargo-criterion) (#610) * Delete critcmp code (it belongs in cargo-criterion) * Bump MSRV to 1.57 due to os_str_bytes. * Mention MSRV bump in CHANGELOG. --- .github/workflows/ci.yaml | 2 +- CHANGELOG.md | 2 +- Cargo.toml | 3 - README.md | 2 +- book/src/SUMMARY.md | 3 +- book/src/user_guide/tabulating_results.md | 294 ---------------------- src/critcmp/app.rs | 150 ----------- src/critcmp/data.rs | 227 ----------------- src/critcmp/main.rs | 131 ---------- src/critcmp/mod.rs | 5 - src/critcmp/output.rs | 234 ----------------- src/lib.rs | 75 ------ 12 files changed, 4 insertions(+), 1124 deletions(-) delete mode 100644 book/src/user_guide/tabulating_results.md delete mode 100644 src/critcmp/app.rs delete mode 100644 src/critcmp/data.rs delete mode 100644 src/critcmp/main.rs delete mode 100644 src/critcmp/mod.rs delete mode 100644 src/critcmp/output.rs diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index d4f3d417..d7f39e49 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -17,7 +17,7 @@ jobs: rust: - stable - beta - - 1.56.1 # MSRV + - 1.57 # MSRV steps: - uses: actions/checkout@v2 diff --git a/CHANGELOG.md b/CHANGELOG.md index 89f4d80a..053bda9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,7 +20,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0. - HTML report hidden behind non-default feature flag: 'html_reports' - Standalone support (ie without cargo-criterion) feature flag: 'cargo_bench_support' -- MSRV bumped to 1.56.1 +- MSRV bumped to 1.57 - `rayon` and `plotters` are optional (and default) dependencies. - Status messages ('warming up', 'analyzing', etc) are printed to stderr, benchmark results are printed to stdout. - Accept subsecond durations for `--warm-up-time`, `--measurement-time` and `--profile-time`. diff --git a/Cargo.toml b/Cargo.toml index 75b37950..6470b962 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,9 +33,6 @@ cast = "0.3" num-traits = { version = "0.2", default-features = false, features = ["std"] } oorandom = "11.1" regex = { version = "1.5", default-features = false, features = ["std"] } -tabwriter = "1.2.1" -termcolor = "1.1.2" -unicode-width = "0.1.9" # Optional dependencies rayon = { version = "1.3", optional = true } diff --git a/README.md b/README.md index a243e983..a4dfc091 100644 --- a/README.md +++ b/README.md @@ -119,7 +119,7 @@ For more details, see the [CONTRIBUTING.md file](https://github.com/bheisler/cri Criterion.rs supports the last three stable minor releases of Rust. At time of writing, this means Rust 1.59 or later. Older versions may work, but are not guaranteed. -Currently, the oldest version of Rust believed to work is 1.56.1. Future versions of Criterion.rs may +Currently, the oldest version of Rust believed to work is 1.57. Future versions of Criterion.rs may break support for such old versions, and this will not be considered a breaking change. If you require Criterion.rs to work on old versions of Rust, you will need to stick to a specific patch version of Criterion.rs. 
diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
index 25bcd1f8..b7268009 100644
--- a/book/src/SUMMARY.md
+++ b/book/src/SUMMARY.md
@@ -20,7 +20,6 @@
 - [Custom Test Framework](./user_guide/custom_test_framework.md)
 - [Benchmarking async functions](./user_guide/benchmarking_async.md)
 - [Quick Mode](./user_guide/quick_mode.md)
-- [Tabulating Results](./user_guide/tabulating_results.md)
 - [WebAssembly/WASI](./user_guide/wasi.md)
 - [cargo-criterion](./cargo_criterion/cargo_criterion.md)
 - [Configuring cargo-criterion](./cargo_criterion/configuring_cargo_criterion.md)
@@ -30,4 +29,4 @@
 - [Comparison to Criterion.rs](./iai/comparison.md)
 - [Analysis Process](./analysis.md)
 - [Frequently Asked Questions](./faq.md)
-- [Migrating from 0.2.* to 0.3.*](./migrating_0_2_to_0_3.md)
\ No newline at end of file
+- [Migrating from 0.2.* to 0.3.*](./migrating_0_2_to_0_3.md)
diff --git a/book/src/user_guide/tabulating_results.md b/book/src/user_guide/tabulating_results.md
deleted file mode 100644
index 2e86f0d0..00000000
--- a/book/src/user_guide/tabulating_results.md
+++ /dev/null
@@ -1,294 +0,0 @@
-
-
-# Tabulating Results
-
-Criterion can save the results of different benchmark runs and
-tabulate the results, making it easier to spot performance changes.
-
-The set of results from a benchmark run is called a `baseline`, and each
-`baseline` has a name. By default, the most recent run is named `base`, but this
-can be changed with the `--save-baseline {name}` flag. There's also a special
-baseline called `new` which refers to the most recent set of results.
-
-## Comparing profiles
-
-Cargo supports custom
-[profiles](https://doc.rust-lang.org/cargo/reference/profiles.html) for
-controlling the level of optimizations, debug assertions, overflow checks, and
-link-time optimizations. We can use Criterion to benchmark different profiles and
-tabulate the results to visualize the changes. Let's use the `hex` crate as
-an example:
-
-```bash
-> git clone https://github.com/KokaKiwi/rust-hex.git
-> cd rust-hex/
-```
-
-Now that we've cloned the repository, we can generate the first set of benchmark results:
-
-```bash
-> cargo bench --profile=release `# Use the 'release' profile` \
-      --bench=hex               `# Select the 'hex' binary` \
-      --                        `# Switch args from cargo to criterion` \
-      --save-baseline release   `# Save the baseline under 'release'`
-```
-
-Once the run is complete (this should take a few minutes), we can benchmark the other profile:
-
-```bash
-> cargo bench --profile=dev `# Use the 'dev' profile` \
-      --bench=hex           `# Select the 'hex' binary` \
-      --                    `# Switch args from cargo to criterion` \
-      --save-baseline dev   `# Save the baseline under 'dev'`
-```
-
-Finally, we can compare the two benchmark runs (scroll to the right to see all columns):
-
-```bash
-> cargo bench --bench=hex -- --compare --baselines=dev,release
-```
-
-<pre>
group                          dev                                               release
------                          ---                                               -------
-faster_hex_decode              239.50  847.6±16.54µs        ? ?/sec    1.00      3.5±0.01µs        ? ?/sec
-faster_hex_decode_fallback     52.58   567.7±8.36µs        ? ?/sec     1.00     10.8±0.04µs        ? ?/sec
-faster_hex_decode_unchecked    400.98   503.7±3.48µs        ? ?/sec    1.00   1256.2±1.57ns        ? ?/sec
-faster_hex_encode              259.95   244.5±2.04µs        ? ?/sec    1.00    940.5±4.64ns        ? ?/sec
-faster_hex_encode_fallback     50.60   565.1±3.41µs        ? ?/sec     1.00     11.2±0.02µs        ? ?/sec
-hex_decode                     25.27     3.0±0.01ms        ? ?/sec     1.00    119.3±0.17µs        ? ?/sec
-hex_encode                     23.99 1460.8±18.11µs        ? ?/sec     1.00     60.9±0.08µs        ? ?/sec
-rustc_hex_decode               28.79     3.1±0.02ms        ? ?/sec     1.00    107.4±0.40µs        ? ?/sec
-rustc_hex_encode               25.80  1385.4±4.37µs        ? ?/sec     1.00    53.7±15.63µs        ? ?/sec
-</pre>
-
-The first column in the above results has the names of each individual
-benchmark. The two other columns (`dev` and `release`) contain the actual
-benchmark results. Each baseline column starts with a performance index relative
-to the fastest run (e.g. `faster_hex_decode` for `dev` has a performance index of
-239.50 because it is 239.50 times slower than the `release` build). Next is the
-mean execution time plus the standard deviation (e.g. 847.6±16.54µs). Lastly
-there's an optional throughput. If no throughput data is available, it is
-printed as `? ?/sec`.
-
-## Compact list view
-
-If horizontal space is limited, or if you're comparing more than two baselines,
-it can be convenient to arrange the results in a vertical list rather than in a
-table. This can be enabled with the `--compare-list` flag:
-
-```
-faster_hex_decode
------------------
-release    1.00      3.5±0.01µs    ? ?/sec
-dev      239.50   847.6±16.54µs    ? ?/sec
-
-faster_hex_decode_fallback
---------------------------
-release    1.00     10.8±0.04µs    ? ?/sec
-dev       52.58    567.7±8.36µs    ? ?/sec
-
-faster_hex_decode_unchecked
----------------------------
-release    1.00   1256.2±1.57ns    ? ?/sec
-dev      400.98    503.7±3.48µs    ? ?/sec
-
-faster_hex_encode
------------------
-release    1.00    940.5±4.64ns    ? ?/sec
-dev      259.95    244.5±2.04µs    ? ?/sec
-
-faster_hex_encode_fallback
---------------------------
-release    1.00     11.2±0.02µs    ? ?/sec
-dev       50.60    565.1±3.41µs    ? ?/sec
-
-hex_decode
-----------
-release    1.00    119.3±0.17µs    ? ?/sec
-dev       25.27      3.0±0.01ms    ? ?/sec
-
-hex_encode
-----------
-release    1.00     60.9±0.08µs    ? ?/sec
-dev       23.99  1460.8±18.11µs    ? ?/sec
-
-rustc_hex_decode
-----------------
-release    1.00    107.4±0.40µs    ? ?/sec
-dev       28.79      3.1±0.02ms    ? ?/sec
-
-rustc_hex_encode
-----------------
-release    1.00    53.7±15.63µs    ? ?/sec
-dev       25.80   1385.4±4.37µs    ? ?/sec
-```
-
-## Filtering results
-
-Some projects have dozens or even hundreds of benchmarks, which can be
-overwhelming if you're only interested in the performance of a single
-feature or function.
-
-Let's clone the `hex` crate and change just a single function:
-
-```bash
-> git clone https://github.com/KokaKiwi/rust-hex.git
-> cd rust-hex/
-```
-
-Save a baseline for the `main` branch:
-
-```bash
-> cargo bench --bench=hex `# Select the 'hex' binary` \
-      --                  `# Switch args from cargo to criterion` \
-      --save-baseline main `# Save the baseline under 'main'`
-```
-
-Create a new branch:
-
-```bash
-> git checkout -b new-feature
-```
-
-For testing, let's modify the `hex_decode` benchmark to run twice:
-
-```diff
---- a/benches/hex.rs
-+++ b/benches/hex.rs
- c.bench_function("hex_decode", |b| {
-     let hex = hex::encode(DATA);
--    b.iter(|| hex::decode(&hex).unwrap())
-+    b.iter(|| (hex::decode(&hex).unwrap(), hex::decode(&hex).unwrap()))
- });
-```
-
-Now we can benchmark just the `hex_decode` function:
-
-```bash
-> cargo bench --bench=hex `# Select the 'hex' binary` \
-      --                          `# Switch args from cargo to criterion` \
-      --save-baseline new-feature `# Save the baseline under 'new-feature'` \
-      ^hex_decode                 `# Select the 'hex_decode' benchmark`
-```
-
-And compare it to the `main` branch, verifying that we've introduced a 2x
-performance regression:
-
-```bash
-> cargo bench --bench=hex -- --compare --baselines=main,new-feature ^hex_decode
-```
-
-<pre>
group                   main                                      new-feature
------                   ----                                      -----------
-hex_decode    1.00    119.1±1.30µs        ? ?/sec    2.06    245.5±2.21µs        ? ?/sec
-</pre>
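-
-The filter is an ordinary regular expression, so several benchmarks can be
-selected at once. As a hypothetical example, the following run would tabulate
-`hex_encode` and `hex_decode` together:
-
-```bash
-> cargo bench --bench=hex -- --compare --baselines=main,new-feature '^hex_(en|de)code$'
-```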
-
-## Thresholds
-
-If we don't know which benchmarks are of interest, we can filter the results
-based on how much they've changed.
-
-In the previous section, we only generated results for the `hex_decode`
-benchmark. For this run, we need a complete set of results:
-
-```bash
-> cargo bench --bench=hex `# Select the 'hex' binary` \
-      --                          `# Switch args from cargo to criterion` \
-      --save-baseline new-feature `# Save the baseline under 'new-feature'`
-```
-
-Now we can restrict the comparison to results that differ by more than 10%:
-
-```bash
-> cargo bench --bench=hex -- --compare --baselines=main,new-feature --compare-threshold=10
-```
-
-<pre>
group                   main                                      new-feature
------                   ----                                      -----------
-hex_decode    1.00    119.1±1.30µs        ? ?/sec    2.02    240.0±1.05µs        ? ?/sec
-</pre>
-
-The above console output shows that only a single benchmark changed by more
-than 10%.
-
-## Importing/Exporting JSON
-
-Baselines can be saved in JSON files for later use with the `--export` flag.
-Continuing with the `hex` crate example, here's how to save the `release` and
-`dev` baselines as JSON:
-
-```bash
-> cargo bench --bench=hex -- --export release > release.json
-```
-
-```bash
-> cargo bench --bench=hex -- --export dev > dev.json
-```
-
-Baselines stored as JSON can be referenced directly when comparing results:
-
-```bash
-> cargo bench --bench=hex -- --compare --baselines dev.json,release.json
-```
-
-<pre>
group                          dev                                               release
------                          ---                                               -------
-faster_hex_decode              239.50  847.6±16.54µs        ? ?/sec    1.00      3.5±0.01µs        ? ?/sec
-faster_hex_decode_fallback     52.58   567.7±8.36µs        ? ?/sec     1.00     10.8±0.04µs        ? ?/sec
-faster_hex_decode_unchecked    400.98   503.7±3.48µs        ? ?/sec    1.00   1256.2±1.57ns        ? ?/sec
-faster_hex_encode              259.95   244.5±2.04µs        ? ?/sec    1.00    940.5±4.64ns        ? ?/sec
-faster_hex_encode_fallback     50.60   565.1±3.41µs        ? ?/sec     1.00     11.2±0.02µs        ? ?/sec
-hex_decode                     25.27     3.0±0.01ms        ? ?/sec     1.00    119.3±0.17µs        ? ?/sec
-hex_encode                     23.99 1460.8±18.11µs        ? ?/sec     1.00     60.9±0.08µs        ? ?/sec
-rustc_hex_decode               28.79     3.1±0.02ms        ? ?/sec     1.00    107.4±0.40µs        ? ?/sec
-rustc_hex_encode               25.80  1385.4±4.37µs        ? ?/sec     1.00    53.7±15.63µs        ? ?/sec
-</pre>
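-
-If you want to post-process an exported baseline outside of criterion, the
-file can be read back with any JSON library. Below is a minimal sketch in
-Rust, assuming `serde` (with the `derive` feature) and `serde_json` as
-dependencies; only a subset of the fields is modelled, and serde skips the
-rest:
-
-```rust
-use std::collections::BTreeMap;
-
-use serde::Deserialize;
-
-// Mirrors part of the exported baseline layout: one baseline name plus a
-// map from benchmark name to its recorded data.
-#[derive(Debug, Deserialize)]
-struct BaseBenchmarks {
-    name: String,
-    benchmarks: BTreeMap<String, Benchmark>,
-}
-
-#[derive(Debug, Deserialize)]
-struct Benchmark {
-    baseline: String,
-    fullname: String,
-    // The full format also carries `criterion_benchmark_v1` and
-    // `criterion_estimates_v1` objects; they are omitted here and
-    // silently ignored during deserialization.
-}
-
-fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let data = std::fs::read_to_string("release.json")?;
-    let base: BaseBenchmarks = serde_json::from_str(&data)?;
-    println!("baseline '{}': {} benchmarks", base.name, base.benchmarks.len());
-    for b in base.benchmarks.values() {
-        println!("  {} (from {})", b.fullname, b.baseline);
-    }
-    Ok(())
-}
-```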
-
-Note that the JSON format is not stable across criterion versions.
diff --git a/src/critcmp/app.rs b/src/critcmp/app.rs
deleted file mode 100644
index e6b3e5b8..00000000
--- a/src/critcmp/app.rs
+++ /dev/null
@@ -1,150 +0,0 @@
-use std::collections::BTreeSet;
-use std::fs;
-use std::io;
-use std::path::{Path, PathBuf};
-
-use regex::Regex;
-use tabwriter::TabWriter;
-use termcolor::{self, WriteColor};
-
-use crate::critcmp::data::{BaseBenchmarks, Benchmarks};
-use crate::critcmp::main::Result;
-
-#[derive(Clone, Debug, Default)]
-pub struct Args {
-    pub baselines: Vec<String>,
-    pub output_list: bool,
-    pub threshold: Option<f64>,
-    pub color: bool,
-    pub filter: Option<Regex>,
-}
-
-impl Args {
-    pub fn benchmarks(&self) -> Result<Benchmarks> {
-        // First, load benchmark data from command line parameters. If a
-        // baseline name is given and is not a file path, then it is added to
-        // our whitelist of baselines.
-        let mut from_cli: Vec<BaseBenchmarks> = vec![];
-        let mut whitelist = BTreeSet::new();
-        for arg in self.baselines.iter() {
-            let p = Path::new(arg);
-            if p.is_file() {
-                let baseb = BaseBenchmarks::from_path(p)
-                    .map_err(|err| format!("{}: {}", p.display(), err))?;
-                whitelist.insert(baseb.name.clone());
-                from_cli.push(baseb);
-            } else {
-                whitelist.insert(arg.clone());
-            }
-        }
-
-        let mut from_crit: Vec<BaseBenchmarks> = vec![];
-        match self.criterion_dir() {
-            Err(err) => {
-                // If we've loaded specific benchmarks from arguments, then it
-                // shouldn't matter whether we can find a Criterion directory.
-                // If we haven't loaded anything explicitly though, and if
-                // Criterion detection fails, then we won't have loaded
-                // anything and so we should return an error.
-                if from_cli.is_empty() {
-                    return Err(err);
-                }
-            }
-            Ok(critdir) => {
-                let data = Benchmarks::gather(critdir)?;
-                from_crit.extend(data.by_baseline.into_iter().map(|(_, v)| v));
-            }
-        }
-        if from_cli.is_empty() && from_crit.is_empty() {
-            fail!("could not find any benchmark data");
-        }
-
-        let mut data = Benchmarks::default();
-        for basebench in from_crit.into_iter().chain(from_cli) {
-            if !whitelist.is_empty() && !whitelist.contains(&basebench.name) {
-                continue;
-            }
-            data.by_baseline.insert(basebench.name.clone(), basebench);
-        }
-        Ok(data)
-    }
-
-    pub fn filter(&self) -> Option<&'_ Regex> {
-        self.filter.as_ref()
-    }
-
-    pub fn group(&self) -> Result<Option<Regex>> {
-        // TODO
-        Ok(None)
-        // let pattern_os = match self.0.value_of_os("group") {
-        //     None => return Ok(None),
-        //     Some(pattern) => pattern,
-        // };
-        // let pattern = cli::pattern_from_os(pattern_os)?;
-        // let re = Regex::new(pattern)?;
-        // if re.captures_len() <= 1 {
-        //     fail!(
-        //         "pattern '{}' has no capturing groups, by grouping \
-        //          benchmarks by a regex requires the use of at least \
-        //          one capturing group",
-        //         pattern
-        //     );
-        // }
-        // Ok(Some(re))
-    }
-
-    pub fn threshold(&self) -> Result<Option<f64>> {
-        Ok(self.threshold)
-    }
-
-    pub fn list(&self) -> bool {
-        self.output_list
-    }
-
-    pub fn criterion_dir(&self) -> Result<PathBuf> {
-        let target_dir = self.target_dir()?;
-        let crit_dir = target_dir.join("criterion");
-        if !crit_dir.exists() {
-            fail!(
-                "\
-                 no criterion data exists at {}\n\
-                 try running your benchmarks before tabulating results\
-                 ",
-                crit_dir.display()
-            );
-        }
-        Ok(crit_dir)
-    }
-
-    pub fn stdout(&self) -> Box<dyn WriteColor> {
-        if self.color {
-            Box::new(termcolor::Ansi::new(TabWriter::new(io::stdout())))
-        } else {
-            Box::new(termcolor::NoColor::new(TabWriter::new(io::stdout())))
-        }
-    }
-
-    fn target_dir(&self) -> Result<PathBuf> {
-        // FIXME: Use the same code as criterion
-        let mut cwd = fs::canonicalize(".")
-            .ok()
-            .unwrap_or_else(|| PathBuf::from("."));
-        loop {
-            let candidate = cwd.join("target");
-            if candidate.exists() {
-                return Ok(candidate);
-            }
-            cwd = match cwd.parent() {
-                Some(p) => p.to_path_buf(),
-                None => {
-                    fail!(
-                        "\
-                         could not find Criterion output directory\n\
-                         try using --target-dir or set CARGO_TARGET_DIR\
-                         "
-                    );
-                }
-            }
-        }
-    }
-}
diff --git a/src/critcmp/data.rs b/src/critcmp/data.rs
deleted file mode 100644
index 4ea32190..00000000
--- a/src/critcmp/data.rs
+++ /dev/null
@@ -1,227 +0,0 @@
-use std::collections::BTreeMap;
-use std::fs::File;
-use std::io;
-use std::path::Path;
-
-use serde::de::DeserializeOwned;
-// use serde::{Deserialize, Serialize};
-use serde_json as json;
-use walkdir::WalkDir;
-
-use crate::critcmp::main::Result;
-
-#[derive(Clone, Debug, Default)]
-pub struct Benchmarks {
-    pub by_baseline: BTreeMap<String, BaseBenchmarks>,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct BaseBenchmarks {
-    pub name: String,
-    pub benchmarks: BTreeMap<String, Benchmark>,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct Benchmark {
-    pub baseline: String,
-    pub fullname: String,
-    #[serde(rename = "criterion_benchmark_v1")]
-    pub info: CBenchmark,
-    #[serde(rename = "criterion_estimates_v1")]
-    pub estimates: CEstimates,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct CBenchmark {
-    pub group_id: String,
-    pub function_id: Option<String>,
-    pub value_str: Option<String>,
-    pub throughput: Option<CThroughput>,
-    pub full_id: String,
-    pub directory_name: String,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-#[serde(rename_all = "PascalCase")]
-pub struct CThroughput {
-    pub bytes: Option<u64>,
-    pub elements: Option<u64>,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct CEstimates {
-    pub mean: CStats,
-    pub median: CStats,
-    pub median_abs_dev: CStats,
-    pub slope: Option<CStats>,
-    pub std_dev: CStats,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct CStats {
-    pub confidence_interval: CConfidenceInterval,
-    pub point_estimate: f64,
-    pub standard_error: f64,
-}
-
-#[derive(Clone, Debug, Deserialize, Serialize)]
-pub struct CConfidenceInterval {
-    pub confidence_level: f64,
-    pub lower_bound: f64,
-    pub upper_bound: f64,
-}
-
-impl Benchmarks {
-    pub fn gather<P: AsRef<Path>>(criterion_dir: P) -> Result<Benchmarks> {
-        let mut benchmarks = Benchmarks::default();
-        for result in WalkDir::new(criterion_dir) {
-            let dent = result?;
-            let b = match Benchmark::from_path(dent.path())? {
-                None => continue,
-                Some(b) => b,
-            };
-            benchmarks
-                .by_baseline
-                .entry(b.baseline.clone())
-                .or_insert_with(|| BaseBenchmarks {
-                    name: b.baseline.clone(),
-                    benchmarks: BTreeMap::new(),
-                })
-                .benchmarks
-                .insert(b.benchmark_name().to_string(), b);
-        }
-        Ok(benchmarks)
-    }
-}
-
-impl Benchmark {
-    fn from_path<P: AsRef<Path>>(path: P) -> Result<Option<Benchmark>> {
-        let path = path.as_ref();
-        Benchmark::from_path_imp(path).map_err(|err| {
-            if let Some(parent) = path.parent() {
-                err!("{}: {}", parent.display(), err)
-            } else {
-                err!("unknown path: {}", err)
-            }
-        })
-    }
-
-    fn from_path_imp(path: &Path) -> Result<Option<Benchmark>> {
-        match path.file_name() {
-            None => return Ok(None),
-            Some(filename) => {
-                if filename != "estimates.json" {
-                    return Ok(None);
-                }
-            }
-        }
-        // Criterion's directory structure looks like this:
-        //
-        //     criterion/{group}/{name}/{baseline}/estimates.json
-        //
-        // In the same directory as `estimates.json`, there is also a
-        // `benchmark.json` which contains most of the info we need about
-        // a benchmark, including its name. From the path, we only extract the
-        // baseline name.
-        let parent = path
-            .parent()
-            .ok_or_else(|| err!("{}: could not find parent dir", path.display()))?;
-        let baseline = parent
-            .file_name()
-            .map(|p| p.to_string_lossy().into_owned())
-            .ok_or_else(|| err!("{}: could not find baseline name", path.display()))?;
-        if baseline == "change" {
-            // This isn't really a baseline, but special state emitted by
-            // Criterion to reflect its own comparison between baselines. We
-            // don't use it.
-            return Ok(None);
-        }
-
-        let info = CBenchmark::from_path(parent.join("benchmark.json"))?;
-        let estimates = CEstimates::from_path(path)?;
-        let fullname = format!("{}/{}", baseline, info.full_id);
-        Ok(Some(Benchmark {
-            baseline,
-            fullname,
-            info,
-            estimates,
-        }))
-    }
-
-    pub fn nanoseconds(&self) -> f64 {
-        self.estimates.mean.point_estimate
-    }
-
-    pub fn stddev(&self) -> f64 {
-        self.estimates.std_dev.point_estimate
-    }
-
-    pub fn fullname(&self) -> &str {
-        &self.fullname
-    }
-
-    pub fn baseline(&self) -> &str {
-        &self.baseline
-    }
-
-    pub fn benchmark_name(&self) -> &str {
-        &self.info.full_id
-    }
-
-    pub fn throughput(&self) -> Option<Throughput> {
-        const NANOS_PER_SECOND: f64 = 1_000_000_000.0;
-
-        let scale = NANOS_PER_SECOND / self.nanoseconds();
-
-        self.info.throughput.as_ref().and_then(|t| {
-            let scaled_bytes = t.bytes.map(|num| Throughput::Bytes(num as f64 * scale));
-            let scaled_elements = t
-                .elements
-                .map(|num| Throughput::Elements(num as f64 * scale));
-            scaled_bytes.or(scaled_elements)
-        })
-    }
-}
-
-#[derive(Clone, Copy, Debug)]
-pub enum Throughput {
-    Bytes(f64),
-    Elements(f64),
-}
-
-impl BaseBenchmarks {
-    pub fn from_path<P: AsRef<Path>>(path: P) -> Result<BaseBenchmarks> {
-        deserialize_json_path(path.as_ref())
-    }
-}
-
-impl CBenchmark {
-    fn from_path<P: AsRef<Path>>(path: P) -> Result<CBenchmark> {
-        deserialize_json_path(path.as_ref())
-    }
-}
-
-impl CEstimates {
-    fn from_path<P: AsRef<Path>>(path: P) -> Result<CEstimates> {
-        deserialize_json_path(path.as_ref())
-    }
-}
-
-fn deserialize_json_path<D: DeserializeOwned>(path: &Path) -> Result<D> {
-    let file = File::open(path).map_err(|err| {
-        if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
-            err!("{}: {}", name, err)
-        } else {
-            err!("{}: {}", path.display(), err)
-        }
-    })?;
-    let buf = io::BufReader::new(file);
-    let b = json::from_reader(buf).map_err(|err| {
-        if let Some(name) = path.file_name().and_then(|n| n.to_str()) {
-            err!("{}: {}", name, err)
-        } else {
-            err!("{}: {}", path.display(), err)
-        }
-    })?;
-    Ok(b)
-}
diff --git a/src/critcmp/main.rs b/src/critcmp/main.rs
deleted file mode 100644
index 1fb44703..00000000
--- a/src/critcmp/main.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-use std::collections::BTreeMap;
-use std::error::Error;
-use std::io::Write;
-use std::process;
-use std::result;
-
-use regex::Regex;
-
-use crate::critcmp::app::Args;
-use crate::critcmp::data::{Benchmark, Benchmarks};
-
-use crate::critcmp::output;
-
-macro_rules! err {
-    ($($tt:tt)*) => { Box::<dyn Error>::from(format!($($tt)*)) }
-}
-
-macro_rules! fail {
-    ($($tt:tt)*) => { return Err(err!($($tt)*)) }
-}
-
-pub type Result<T> = result::Result<T, Box<dyn Error>>;
-
-pub fn main(args: Args) {
-    if let Err(err) = try_main(args) {
-        eprintln!("{}", err);
-        process::exit(1);
-    }
-}
-
-fn try_main(args: Args) -> Result<()> {
-    let benchmarks = args.benchmarks()?;
-
-    let mut comps = match args.group()? {
-        None => group_by_baseline(&benchmarks, args.filter()),
-        Some(re) => group_by_regex(&benchmarks, &re, args.filter()),
-    };
-    if let Some(threshold) = args.threshold()? {
-        comps.retain(|comp| comp.biggest_difference() > threshold);
-    }
-    if comps.is_empty() {
-        fail!("no benchmark comparisons to show");
-    }
-
-    let mut wtr = args.stdout();
-    if args.list() {
-        output::rows(&mut wtr, &comps)?;
-    } else {
-        output::columns(&mut wtr, &comps)?;
-    }
-    wtr.flush()?;
-    Ok(())
-}
-
-fn group_by_baseline(benchmarks: &Benchmarks, filter: Option<&Regex>) -> Vec<output::Comparison> {
-    let mut byname: BTreeMap<String, Vec<output::Benchmark>> = BTreeMap::new();
-    for base_benchmarks in benchmarks.by_baseline.values() {
-        for (name, benchmark) in base_benchmarks.benchmarks.iter() {
-            if filter.map_or(false, |re| !re.is_match(name)) {
-                continue;
-            }
-            let output_benchmark =
-                output::Benchmark::from_data(benchmark).name(benchmark.baseline());
-            byname
-                .entry(name.to_string())
-                .or_insert_with(Vec::new)
-                .push(output_benchmark);
-        }
-    }
-    byname
-        .into_iter()
-        .map(|(name, benchmarks)| output::Comparison::new(&name, benchmarks))
-        .collect()
-}
-
-fn group_by_regex(
-    benchmarks: &Benchmarks,
-    group_by: &Regex,
-    filter: Option<&Regex>,
-) -> Vec<output::Comparison> {
-    let mut byname: BTreeMap<String, Vec<output::Benchmark>> = BTreeMap::new();
-    for base_benchmarks in benchmarks.by_baseline.values() {
-        for (name, benchmark) in base_benchmarks.benchmarks.iter() {
-            if filter.map_or(false, |re| !re.is_match(name)) {
-                continue;
-            }
-            let (bench, cmp) = match benchmark_names(benchmark, group_by) {
-                None => continue,
-                Some((bench, cmp)) => (bench, cmp),
-            };
-            let output_benchmark = output::Benchmark::from_data(benchmark).name(&bench);
-            byname
-                .entry(cmp)
-                .or_insert_with(Vec::new)
-                .push(output_benchmark);
-        }
-    }
-    byname
-        .into_iter()
-        .map(|(name, benchmarks)| output::Comparison::new(&name, benchmarks))
-        .collect()
-}
-
-fn benchmark_names(benchmark: &Benchmark, group_by: &Regex) -> Option<(String, String)> {
-    assert!(group_by.captures_len() > 1);
-
-    let caps = match group_by.captures(benchmark.benchmark_name()) {
-        None => return None,
-        Some(caps) => caps,
-    };
-
-    let mut bench_name = benchmark.benchmark_name().to_string();
-    let mut cmp_name = String::new();
-    let mut offset = 0;
-    for option in caps.iter().skip(1) {
-        let m = match option {
-            None => continue,
-            Some(m) => m,
-        };
-        cmp_name.push_str(m.as_str());
-        // Strip everything that doesn't match capturing groups. The leftovers
-        // are our benchmark name.
-        bench_name.drain((m.start() - offset)..(m.end() - offset));
-        offset += m.end() - m.start();
-    }
-    // Add the baseline name to the benchmark to disambiguate it from
-    // benchmarks with the same name in other baselines.
-    bench_name.insert_str(0, &format!("{}/", benchmark.baseline()));
-
-    Some((bench_name, cmp_name))
-}
diff --git a/src/critcmp/mod.rs b/src/critcmp/mod.rs
deleted file mode 100644
index 17f6ee44..00000000
--- a/src/critcmp/mod.rs
+++ /dev/null
@@ -1,5 +0,0 @@
-#[macro_use]
-pub mod main;
-pub mod app;
-pub mod data;
-pub mod output;
diff --git a/src/critcmp/output.rs b/src/critcmp/output.rs
deleted file mode 100644
index f6c68d91..00000000
--- a/src/critcmp/output.rs
+++ /dev/null
@@ -1,234 +0,0 @@
-use std::collections::{BTreeMap, BTreeSet};
-use std::iter;
-
-use termcolor::{Color, ColorSpec, WriteColor};
-use unicode_width::UnicodeWidthStr;
-
-use crate::critcmp::data;
-use crate::critcmp::main::Result;
-
-#[derive(Clone, Debug)]
-pub struct Comparison {
-    name: String,
-    benchmarks: Vec<Benchmark>,
-    name_to_index: BTreeMap<String, usize>,
-}
-
-#[derive(Clone, Debug)]
-pub struct Benchmark {
-    name: String,
-    nanoseconds: f64,
-    stddev: Option<f64>,
-    throughput: Option<data::Throughput>,
-    /// Whether this is the best benchmark in a group. This is only populated
-    /// when a `Comparison` is built.
-    best: bool,
-    /// The rank of this benchmark in a group. The best is always `1.0`. This
-    /// is only populated when a `Comparison` is built.
-    rank: f64,
-}
-
-impl Comparison {
-    pub fn new(name: &str, benchmarks: Vec<Benchmark>) -> Comparison {
-        let mut comp = Comparison {
-            name: name.to_string(),
-            benchmarks,
-            name_to_index: BTreeMap::new(),
-        };
-        if comp.benchmarks.is_empty() {
-            return comp;
-        }
-
-        comp.benchmarks
-            .sort_by(|a, b| a.nanoseconds.partial_cmp(&b.nanoseconds).unwrap());
-        comp.benchmarks[0].best = true;
-
-        let top = comp.benchmarks[0].nanoseconds;
-        for (i, b) in comp.benchmarks.iter_mut().enumerate() {
-            comp.name_to_index.insert(b.name.to_string(), i);
-            b.rank = b.nanoseconds / top;
-        }
-        comp
-    }
-
-    /// Return the biggest difference, percentage wise, between benchmarks
-    /// in this comparison.
-    ///
-    /// If this comparison has fewer than two benchmarks, then 0 is returned.
-    pub fn biggest_difference(&self) -> f64 {
-        if self.benchmarks.len() < 2 {
-            return 0.0;
-        }
-        let best = self.benchmarks[0].nanoseconds;
-        let worst = self.benchmarks.last().unwrap().nanoseconds;
-        ((worst - best) / best) * 100.0
-    }
-
-    fn get(&self, name: &str) -> Option<&Benchmark> {
-        self.name_to_index
-            .get(name)
-            .and_then(|&i| self.benchmarks.get(i))
-    }
-}
-
-impl Benchmark {
-    pub fn from_data(b: &data::Benchmark) -> Benchmark {
-        Benchmark {
-            name: b.fullname().to_string(),
-            nanoseconds: b.nanoseconds(),
-            stddev: Some(b.stddev()),
-            throughput: b.throughput(),
-            best: false,
-            rank: 0.0,
-        }
-    }
-
-    pub fn name(self, name: &str) -> Benchmark {
-        Benchmark {
-            name: name.to_string(),
-            ..self
-        }
-    }
-}
-
-pub fn columns<W: WriteColor>(mut wtr: W, groups: &[Comparison]) -> Result<()> {
-    let mut columns = BTreeSet::new();
-    for group in groups {
-        for b in &group.benchmarks {
-            columns.insert(b.name.to_string());
-        }
-    }
-
-    write!(wtr, "group")?;
-    for column in &columns {
-        write!(wtr, "\t {}", column)?;
-    }
-    writeln!(wtr)?;
-
-    write_divider(&mut wtr, '-', "group".width())?;
-    for column in &columns {
-        write!(wtr, "\t ")?;
-        write_divider(&mut wtr, '-', column.width())?;
-    }
-    writeln!(wtr)?;
-
-    for group in groups {
-        if group.benchmarks.is_empty() {
-            continue;
-        }
-
-        write!(wtr, "{}", group.name)?;
-        for column_name in &columns {
-            let b = match group.get(column_name) {
-                Some(b) => b,
-                None => {
-                    write!(wtr, "\t")?;
-                    continue;
-                }
-            };
-
-            if b.best {
-                let mut spec = ColorSpec::new();
-                spec.set_fg(Some(Color::Green)).set_bold(true);
-                wtr.set_color(&spec)?;
-            }
-            write!(
-                wtr,
-                "\t {:<5.2} {:>14} {:>14}",
-                b.rank,
-                time(b.nanoseconds, b.stddev),
-                throughput(b.throughput),
-            )?;
-            if b.best {
-                wtr.reset()?;
-            }
-        }
-        writeln!(wtr)?;
-    }
-    Ok(())
-}
-
-pub fn rows<W: WriteColor>(mut wtr: W, groups: &[Comparison]) -> Result<()> {
-    for (i, group) in groups.iter().enumerate() {
-        if i > 0 {
-            writeln!(wtr)?;
-        }
-        rows_one(&mut wtr, group)?;
-    }
-    Ok(())
-}
-
-fn rows_one<W: WriteColor>(mut wtr: W, group: &Comparison) -> Result<()> {
-    writeln!(wtr, "{}", group.name)?;
-    write_divider(&mut wtr, '-', group.name.width())?;
-    writeln!(wtr)?;
-
-    if group.benchmarks.is_empty() {
-        writeln!(wtr, "NOTHING TO SHOW")?;
-        return Ok(());
-    }
-
-    for b in &group.benchmarks {
-        writeln!(
-            wtr,
-            "{}\t{:>7.2}\t{:>15}\t{:>12}",
-            b.name,
-            b.rank,
-            time(b.nanoseconds, b.stddev),
-            throughput(b.throughput),
-        )?;
-    }
-    Ok(())
-}
-
-fn write_divider<W: WriteColor>(mut wtr: W, divider: char, width: usize) -> Result<()> {
-    let div: String = iter::repeat(divider).take(width).collect();
-    write!(wtr, "{}", div)?;
-    Ok(())
-}
-
-fn time(nanos: f64, stddev: Option<f64>) -> String {
-    const MIN_MICRO: f64 = 2_000.0;
-    const MIN_MILLI: f64 = 2_000_000.0;
-    const MIN_SEC: f64 = 2_000_000_000.0;
-
-    let (div, label) = if nanos < MIN_MICRO {
-        (1.0, "ns")
-    } else if nanos < MIN_MILLI {
-        (1_000.0, "µs")
-    } else if nanos < MIN_SEC {
-        (1_000_000.0, "ms")
-    } else {
-        (1_000_000_000.0, "s")
-    };
-    if let Some(stddev) = stddev {
-        format!("{:.1}±{:.2}{}", nanos / div, stddev / div, label)
-    } else {
-        format!("{:.1}{}", nanos / div, label)
-    }
-}
-
-fn throughput(throughput: Option<data::Throughput>) -> String {
-    use data::Throughput::*;
-    match throughput {
-        Some(Bytes(num)) => throughput_per(num, "B"),
-        Some(Elements(num)) => throughput_per(num, "Elem"),
-        _ => "? ?/sec".to_string(),
-    }
-}
-
-fn throughput_per(per: f64, unit: &str) -> String {
-    const MIN_K: f64 = (2 * (1 << 10) as u64) as f64;
-    const MIN_M: f64 = (2 * (1 << 20) as u64) as f64;
-    const MIN_G: f64 = (2 * (1 << 30) as u64) as f64;
-
-    if per < MIN_K {
-        format!("{} {}/sec", per as u64, unit)
-    } else if per < MIN_M {
-        format!("{:.1} K{}/sec", (per / (1 << 10) as f64), unit)
-    } else if per < MIN_G {
-        format!("{:.1} M{}/sec", (per / (1 << 20) as f64), unit)
-    } else {
-        format!("{:.1} G{}/sec", (per / (1 << 30) as f64), unit)
-    }
-}
diff --git a/src/lib.rs b/src/lib.rs
index b1a12859..19ecac92 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -59,7 +59,6 @@ mod benchmark_group;
 pub mod async_executor;
 mod bencher;
 mod connection;
-mod critcmp;
 #[cfg(feature = "csv_output")]
 mod csv_report;
 mod error;
@@ -80,7 +79,6 @@ use std::cell::RefCell;
 use std::collections::HashSet;
 use std::default::Default;
 use std::env;
-use std::io::Write;
 use std::net::TcpStream;
 use std::path::{Path, PathBuf};
 use std::process::Command;
@@ -786,31 +784,6 @@ impl Criterion {
             .takes_value(true)
             .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
             .conflicts_with_all(&["test", "list"]))
-        .arg(Arg::new("export")
-            .long("export")
-            .takes_value(true)
-            .help("Export baseline as json, printed to stdout")
-            .conflicts_with_all(&["list", "test", "profile-time", "compare"]))
-        .arg(Arg::new("compare")
-            .long("compare")
-            .help("Tabulate benchmark results")
-            .conflicts_with_all(&["list", "test", "profile-time", "export"]))
-        .arg(Arg::new("baselines")
-            .long("baselines")
-            .multiple_occurrences(true)
-            .value_name("baselines")
-            .requires("compare")
-            .require_value_delimiter(true)
-            .use_value_delimiter(true)
-            .help("Limit the baselines used in tabulated results.")
-            .help(""))
-        .arg(Arg::new("compare-threshold")
-            .long("compare-threshold")
-            .takes_value(true)
-            .help("Hide results that differ by less than the threshold percentage. By default, all results are shown."))
-        .arg(Arg::new("compare-list")
-            .long("compare-list")
-            .help("Show benchmark results in a list rather than in a table. Useful when horizontal space is limited."))
         .arg(Arg::new("load-baseline")
             .long("load-baseline")
             .takes_value(true)
@@ -1087,54 +1060,6 @@ https://bheisler.github.io/criterion.rs/book/faq.html
             self.config.significance_level = num_significance_level;
         }
 
-        // XXX: Comparison functionality should ideally live in 'cargo-criterion'.
-        if matches.is_present("compare") {
-            if self.connection.is_some() {
-                eprintln!(
-                    "Error: tabulating results is not supported when running with cargo-criterion."
- ); - std::process::exit(1); - } - // Other arguments: compare-threshold, compare-list. - - let stdout_isatty = atty::is(atty::Stream::Stdout); - let enable_text_coloring = match matches.value_of("color") { - Some("always") => true, - Some("never") => false, - _ => stdout_isatty, - }; - - let args = critcmp::app::Args { - baselines: matches.values_of_lossy("baselines").unwrap_or_default(), - output_list: matches.is_present("compare-list"), - threshold: matches.value_of_t("compare-threshold").ok(), // FIXME: Print error message if parsing fails. - color: enable_text_coloring, - filter: self.filter, - }; - critcmp::main::main(args); - std::process::exit(0); - } - - if let Some(baseline) = matches.value_of("export") { - let benchmarks = critcmp::app::Args { - baselines: matches.values_of_lossy("baselines").unwrap_or_default(), - ..Default::default() - } - .benchmarks() - .expect("failed to find baselines"); - let mut stdout = std::io::stdout(); - let basedata = match benchmarks.by_baseline.get(baseline) { - Some(basedata) => basedata, - None => { - eprintln!("failed to find baseline '{}'", baseline); - std::process::exit(1); - } - }; - serde_json::to_writer_pretty(&mut stdout, basedata).unwrap(); - writeln!(stdout).unwrap(); - std::process::exit(0); - } - if matches.is_present("quick") { self.config.quick_mode = true; }