//! Compares the performance of `UnicodeSegmentation::unicode_words` with the standard
//! library's scalar-based `str::split_whitespace`.
//!
//! `str::split_whitespace` is expected to be faster than
//! `UnicodeSegmentation::unicode_words`, since it splits only on whitespace and skips
//! the UAX #29 word-boundary analysis. The question this benchmark answers is how much
//! slower full Unicode handling is.
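//!
//! To illustrate the behavioral difference (example adapted from the crate's
//! documentation): `unicode_words` applies the word-boundary rules and drops
//! punctuation, while `split_whitespace` splits only on whitespace and keeps it.
//!
//! ```
//! use unicode_segmentation::UnicodeSegmentation;
//!
//! let s = "The quick (\"brown\") fox can't jump 32.3 feet, right?";
//!
//! let words: Vec<&str> = s.unicode_words().collect();
//! assert_eq!(words, ["The", "quick", "brown", "fox", "can't", "jump", "32.3", "feet", "right"]);
//!
//! let tokens: Vec<&str> = s.split_whitespace().collect();
//! assert_eq!(tokens, ["The", "quick", "(\"brown\")", "fox", "can't", "jump", "32.3", "feet,", "right?"]);
//! ```
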
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
use std::fs;
use unicode_segmentation::UnicodeSegmentation;

const FILES: &[&str] = &[
    "arabic",
    "english",
    "hindi",
    "japanese",
    "korean",
    "mandarin",
    "russian",
    "source_code",
];
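
// Full Unicode word segmentation (UAX #29 word boundaries) via `unicode_words`.
// `black_box` keeps the optimizer from discarding the otherwise-unused words.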
#[inline(always)]
fn grapheme(text: &str) {
    for w in text.unicode_words() {
        black_box(w);
    }
}
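
// Scalar baseline: the standard library's whitespace-only splitting.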
#[inline(always)]
fn scalar(text: &str) {
    for w in text.split_whitespace() {
        black_box(w);
    }
}
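
// Registers one Criterion benchmark per (strategy, input file) pair. Each fixture
// is read from benches/texts/ once at registration time, outside the measured
// `b.iter` closure, so file I/O is not included in the timings.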
fn bench_all(c: &mut Criterion) {
    let mut group = c.benchmark_group("words");

    for file in FILES {
        group.bench_with_input(
            BenchmarkId::new("grapheme", file),
            &fs::read_to_string(format!("benches/texts/{}.txt", file)).unwrap(),
            |b, content| b.iter(|| grapheme(content)),
        );
    }

    for file in FILES {
        group.bench_with_input(
            BenchmarkId::new("scalar", file),
            &fs::read_to_string(format!("benches/texts/{}.txt", file)).unwrap(),
            |b, content| b.iter(|| scalar(content)),
        );
    }
}

criterion_group!(benches, bench_all);
criterion_main!(benches);
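
// Assuming this file lives at benches/words.rs and is registered in Cargo.toml as
// a Criterion bench (`[[bench]] name = "words"` with `harness = false`), it can be
// run with `cargo bench --bench words`.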