Skip to content

Commit

Permalink
Merge pull request #362 from cuviper/find-octillion
Browse files Browse the repository at this point in the history
Add `find_first` and `find_last` tests on a giant range
  • Loading branch information
nikomatsakis committed Jun 14, 2017
2 parents f115dd8 + 6b8b71a commit 1f7b60b
Show file tree
Hide file tree
Showing 3 changed files with 82 additions and 33 deletions.
35 changes: 2 additions & 33 deletions src/iter/find_first_last/mod.rs
Expand Up @@ -29,7 +29,8 @@ enum MatchPosition {
Rightmost,
}

// Returns true if pos1 is a better match than pos2 according to MatchPosition
/// Returns true if pos1 is a better match than pos2 according to MatchPosition
#[inline]
fn better_position(pos1: usize, pos2: usize, mp: MatchPosition) -> bool {
match mp {
MatchPosition::Leftmost => pos1 < pos2,
Expand Down Expand Up @@ -205,38 +206,6 @@ impl<'p, P: 'p + Fn(&T) -> bool, T> Folder<T> for FindFolder<'p, T, P> {
}
}

// These tests require that a folder be assigned to an iterator with more
// than one element. We can't necessarily determine when that will happen
// for a given input to find_first/find_last, so we test the folder
// directly here instead.
#[test]
fn find_first_folder_does_not_clobber_first_found() {
    // Seed the shared best-match slot with the "worst possible" position so
    // that any real match counts as an improvement.
    let best_found = AtomicUsize::new(usize::max_value());
    let folder = FindFolder {
        find_op: &(|&_: &i32| -> bool { true }),
        boundary: 0,
        match_position: MatchPosition::Leftmost,
        best_found: &best_found,
        item: None,
    };
    // Feed three items that all match; only the first may be kept.
    let folder = (0_i32..3).fold(folder, |f, x| f.consume(x));
    assert!(folder.full());
    assert_eq!(folder.complete(), Some(0_i32));
}

#[test]
fn find_last_folder_yields_last_match() {
    // For a rightmost search the starting best position is zero; any later
    // match should replace an earlier one.
    let best_found = AtomicUsize::new(0);
    let folder = FindFolder {
        find_op: &(|&_: &i32| -> bool { true }),
        boundary: 0,
        match_position: MatchPosition::Rightmost,
        best_found: &best_found,
        item: None,
    };
    // All three items match; the folder must end up holding the last one.
    let folder = (0_i32..3).fold(folder, |f, x| f.consume(x));
    assert_eq!(folder.complete(), Some(2_i32));
}

/// Reducer used when merging the results of two split halves of a
/// find_first/find_last search; carries the match-position policy
/// (leftmost vs. rightmost) that decides which side's result wins.
struct FindReducer {
    match_position: MatchPosition,
}
Expand Down
79 changes: 79 additions & 0 deletions src/iter/find_first_last/test.rs
Expand Up @@ -68,3 +68,82 @@ fn same_range_last_consumers_return_correct_answer() {
assert_eq!(reducer.reduce(left_folder.complete(), right_folder.complete()),
Some(2));
}

// These tests require that a folder be assigned to an iterator with more
// than one element. We can't necessarily determine when that will happen
// for a given input to find_first/find_last, so we test the folder
// directly here instead.
#[test]
fn find_first_folder_does_not_clobber_first_found() {
    // Seed the shared best-match slot with the "worst possible" position so
    // that any real match counts as an improvement.
    let best_found = AtomicUsize::new(usize::max_value());
    let folder = FindFolder {
        find_op: &(|&_: &i32| -> bool { true }),
        boundary: 0,
        match_position: MatchPosition::Leftmost,
        best_found: &best_found,
        item: None,
    };
    // Feed three items that all match; only the first may be kept.
    let folder = (0_i32..3).fold(folder, |f, x| f.consume(x));
    assert!(folder.full());
    assert_eq!(folder.complete(), Some(0_i32));
}

#[test]
fn find_last_folder_yields_last_match() {
    // For a rightmost search the starting best position is zero; any later
    // match should replace an earlier one.
    let best_found = AtomicUsize::new(0);
    let folder = FindFolder {
        find_op: &(|&_: &i32| -> bool { true }),
        boundary: 0,
        match_position: MatchPosition::Rightmost,
        best_found: &best_found,
        item: None,
    };
    // All three items match; the folder must end up holding the last one.
    let folder = (0_i32..3).fold(folder, |f, x| f.consume(x));
    assert_eq!(folder.complete(), Some(2_i32));
}


/// Produce a parallel iterator for 0u128..10²⁷
fn octillion() -> impl ParallelIterator<Item = u128> {
(0u32..1_000_000_000)
.into_par_iter()
.with_max_len(1_000)
.map(|i| i as u64 * 1_000_000_000)
.flat_map(
|i| {
(0u32..1_000_000_000)
.into_par_iter()
.with_max_len(1_000)
.map(move |j| i + j as u64)
}
)
.map(|i| i as u128 * 1_000_000_000)
.flat_map(
|i| {
(0u32..1_000_000_000)
.into_par_iter()
.with_max_len(1_000)
.map(move |j| i + j as u128)
}
)
}

#[test]
fn find_first_octillion() {
    // Every element matches, so the leftmost value (zero) must be found
    // without traversing the whole giant range.
    assert_eq!(octillion().find_first(|_| true), Some(0));
}

#[test]
fn find_last_octillion() {
    // FIXME: With only one thread we'd walk the entire iterator
    // sequentially, with no short-circuiting — far too slow for a test.
    // It would be nice if `find_last` could prioritize the later splits,
    // basically flipping the `join` args, without needing indexed `rev`.
    // (or could we have an unindexed `rev`?)
    let config = ::Configuration::new().num_threads(2);
    let pool = ::ThreadPool::new(config).unwrap();

    let last = pool.install(|| octillion().find_last(|_| true));
    // The rightmost element of 0..10²⁷ is 10²⁷ - 1.
    assert_eq!(last, Some(999_999_999_999_999_999_999_999_999));
}
1 change: 1 addition & 0 deletions src/lib.rs
@@ -1,5 +1,6 @@
#![allow(non_camel_case_types)] // I prefer to use ALL_CAPS for type parameters
#![cfg_attr(test, feature(conservative_impl_trait))]
#![cfg_attr(test, feature(i128_type))]

// If you're not compiling the unstable code, it often happens that
// there is stuff that is considered "dead code" and so forth. So
Expand Down

0 comments on commit 1f7b60b

Please sign in to comment.