diff --git a/src/iter/find_first_last/mod.rs b/src/iter/find_first_last/mod.rs index 505fb8e16..18fa48b14 100644 --- a/src/iter/find_first_last/mod.rs +++ b/src/iter/find_first_last/mod.rs @@ -29,7 +29,8 @@ enum MatchPosition { Rightmost, } -// Returns true if pos1 is a better match than pos2 according to MatchPosition +/// Returns true if pos1 is a better match than pos2 according to MatchPosition +#[inline] fn better_position(pos1: usize, pos2: usize, mp: MatchPosition) -> bool { match mp { MatchPosition::Leftmost => pos1 < pos2, @@ -205,38 +206,6 @@ impl<'p, P: 'p + Fn(&T) -> bool, T> Folder for FindFolder<'p, T, P> { } } -// These tests requires that a folder be assigned to an iterator with more than -// one element. We can't necessarily determine when that will happen for a given -// input to find_first/find_last, so we test the folder directly here instead. -#[test] -fn find_first_folder_does_not_clobber_first_found() { - let best_found = AtomicUsize::new(usize::max_value()); - let f = FindFolder { - find_op: &(|&_: &i32| -> bool { true }), - boundary: 0, - match_position: MatchPosition::Leftmost, - best_found: &best_found, - item: None, - }; - let f = f.consume(0_i32).consume(1_i32).consume(2_i32); - assert!(f.full()); - assert_eq!(f.complete(), Some(0_i32)); -} - -#[test] -fn find_last_folder_yields_last_match() { - let best_found = AtomicUsize::new(0); - let f = FindFolder { - find_op: &(|&_: &i32| -> bool { true }), - boundary: 0, - match_position: MatchPosition::Rightmost, - best_found: &best_found, - item: None, - }; - let f = f.consume(0_i32).consume(1_i32).consume(2_i32); - assert_eq!(f.complete(), Some(2_i32)); -} - struct FindReducer { match_position: MatchPosition, } diff --git a/src/iter/find_first_last/test.rs b/src/iter/find_first_last/test.rs index c284001c8..44c962fd5 100644 --- a/src/iter/find_first_last/test.rs +++ b/src/iter/find_first_last/test.rs @@ -68,3 +68,82 @@ fn same_range_last_consumers_return_correct_answer() { 
assert_eq!(reducer.reduce(left_folder.complete(), right_folder.complete()), Some(2)); } + +// These tests require that a folder be assigned to an iterator with more than +// one element. We can't necessarily determine when that will happen for a given +// input to find_first/find_last, so we test the folder directly here instead. +#[test] +fn find_first_folder_does_not_clobber_first_found() { + let best_found = AtomicUsize::new(usize::max_value()); + let f = FindFolder { + find_op: &(|&_: &i32| -> bool { true }), + boundary: 0, + match_position: MatchPosition::Leftmost, + best_found: &best_found, + item: None, + }; + let f = f.consume(0_i32).consume(1_i32).consume(2_i32); + assert!(f.full()); + assert_eq!(f.complete(), Some(0_i32)); +} + +#[test] +fn find_last_folder_yields_last_match() { + let best_found = AtomicUsize::new(0); + let f = FindFolder { + find_op: &(|&_: &i32| -> bool { true }), + boundary: 0, + match_position: MatchPosition::Rightmost, + best_found: &best_found, + item: None, + }; + let f = f.consume(0_i32).consume(1_i32).consume(2_i32); + assert_eq!(f.complete(), Some(2_i32)); +} + + +/// Produce a parallel iterator for 0u128..10²⁷ +fn octillion() -> impl ParallelIterator { + (0u32..1_000_000_000) + .into_par_iter() + .with_max_len(1_000) + .map(|i| i as u64 * 1_000_000_000) + .flat_map( + |i| { + (0u32..1_000_000_000) + .into_par_iter() + .with_max_len(1_000) + .map(move |j| i + j as u64) + } + ) + .map(|i| i as u128 * 1_000_000_000) + .flat_map( + |i| { + (0u32..1_000_000_000) + .into_par_iter() + .with_max_len(1_000) + .map(move |j| i + j as u128) + } + ) +} + +#[test] +fn find_first_octillion() { + let x = octillion().find_first(|_| true); + assert_eq!(x, Some(0)); +} + +#[test] +fn find_last_octillion() { + // FIXME: If we don't use at least two threads, then we end up walking + // through the entire iterator sequentially, without the benefit of any + // short-circuiting. We probably don't want testing to wait that long. 
;) + // It would be nice if `find_last` could prioritize the later splits, + // basically flipping the `join` args, without needing indexed `rev`. + // (or could we have an unindexed `rev`?) + let config = ::Configuration::new().num_threads(2); + let pool = ::ThreadPool::new(config).unwrap(); + + let x = pool.install(|| octillion().find_last(|_| true)); + assert_eq!(x, Some(999999999999999999999999999)); +} diff --git a/src/lib.rs b/src/lib.rs index e8788a978..04eb9e5f5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,6 @@ #![allow(non_camel_case_types)] // I prefer to use ALL_CAPS for type parameters #![cfg_attr(test, feature(conservative_impl_trait))] +#![cfg_attr(test, feature(i128_type))] // If you're not compiling the unstable code, it often happens that // there is stuff that is considered "dead code" and so forth. So