diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index bf456b33a..497cea91b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -25,7 +25,7 @@ jobs:
           - crossbeam-skiplist
           - crossbeam-utils
         rust:
-          - 1.28.0
+          - 1.30.0
           - nightly
     steps:
       - uses: actions/checkout@master
@@ -38,7 +38,7 @@ jobs:
           rustup target add thumbv6m-none-eabi
       # cfg-if 0.1.10 requires Rust 1.31+ so downgrade it.
      - name: Downgrade dependencies
-        if: matrix.rust == '1.28.0'
+        if: matrix.rust == '1.30.0'
        run: |
          cargo generate-lockfile
          cargo update -p cfg-if --precise 0.1.9
@@ -66,3 +66,14 @@ jobs:
         run: rustup update stable && rustup default stable
       - name: rustfmt
         run: ./ci/rustfmt.sh
+
+  # Run loom tests.
+  loom:
+    name: loom
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@master
+      - name: Install Rust
+        run: rustup update stable && rustup default stable
+      - name: loom
+        run: ./ci/crossbeam-epoch-loom.sh
diff --git a/ci/crossbeam-epoch-loom.sh b/ci/crossbeam-epoch-loom.sh
new file mode 100755
index 000000000..40949f3ed
--- /dev/null
+++ b/ci/crossbeam-epoch-loom.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+cd "$(dirname "$0")"/../crossbeam-epoch
+set -ex
+
+export RUSTFLAGS="-D warnings --cfg=loom"
+
+env LOOM_MAX_PREEMPTIONS=2 cargo test --test loom --features sanitize --release -- --nocapture
diff --git a/crossbeam-epoch/Cargo.toml b/crossbeam-epoch/Cargo.toml
index a1a056f5e..a67c924b7 100644
--- a/crossbeam-epoch/Cargo.toml
+++ b/crossbeam-epoch/Cargo.toml
@@ -27,6 +27,9 @@ cfg-if = "0.1.2"
 maybe-uninit = "2.0.0"
 memoffset = "0.5"
 
+[target.'cfg(loom)'.dependencies]
+loom = "0.3.2"
+
 [dependencies.crossbeam-utils]
 version = "0.7"
 path = "../crossbeam-utils"
diff --git a/crossbeam-epoch/src/atomic.rs b/crossbeam-epoch/src/atomic.rs
index a2044740b..184da8aab 100644
--- a/crossbeam-epoch/src/atomic.rs
+++ b/crossbeam-epoch/src/atomic.rs
@@ -1,4 +1,5 @@
 use alloc::boxed::Box;
+use concurrency::sync::atomic::{AtomicUsize, Ordering};
 use core::borrow::{Borrow, BorrowMut};
 use core::cmp;
 use core::fmt;
@@ -6,7 +7,6 @@ use core::marker::PhantomData;
 use core::mem;
 use core::ops::{Deref, DerefMut};
 use core::ptr;
-use core::sync::atomic::{AtomicUsize, Ordering};
 
 use crossbeam_utils::atomic::AtomicConsume;
 use guard::Guard;
@@ -150,7 +150,7 @@ impl<T> Atomic<T> {
     ///
     /// let a = Atomic::<i32>::null();
     /// ```
-    #[cfg(not(has_min_const_fn))]
+    #[cfg(any(loom, not(has_min_const_fn)))]
     pub fn null() -> Atomic<T> {
         Self {
             data: AtomicUsize::new(0),
@@ -167,7 +167,7 @@ impl<T> Atomic<T> {
     ///
     /// let a = Atomic::<i32>::null();
     /// ```
-    #[cfg(has_min_const_fn)]
+    #[cfg(all(not(loom), has_min_const_fn))]
     pub const fn null() -> Atomic<T> {
         Self {
             data: AtomicUsize::new(0),
@@ -506,7 +506,14 @@ impl<T> Atomic<T> {
     /// }
     /// ```
     pub unsafe fn into_owned(self) -> Owned<T> {
-        Owned::from_usize(self.data.into_inner())
+        #[cfg(loom)]
+        {
+            Owned::from_usize(self.data.unsync_load())
+        }
+        #[cfg(not(loom))]
+        {
+            Owned::from_usize(self.data.into_inner())
+        }
     }
 }
@@ -1185,7 +1192,7 @@ impl<'g, T> Default for Shared<'g, T> {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use super::Shared;
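Note on the `into_owned` change above: loom's `AtomicUsize` instruments every access and (as of loom 0.3) exposes no `into_inner`, so the owned-consumption path switches to `unsync_load`, a plain read that is sound only because `self` is taken by value and no other thread can still touch the cell. A minimal sketch of the same pattern, assuming the `concurrency` shim this patch adds to `crossbeam-epoch/src/lib.rs` further down; `PtrBox` is a hypothetical stand-in for `Atomic<T>`:

```rust
use concurrency::sync::atomic::AtomicUsize;

struct PtrBox {
    data: AtomicUsize, // a raw pointer stored as a usize
}

impl PtrBox {
    /// Consumes the box and returns the address it held.
    ///
    /// Safety: the caller must guarantee no other thread still accesses `data`.
    unsafe fn into_raw(self) -> usize {
        #[cfg(loom)]
        {
            // loom's AtomicUsize has no `into_inner`; `unsync_load` is its
            // escape hatch for reads that need no synchronization, sound
            // here because we own `self` outright.
            self.data.unsync_load()
        }
        #[cfg(not(loom))]
        {
            self.data.into_inner()
        }
    }
}
```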
diff --git a/crossbeam-epoch/src/collector.rs b/crossbeam-epoch/src/collector.rs
index 1817d9ada..ac38690eb 100644
--- a/crossbeam-epoch/src/collector.rs
+++ b/crossbeam-epoch/src/collector.rs
@@ -12,7 +12,7 @@
 ///
 /// handle.pin().flush();
 /// ```
-use alloc::sync::Arc;
+use concurrency::sync::Arc;
 use core::fmt;
 
 use guard::Guard;
@@ -103,7 +103,7 @@ impl fmt::Debug for LocalHandle {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use std::mem;
     use std::sync::atomic::{AtomicUsize, Ordering};
@@ -145,9 +145,9 @@ mod tests {
             let a = Owned::new(7).into_shared(guard);
             guard.defer_destroy(a);
 
-            assert!(!(*(*guard.local).bag.get()).is_empty());
+            assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
 
-            while !(*(*guard.local).bag.get()).is_empty() {
+            while !(*guard.local).bag.with(|b| (*b).is_empty()) {
                 guard.flush();
             }
         }
@@ -166,7 +166,7 @@ mod tests {
             let a = Owned::new(7).into_shared(guard);
             guard.defer_destroy(a);
         }
 
-        assert!(!(*(*guard.local).bag.get()).is_empty());
+        assert!(!(*guard.local).bag.with(|b| (*b).is_empty()));
     }
 }
diff --git a/crossbeam-epoch/src/default.rs b/crossbeam-epoch/src/default.rs
index 870e590fa..444d24534 100644
--- a/crossbeam-epoch/src/default.rs
+++ b/crossbeam-epoch/src/default.rs
@@ -5,6 +5,7 @@
 //! destructed on thread exit, which in turn unregisters the thread.
 
 use collector::{Collector, LocalHandle};
+use concurrency::{lazy_static, thread_local};
 use guard::Guard;
 
 lazy_static! {
@@ -44,7 +45,7 @@ where
         .unwrap_or_else(|_| f(&COLLECTOR.register()))
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use crossbeam_utils::thread;
diff --git a/crossbeam-epoch/src/deferred.rs b/crossbeam-epoch/src/deferred.rs
index a0970f115..c72b2af96 100644
--- a/crossbeam-epoch/src/deferred.rs
+++ b/crossbeam-epoch/src/deferred.rs
@@ -78,7 +78,7 @@ impl Deferred {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use super::Deferred;
     use std::cell::Cell;
diff --git a/crossbeam-epoch/src/epoch.rs b/crossbeam-epoch/src/epoch.rs
index e7759d935..51877cd56 100644
--- a/crossbeam-epoch/src/epoch.rs
+++ b/crossbeam-epoch/src/epoch.rs
@@ -7,7 +7,7 @@
 //! If an object became garbage in some epoch, then we can be sure that after two advancements no
 //! participant will hold a reference to it. That is the crux of safe memory reclamation.
 
-use core::sync::atomic::{AtomicUsize, Ordering};
+use concurrency::sync::atomic::{AtomicUsize, Ordering};
 
 /// An epoch that can be marked as pinned or unpinned.
 ///
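The `use concurrency::{lazy_static, thread_local};` line added to `default.rs` is what lets the global collector participate in model checking: loom ships its own `lazy_static!` and `thread_local!` macros whose contents are torn down and rebuilt for every iteration of `loom::model`, while the `not(loom)` shim re-exports the ordinary `lazy_static` crate and `std` macros. A hedged sketch of the dual-resolution import (the statics here are illustrative, not from the patch):

```rust
use concurrency::{lazy_static, thread_local};
use std::cell::Cell;

lazy_static! {
    // Initialized once per process normally; re-initialized for every
    // explored interleaving under `--cfg loom`.
    static ref NAME: String = String::from("collector");
}

thread_local! {
    // Same story for per-thread state.
    static EPOCH: Cell<usize> = Cell::new(0);
}
```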
diff --git a/crossbeam-epoch/src/internal.rs b/crossbeam-epoch/src/internal.rs
index 645511b9c..e34cb798a 100644
--- a/crossbeam-epoch/src/internal.rs
+++ b/crossbeam-epoch/src/internal.rs
@@ -35,11 +35,12 @@
 //! Ideally each instance of concurrent data structure may have its own queue that gets fully
 //! destroyed as soon as the data structure gets dropped.
 
-use core::cell::{Cell, UnsafeCell};
+use concurrency::cell::UnsafeCell;
+use concurrency::sync::atomic;
+use concurrency::sync::atomic::Ordering;
+use core::cell::Cell;
 use core::mem::{self, ManuallyDrop};
 use core::num::Wrapping;
-use core::sync::atomic;
-use core::sync::atomic::Ordering;
 use core::{fmt, ptr};
 
 use crossbeam_utils::CachePadded;
@@ -411,7 +412,7 @@ impl Local {
     /// Returns a reference to the `Collector` in which this `Local` resides.
     #[inline]
     pub fn collector(&self) -> &Collector {
-        unsafe { &**self.collector.get() }
+        self.collector.with(|c| unsafe { &**c })
     }
 
     /// Returns `true` if the current participant is pinned.
@@ -426,7 +427,7 @@ impl Local {
     ///
     /// It should be safe for another thread to execute the given function.
     pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
-        let bag = &mut *self.bag.get();
+        let bag = self.bag.with_mut(|b| &mut *b);
 
         while let Err(d) = bag.try_push(deferred) {
             self.global().push_bag(bag, guard);
@@ -435,7 +436,7 @@ impl Local {
     }
 
     pub fn flush(&self, guard: &Guard) {
-        let bag = unsafe { &mut *self.bag.get() };
+        let bag = self.bag.with_mut(|b| unsafe { &mut *b });
 
         if !bag.is_empty() {
             self.global().push_bag(bag, guard);
@@ -573,7 +574,8 @@ impl Local {
             // Pin and move the local bag into the global queue. It's important that `push_bag`
             // doesn't defer destruction on any new garbage.
             let guard = &self.pin();
-            self.global().push_bag(&mut *self.bag.get(), guard);
+            self.global()
+                .push_bag(self.bag.with_mut(|b| &mut *b), guard);
         }
         // Revert the handle count back to zero.
         self.handle_count.set(0);
@@ -582,7 +584,7 @@ impl Local {
             // Take the reference to the `Global` out of this `Local`. Since we're not protected
             // by a guard at this time, it's crucial that the reference is read before marking the
             // `Local` as deleted.
-            let collector: Collector = ptr::read(&*(*self.collector.get()));
+            let collector: Collector = ptr::read(self.collector.with(|c| &*(*c)));
 
             // Mark this node in the linked list as deleted.
             self.entry.delete(&unprotected());
@@ -613,7 +615,7 @@ impl IsElement<Local> for Local {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use std::sync::atomic::{AtomicUsize, Ordering};
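All the `bag.with_mut(...)` and `collector.with(...)` rewrites above follow from the `UnsafeCell` façade this patch introduces: loom's `UnsafeCell` only hands out its pointer inside a closure, so the checker can bracket each access and flag conflicting concurrent ones, while the `not(loom)` shim mirrors that closure API at zero cost over `core::cell::UnsafeCell`. A small sketch of the before/after shape, assuming the shim; `Slot` is a hypothetical type:

```rust
use concurrency::cell::UnsafeCell;

struct Slot {
    value: UnsafeCell<u64>,
}

impl Slot {
    fn new(v: u64) -> Slot {
        Slot { value: UnsafeCell::new(v) }
    }

    // Previously written as `*self.value.get()`; the closure form lets loom
    // observe exactly where the access begins and ends.
    unsafe fn read(&self) -> u64 {
        self.value.with(|p| *p)
    }

    unsafe fn write(&self, v: u64) {
        self.value.with_mut(|p| *p = v);
    }
}
```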
diff --git a/crossbeam-epoch/src/lib.rs b/crossbeam-epoch/src/lib.rs
index 282bbe90f..c6c3c6b1c 100644
--- a/crossbeam-epoch/src/lib.rs
+++ b/crossbeam-epoch/src/lib.rs
@@ -66,6 +66,87 @@ extern crate core;
 
 extern crate maybe_uninit;
 
+#[cfg(loom)]
+extern crate loom;
+
+#[cfg(loom)]
+#[allow(unused_imports, dead_code)]
+pub(crate) mod concurrency {
+    pub(crate) mod cell {
+        pub(crate) use loom::cell::UnsafeCell;
+    }
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use loom::sync::atomic::{AtomicUsize, Ordering};
+            pub(crate) fn fence(ord: Ordering) {
+                if let Ordering::Acquire = ord {
+                } else {
+                    // FIXME: loom only supports acquire fences at the moment.
+                    // https://github.com/tokio-rs/loom/issues/117
+                    // let's at least not panic...
+                    // this may generate some false positives (`SeqCst` is stronger than
+                    // `Acquire` for example), and some false negatives (`Relaxed` is weaker
+                    // than `Acquire`), but it's the best we can do for the time being.
+                }
+                loom::sync::atomic::fence(Ordering::Acquire)
+            }
+
+            // FIXME: loom does not support compiler_fence at the moment.
+            // https://github.com/tokio-rs/loom/issues/117
+            // we use fence as a stand-in for compiler_fence for the time being.
+            // this may miss some races since fence is stronger than compiler_fence,
+            // but it's the best we can do for the time being.
+            pub(crate) use self::fence as compiler_fence;
+        }
+        pub(crate) use loom::sync::Arc;
+    }
+    pub(crate) use loom::lazy_static;
+    pub(crate) use loom::thread_local;
+}
+#[cfg(not(loom))]
+#[allow(unused_imports, dead_code)]
+pub(crate) mod concurrency {
+    #[cfg(any(feature = "alloc", feature = "std"))]
+    pub(crate) mod cell {
+        #[derive(Debug)]
+        #[repr(transparent)]
+        pub(crate) struct UnsafeCell<T>(::core::cell::UnsafeCell<T>);
+
+        impl<T> UnsafeCell<T> {
+            #[inline]
+            pub(crate) fn new(data: T) -> UnsafeCell<T> {
+                UnsafeCell(::core::cell::UnsafeCell::new(data))
+            }
+
+            #[inline]
+            pub(crate) fn with<R>(&self, f: impl FnOnce(*const T) -> R) -> R {
+                f(self.0.get())
+            }
+
+            #[inline]
+            pub(crate) fn with_mut<R>(&self, f: impl FnOnce(*mut T) -> R) -> R {
+                f(self.0.get())
+            }
+        }
+    }
+    #[cfg(any(feature = "alloc", feature = "std"))]
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use core::sync::atomic::compiler_fence;
+            pub(crate) use core::sync::atomic::fence;
+            pub(crate) use core::sync::atomic::{AtomicUsize, Ordering};
+        }
+        #[cfg_attr(feature = "nightly", cfg(target_has_atomic = "ptr"))]
+        pub(crate) use alloc::sync::Arc;
+    }
+
+    #[cfg(feature = "std")]
+    pub(crate) use std::thread_local;
+
+    #[cfg(feature = "std")]
+    pub(crate) use lazy_static::lazy_static;
+}
+
 cfg_if! {
     if #[cfg(feature = "alloc")] {
         extern crate alloc;
@@ -99,7 +180,6 @@ cfg_if! {
 
 cfg_if! {
     if #[cfg(feature = "std")] {
-        #[macro_use]
         extern crate lazy_static;
 
         mod default;
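The `concurrency` module is the heart of the patch: the rest of the crate imports atomics, cells, `Arc`, and the macros from this one path, and `--cfg loom` decides whether that resolves to loom's instrumented types or to `core`/`std`/`alloc`. Note the caveats spelled out in the FIXMEs: loom only models acquire fences, so `SeqCst` fences are weakened, and `compiler_fence` is modeled as a real acquire fence, which may hide races the weaker compiler-only barrier would permit. A sketch of what consuming code looks like; `Counter` is illustrative, not from the patch:

```rust
use concurrency::sync::atomic::{AtomicUsize, Ordering};

struct Counter {
    n: AtomicUsize,
}

impl Counter {
    fn new() -> Counter {
        // loom's `AtomicUsize::new` is not a `const fn`, which is also why
        // `Atomic::null` loses its `const` qualifier under `--cfg loom`.
        Counter { n: AtomicUsize::new(0) }
    }

    fn incr(&self) -> usize {
        // Resolves to core's AtomicUsize normally and loom's under
        // `--cfg loom`, with no change to this source.
        self.n.fetch_add(1, Ordering::Relaxed)
    }
}
```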
diff --git a/crossbeam-epoch/src/sync/list.rs b/crossbeam-epoch/src/sync/list.rs
index 8e8899ea2..09c98b435 100644
--- a/crossbeam-epoch/src/sync/list.rs
+++ b/crossbeam-epoch/src/sync/list.rs
@@ -3,8 +3,8 @@
 //! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA
 //! 2002. http://dl.acm.org/citation.cfm?id=564870.564881
 
+use concurrency::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 use core::marker::PhantomData;
-use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 
 use {unprotected, Atomic, Guard, Shared};
@@ -295,7 +295,7 @@ impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod tests {
     use super::*;
     use crossbeam_utils::thread;
diff --git a/crossbeam-epoch/src/sync/queue.rs b/crossbeam-epoch/src/sync/queue.rs
index 99fb6a1c4..306650610 100644
--- a/crossbeam-epoch/src/sync/queue.rs
+++ b/crossbeam-epoch/src/sync/queue.rs
@@ -8,7 +8,7 @@
 //! Simon Doherty, Lindsay Groves, Victor Luchangco, and Mark Moir. 2004b. Formal Verification of a
 //! Practical Lock-Free Queue Algorithm. https://doi.org/10.1007/978-3-540-30232-2_7
 
-use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use concurrency::sync::atomic::Ordering::{Acquire, Relaxed, Release};
 
 use crossbeam_utils::CachePadded;
@@ -203,7 +203,7 @@ impl<T> Drop for Queue<T> {
     }
 }
 
-#[cfg(test)]
+#[cfg(all(test, not(loom)))]
 mod test {
     use super::*;
     use crossbeam_utils::thread;
diff --git a/crossbeam-epoch/tests/loom.rs b/crossbeam-epoch/tests/loom.rs
new file mode 100644
index 000000000..1399af7eb
--- /dev/null
+++ b/crossbeam-epoch/tests/loom.rs
@@ -0,0 +1,147 @@
+#![cfg(loom)]
+
+extern crate crossbeam_epoch as epoch;
+extern crate crossbeam_utils as utils;
+extern crate loom;
+
+use epoch::*;
+use epoch::{Atomic, Owned};
+use loom::sync::atomic::Ordering::{self, Acquire, Relaxed, Release};
+use loom::sync::Arc;
+use loom::thread::spawn;
+use std::mem::ManuallyDrop;
+use std::ptr;
+
+#[test]
+fn it_works() {
+    loom::model(|| {
+        let collector = Collector::new();
+        let item: Atomic<String> = Atomic::from(Owned::new(String::from("boom")));
+        let item2 = item.clone();
+        let collector2 = collector.clone();
+        let guard = collector.register().pin();
+
+        let jh = loom::thread::spawn(move || {
+            let guard = collector2.register().pin();
+            guard.defer(move || {
+                // this isn't really safe, since other threads may still have pointers to the
+                // value, but in this limited test scenario it's okay, since we know the test won't
+                // access item after all the pins are released.
+                let mut item = unsafe { item2.into_owned() };
+                // mutate it as a second measure to make sure the assert_eq below would fail
+                item.retain(|c| c == 'o');
+                drop(item);
+            });
+        });
+
+        let item = item.load(Ordering::SeqCst, &guard);
+        // we pinned strictly before the call to defer,
+        // so item cannot have been dropped yet
+        assert_eq!(*unsafe { item.deref() }, "boom");
+        drop(guard);
+
+        jh.join().unwrap();
+
+        drop(collector);
+    })
+}
+
+#[test]
+fn treiber_stack() {
+    // this is mostly a copy-paste from the example
+    #[derive(Debug)]
+    pub struct TreiberStack<T> {
+        head: Atomic<Node<T>>,
+    }
+
+    #[derive(Debug)]
+    struct Node<T> {
+        data: ManuallyDrop<T>,
+        next: Atomic<Node<T>>,
+    }
+
+    impl<T> TreiberStack<T> {
+        pub fn new() -> TreiberStack<T> {
+            TreiberStack {
+                head: Atomic::null(),
+            }
+        }
+
+        pub fn push(&self, t: T) {
+            let mut n = Owned::new(Node {
+                data: ManuallyDrop::new(t),
+                next: Atomic::null(),
+            });
+
+            let guard = epoch::pin();
+
+            loop {
+                let head = self.head.load(Relaxed, &guard);
+                n.next.store(head, Relaxed);
+
+                match self.head.compare_and_set(head, n, Release, &guard) {
+                    Ok(_) => break,
+                    Err(e) => n = e.new,
+                }
+            }
+        }
+
+        pub fn pop(&self) -> Option<T> {
+            let guard = epoch::pin();
+            loop {
+                let head = self.head.load(Acquire, &guard);
+
+                match unsafe { head.as_ref() } {
+                    Some(h) => {
+                        let next = h.next.load(Relaxed, &guard);
+
+                        if self
+                            .head
+                            .compare_and_set(head, next, Relaxed, &guard)
+                            .is_ok()
+                        {
+                            unsafe {
+                                guard.defer_destroy(head);
+                                return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data)));
+                            }
+                        }
+                    }
+                    None => return None,
+                }
+            }
+        }
+
+        pub fn is_empty(&self) -> bool {
+            let guard = epoch::pin();
+            self.head.load(Acquire, &guard).is_null()
+        }
+    }
+
+    impl<T> Drop for TreiberStack<T> {
+        fn drop(&mut self) {
+            while self.pop().is_some() {}
+        }
+    }
+
+    loom::model(|| {
+        let stack1 = Arc::new(TreiberStack::new());
+        let stack2 = Arc::clone(&stack1);
+
+        // use 5 since it's greater than the 4 used for the sanitize feature
+        let jh = spawn(move || {
+            for i in 0..5 {
+                stack2.push(i);
+                assert!(stack2.pop().is_some());
+            }
+        });
+
+        for i in 0..5 {
+            stack1.push(i);
+            assert!(stack1.pop().is_some());
+        }
+
+        jh.join().unwrap();
+        assert!(stack1.pop().is_none());
+        assert!(stack1.is_empty());
+    });
+}
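For reference, the harness pattern the new test file relies on: `loom::model` runs the closure once for every interleaving the bounded scheduler can reach, so every `assert!` must hold on all legal schedules, and the `LOOM_MAX_PREEMPTIONS=2` setting in the CI script caps preemptions per execution to keep that search tractable. A minimal, self-contained sketch (not part of the patch):

```rust
#![cfg(loom)]

extern crate loom;

use loom::sync::atomic::{AtomicUsize, Ordering};
use loom::sync::Arc;
use loom::thread;

#[test]
fn two_increments() {
    loom::model(|| {
        let n = Arc::new(AtomicUsize::new(0));
        let n2 = Arc::clone(&n);

        let jh = thread::spawn(move || {
            n2.fetch_add(1, Ordering::SeqCst);
        });

        n.fetch_add(1, Ordering::SeqCst);
        jh.join().unwrap();

        // Holds in every interleaving because both increments are atomic.
        assert_eq!(n.load(Ordering::SeqCst), 2);
    });
}
```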
diff --git a/crossbeam-utils/Cargo.toml b/crossbeam-utils/Cargo.toml
index 069043a31..ffe59158d 100644
--- a/crossbeam-utils/Cargo.toml
+++ b/crossbeam-utils/Cargo.toml
@@ -25,6 +25,9 @@ alloc = []
 cfg-if = "0.1.2"
 lazy_static = { version = "1.1.0", optional = true }
 
+[target.'cfg(loom)'.dependencies]
+loom = "0.3.2"
+
 [build-dependencies]
 autocfg = "1"
diff --git a/crossbeam-utils/src/atomic/atomic_cell.rs b/crossbeam-utils/src/atomic/atomic_cell.rs
index cf0658aad..67ad85f01 100644
--- a/crossbeam-utils/src/atomic/atomic_cell.rs
+++ b/crossbeam-utils/src/atomic/atomic_cell.rs
@@ -1,12 +1,15 @@
+use concurrency::sync::atomic::{self, AtomicBool, Ordering};
 use core::cell::UnsafeCell;
 use core::fmt;
 use core::mem;
+
+#[cfg(not(loom))]
 use core::ptr;
-use core::sync::atomic::{self, AtomicBool, Ordering};
 
 #[cfg(feature = "std")]
 use std::panic::{RefUnwindSafe, UnwindSafe};
 
+#[cfg(not(loom))]
 use super::seq_lock::SeqLock;
 
 /// A thread-safe mutable memory location.
@@ -523,23 +526,23 @@ macro_rules! impl_arithmetic {
 
 #[cfg(has_atomic_u8)]
 impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
-#[cfg(has_atomic_u8)]
+#[cfg(all(has_atomic_u8, not(loom)))]
 impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
 #[cfg(has_atomic_u16)]
 impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
-#[cfg(has_atomic_u16)]
+#[cfg(all(has_atomic_u16, not(loom)))]
 impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
 #[cfg(has_atomic_u32)]
 impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
-#[cfg(has_atomic_u32)]
+#[cfg(all(has_atomic_u32, not(loom)))]
 impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
 #[cfg(has_atomic_u64)]
 impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
-#[cfg(has_atomic_u64)]
+#[cfg(all(has_atomic_u64, not(loom)))]
 impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
-#[cfg(has_atomic_u128)]
+#[cfg(all(has_atomic_u128, not(loom)))]
 impl_arithmetic!(u128, atomic::AtomicU128, "let a = AtomicCell::new(7u128);");
-#[cfg(has_atomic_u128)]
+#[cfg(all(has_atomic_u128, not(loom)))]
 impl_arithmetic!(i128, atomic::AtomicI128, "let a = AtomicCell::new(7i128);");
 
 impl_arithmetic!(
@@ -547,6 +550,7 @@ impl_arithmetic!(
     atomic::AtomicUsize,
     "let a = AtomicCell::new(7usize);"
 );
+#[cfg(not(loom))]
 impl_arithmetic!(
     isize,
     atomic::AtomicIsize,
@@ -648,6 +652,7 @@ fn can_transmute<A, B>() -> bool {
 /// scalability.
 #[inline]
 #[must_use]
+#[cfg(not(loom))]
 fn lock(addr: usize) -> &'static SeqLock {
     // The number of locks is a prime number because we want to make sure `addr % LEN` gets
     // dispersed across all locks.
@@ -741,6 +746,9 @@ macro_rules! atomic {
             #[cfg(has_atomic_u64)]
             atomic!(@check, $t, atomic::AtomicU64, $a, $atomic_op);
 
+            #[cfg(loom)]
+            unimplemented!("loom does not support non-atomic atomic ops");
+            #[cfg(not(loom))]
             break $fallback_op;
         }
     };
diff --git a/crossbeam-utils/src/atomic/consume.rs b/crossbeam-utils/src/atomic/consume.rs
index 9be5464fb..10ed628c1 100644
--- a/crossbeam-utils/src/atomic/consume.rs
+++ b/crossbeam-utils/src/atomic/consume.rs
@@ -1,6 +1,6 @@
 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
-use core::sync::atomic::compiler_fence;
-use core::sync::atomic::Ordering;
+use concurrency::sync::atomic::compiler_fence;
+use concurrency::sync::atomic::Ordering;
 
 /// Trait which allows reading from primitive atomic types with "consume" ordering.
 pub trait AtomicConsume {
@@ -53,30 +53,42 @@ macro_rules! impl_atomic {
             type Val = $val;
             impl_consume!();
         }
+        #[cfg(loom)]
+        impl AtomicConsume for ::loom::sync::atomic::$atomic {
+            type Val = $val;
+            impl_consume!();
+        }
     };
 }
 
 impl_atomic!(AtomicBool, bool);
 impl_atomic!(AtomicUsize, usize);
+#[cfg(not(loom))]
 impl_atomic!(AtomicIsize, isize);
 #[cfg(all(feature = "nightly", target_has_atomic = "8"))]
 impl_atomic!(AtomicU8, u8);
-#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "8", not(loom)))]
 impl_atomic!(AtomicI8, i8);
 #[cfg(all(feature = "nightly", target_has_atomic = "16"))]
 impl_atomic!(AtomicU16, u16);
-#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "16", not(loom)))]
 impl_atomic!(AtomicI16, i16);
 #[cfg(all(feature = "nightly", target_has_atomic = "32"))]
 impl_atomic!(AtomicU32, u32);
-#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "32", not(loom)))]
 impl_atomic!(AtomicI32, i32);
 #[cfg(all(feature = "nightly", target_has_atomic = "64"))]
 impl_atomic!(AtomicU64, u64);
-#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
+#[cfg(all(feature = "nightly", target_has_atomic = "64", not(loom)))]
 impl_atomic!(AtomicI64, i64);
 
 impl<T> AtomicConsume for ::core::sync::atomic::AtomicPtr<T> {
     type Val = *mut T;
     impl_consume!();
 }
+
+#[cfg(loom)]
+impl<T> AtomicConsume for ::loom::sync::atomic::AtomicPtr<T> {
+    type Val = *mut T;
+    impl_consume!();
+}
diff --git a/crossbeam-utils/src/atomic/mod.rs b/crossbeam-utils/src/atomic/mod.rs
index 074b0ca53..14af10c91 100644
--- a/crossbeam-utils/src/atomic/mod.rs
+++ b/crossbeam-utils/src/atomic/mod.rs
@@ -1,5 +1,6 @@
 //! Atomic types.
 
+#[cfg(not(loom))]
 cfg_if! {
     // Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap
     // around.
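Why `AtomicCell` needs the heavier gating above: its `atomic!` macro routes an operation to a native atomic when the payload's size and alignment match one, and otherwise serializes it through a process-global `SeqLock`. Loom has no way to model that global lock, so the fallback arm becomes `unimplemented!`, and the signed/128-bit impls disappear because loom 0.3 only provides the unsigned atomics. A hypothetical, much-simplified sketch of that dispatch decision (not the real macro):

```rust
use core::mem;
use core::sync::atomic::{AtomicU32, AtomicU64};

fn has_native_atomic<T>() -> bool {
    // The real macro also checks alignment; size alone is a simplification.
    mem::size_of::<T>() == mem::size_of::<AtomicU32>()
        || mem::size_of::<T>() == mem::size_of::<AtomicU64>()
}

fn classify<T>() -> &'static str {
    if has_native_atomic::<T>() {
        "lock-free: routed to a native atomic"
    } else if cfg!(loom) {
        "unsupported: loom cannot model the global seqlock fallback"
    } else {
        "fallback: serialized through the global SeqLock"
    }
}
```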
diff --git a/crossbeam-utils/src/backoff.rs b/crossbeam-utils/src/backoff.rs
index 446755bbc..305d20574 100644
--- a/crossbeam-utils/src/backoff.rs
+++ b/crossbeam-utils/src/backoff.rs
@@ -1,6 +1,6 @@
+use concurrency::sync::atomic;
 use core::cell::Cell;
 use core::fmt;
-use core::sync::atomic;
 
 const SPIN_LIMIT: u32 = 6;
 const YIELD_LIMIT: u32 = 10;
diff --git a/crossbeam-utils/src/lib.rs b/crossbeam-utils/src/lib.rs
index 06f23beb4..85baf5e5d 100644
--- a/crossbeam-utils/src/lib.rs
+++ b/crossbeam-utils/src/lib.rs
@@ -36,6 +36,52 @@ extern crate cfg_if;
 #[cfg(feature = "std")]
 extern crate core;
 
+#[cfg(loom)]
+extern crate loom;
+
+#[cfg(loom)]
+#[allow(unused_imports)]
+pub(crate) mod concurrency {
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use loom::sync::atomic::spin_loop_hint;
+            pub(crate) use loom::sync::atomic::{
+                AtomicBool, AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering,
+            };
+
+            // FIXME: loom does not support compiler_fence at the moment.
+            // https://github.com/tokio-rs/loom/issues/117
+            // we use fence as a stand-in for compiler_fence for the time being.
+            // this may miss some races since fence is stronger than compiler_fence,
+            // but it's the best we can do for the time being.
+            pub(crate) use loom::sync::atomic::fence as compiler_fence;
+        }
+        pub(crate) use loom::sync::{Arc, Condvar, Mutex};
+    }
+}
+#[cfg(not(loom))]
+#[allow(unused_imports)]
+pub(crate) mod concurrency {
+    pub(crate) mod sync {
+        pub(crate) mod atomic {
+            pub(crate) use core::sync::atomic::compiler_fence;
+            pub(crate) use core::sync::atomic::spin_loop_hint;
+            pub(crate) use core::sync::atomic::{AtomicBool, AtomicIsize, AtomicUsize, Ordering};
+            #[cfg(has_atomic_u16)]
+            pub(crate) use core::sync::atomic::{AtomicI16, AtomicU16};
+            #[cfg(has_atomic_u32)]
+            pub(crate) use core::sync::atomic::{AtomicI32, AtomicU32};
+            #[cfg(has_atomic_u64)]
+            pub(crate) use core::sync::atomic::{AtomicI64, AtomicU64};
+            #[cfg(has_atomic_u8)]
+            pub(crate) use core::sync::atomic::{AtomicI8, AtomicU8};
+        }
+
+        #[cfg(feature = "std")]
+        pub(crate) use std::sync::{Arc, Condvar, Mutex};
+    }
+}
+
 cfg_if! {
     if #[cfg(feature = "alloc")] {
         extern crate alloc;
@@ -55,10 +101,13 @@ pub use backoff::Backoff;
 
 cfg_if! {
     if #[cfg(feature = "std")] {
+        #[cfg(not(loom))]
         #[macro_use]
         extern crate lazy_static;
 
         pub mod sync;
+
+        #[cfg(not(loom))]
         pub mod thread;
     }
 }
diff --git a/crossbeam-utils/src/sync/mod.rs b/crossbeam-utils/src/sync/mod.rs
index 363496372..007c3fb38 100644
--- a/crossbeam-utils/src/sync/mod.rs
+++ b/crossbeam-utils/src/sync/mod.rs
@@ -9,9 +9,11 @@
 //! [`WaitGroup`]: struct.WaitGroup.html
 
 mod parker;
+#[cfg(not(loom))]
 mod sharded_lock;
 mod wait_group;
 
 pub use self::parker::{Parker, Unparker};
+#[cfg(not(loom))]
 pub use self::sharded_lock::{ShardedLock, ShardedLockReadGuard, ShardedLockWriteGuard};
 pub use self::wait_group::WaitGroup;
diff --git a/crossbeam-utils/src/sync/parker.rs b/crossbeam-utils/src/sync/parker.rs
index 051afe512..d605adc81 100644
--- a/crossbeam-utils/src/sync/parker.rs
+++ b/crossbeam-utils/src/sync/parker.rs
@@ -1,8 +1,8 @@
+use concurrency::sync::atomic::AtomicUsize;
+use concurrency::sync::atomic::Ordering::SeqCst;
+use concurrency::sync::{Arc, Condvar, Mutex};
 use std::fmt;
 use std::marker::PhantomData;
-use std::sync::atomic::AtomicUsize;
-use std::sync::atomic::Ordering::SeqCst;
-use std::sync::{Arc, Condvar, Mutex};
 use std::time::Duration;
 
 /// A thread parking primitive.
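`Parker` (and `WaitGroup` below) keep working under loom because loom models `Mutex` and `Condvar` as well as atomics; only `sharded_lock` and the `thread` module are compiled out, since they lean on primitives loom does not provide. A sketch of the blocking pattern this enables, assuming loom 0.3's `Mutex`/`Condvar` mirror `std`'s API as the `concurrency` re-exports above imply; `Gate` is illustrative:

```rust
use concurrency::sync::{Condvar, Mutex};

struct Gate {
    opened: Mutex<bool>,
    cvar: Condvar,
}

impl Gate {
    fn open(&self) {
        *self.opened.lock().unwrap() = true;
        self.cvar.notify_one();
    }

    fn wait(&self) {
        let mut opened = self.opened.lock().unwrap();
        while !*opened {
            // A modeled blocking point under loom; a real OS wait otherwise.
            opened = self.cvar.wait(opened).unwrap();
        }
    }
}
```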
diff --git a/crossbeam-utils/src/sync/wait_group.rs b/crossbeam-utils/src/sync/wait_group.rs
index 0527b3159..dc935cf7f 100644
--- a/crossbeam-utils/src/sync/wait_group.rs
+++ b/crossbeam-utils/src/sync/wait_group.rs
@@ -1,5 +1,5 @@
+use concurrency::sync::{Arc, Condvar, Mutex};
 use std::fmt;
-use std::sync::{Arc, Condvar, Mutex};
 
 /// Enables threads to synchronize the beginning or end of some computation.
 ///