diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 843e325..bde4450 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -231,8 +231,7 @@ jobs:
       - name: Run miri
         env:
           PROPTEST_CASES: "10"
-          # TODO: Do something with the stacked borrows. Figure out what it means.
-          MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-disable-stacked-borrows"
+          MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-permissive-provenance"
         run: cargo miri test --all-features
 
   thread_sanitizer-MacOS:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b41d0c6..0dea87a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,6 @@
+* Fix a data race reported by MIRI.
+* Avoid violating stacked borrows (AFAIK these are still experimental and not
+  normative, but better safe than sorry). (#80).
 * The `AccessConvert` wrapper is needed less often in practice (#77).
 
 # 1.5.1
diff --git a/src/debt/list.rs b/src/debt/list.rs
index a98a02e..3d17388 100644
--- a/src/debt/list.rs
+++ b/src/debt/list.rs
@@ -58,7 +58,12 @@ pub(crate) struct Node {
     fast: FastSlots,
     helping: HelpingSlots,
     in_use: AtomicUsize,
-    next: Option<&'static Node>,
+    // Next node in the list.
+    //
+    // It is a pointer because we touch it before synchronization (we don't _dereference_ it before
+    // synchronization, only manipulate the pointer itself). That is illegal according to strict
+    // interpretation of the rules by MIRI on references.
+    next: *const Node,
     active_writers: AtomicUsize,
 }
 
@@ -68,7 +73,7 @@ impl Default for Node {
             fast: FastSlots::default(),
             helping: HelpingSlots::default(),
             in_use: AtomicUsize::new(NODE_USED),
-            next: None,
+            next: ptr::null(),
             active_writers: AtomicUsize::new(0),
         }
     }
@@ -94,7 +99,7 @@ impl Node {
             if result.is_some() {
                 return result;
             }
-            current = node.next;
+            current = unsafe { node.next.as_ref() };
         }
         None
     }
@@ -165,7 +170,7 @@ impl Node {
         // compare_exchange below.
         let mut head = LIST_HEAD.load(Relaxed);
         loop {
-            node.next = unsafe { head.as_ref() };
+            node.next = head;
             if let Err(old) = LIST_HEAD.compare_exchange_weak(
                 head, node,
                 // We need to release *the whole chain* here. For that, we need to
diff --git a/src/lib.rs b/src/lib.rs
index 7e2e6c2..c9526a3 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -883,7 +883,7 @@ macro_rules! t {
            const WRITER_CNT: usize = 2;
            const READER_CNT: usize = 3;
            #[cfg(miri)]
-            const ITERATIONS: usize = 10;
+            const ITERATIONS: usize = 5;
            #[cfg(not(miri))]
            const ITERATIONS: usize = 100;
            const SEQ: usize = 50;
@@ -1160,6 +1160,9 @@ macro_rules! t {
            #[test]
            /// Make sure the reference count and compare_and_swap works as expected.
            fn cas_ref_cnt() {
+                #[cfg(miri)]
+                const ITERATIONS: usize = 10;
+                #[cfg(not(miri))]
                const ITERATIONS: usize = 50;
                let shared = ArcSwap::from(Arc::new(0));
                for i in 0..ITERATIONS {
@@ -1173,7 +1176,7 @@ macro_rules! t {
                    // Fill up the slots sometimes
                    let fillup = || {
                        if i % 2 == 0 {
-                            Some((0..50).map(|_| shared.load()).collect::<Vec<_>>())
+                            Some((0..ITERATIONS).map(|_| shared.load()).collect::<Vec<_>>())
                        } else {
                            None
                        }
@@ -1272,10 +1275,14 @@ mod tests {
     /// created, but contain full Arcs.
     #[test]
     fn lease_overflow() {
+        #[cfg(miri)]
+        const GUARD_COUNT: usize = 100;
+        #[cfg(not(miri))]
+        const GUARD_COUNT: usize = 1000;
         let a = Arc::new(0);
         let shared = ArcSwap::from(Arc::clone(&a));
         assert_eq!(2, Arc::strong_count(&a));
-        let mut guards = (0..1000).map(|_| shared.load()).collect::<Vec<_>>();
+        let mut guards = (0..GUARD_COUNT).map(|_| shared.load()).collect::<Vec<_>>();
         let count = Arc::strong_count(&a);
         assert!(count > 2);
         let guard = shared.load();
diff --git a/src/ref_cnt.rs b/src/ref_cnt.rs
index 5128f5c..4c31739 100644
--- a/src/ref_cnt.rs
+++ b/src/ref_cnt.rs
@@ -1,3 +1,4 @@
+use std::mem;
 use std::ptr;
 use std::rc::Rc;
 use std::sync::Arc;
@@ -92,7 +93,29 @@ unsafe impl<T> RefCnt for Arc<T> {
         Arc::into_raw(me) as *mut T
     }
     fn as_ptr(me: &Arc<T>) -> *mut T {
-        me as &T as *const T as *mut T
+        // Slightly convoluted way to do this, but this avoids stacked borrows violations. The same
+        // intention as
+        //
+        // me as &T as *const T as *mut T
+        //
+        // We first create a "shallow copy" of me - one that doesn't really own its ref count
+        // (that's OK, me _does_ own it, so it can't be destroyed in the meantime).
+        // Then we can use into_raw (which preserves not having the ref count).
+        //
+        // We need to "revert" the changes we did. In current std implementation, the combination
+        // of from_raw and forget is no-op. But formally, into_raw shall be paired with from_raw
+        // and that read shall be paired with forget to properly "close the brackets". In future
+        // versions of STD, these may become something else that's not really no-op (unlikely, but
+        // possible), so we future-proof it a bit.
+
+        // SAFETY: &T cast to *const T will always be aligned, initialised and valid for reads
+        let ptr = Arc::into_raw(unsafe { std::ptr::read(me) });
+        let ptr = ptr as *mut T;
+
+        // SAFETY: We got the pointer from into_raw just above
+        mem::forget(unsafe { Arc::from_raw(ptr) });
+
+        ptr
     }
     unsafe fn from_ptr(ptr: *const T) -> Arc<T> {
         Arc::from_raw(ptr)
@@ -105,7 +128,9 @@ unsafe impl<T> RefCnt for Rc<T> {
         Rc::into_raw(me) as *mut T
     }
     fn as_ptr(me: &Rc<T>) -> *mut T {
-        me as &T as *const T as *mut T
+        // SAFETY: &T cast to *const T will always be aligned, initialised and valid for reads
+        let ptr = Rc::into_raw(unsafe { std::ptr::read(me) });
+        ptr as *mut T
     }
     unsafe fn from_ptr(ptr: *const T) -> Rc<T> {
         Rc::from_raw(ptr)
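
Note (not part of the patch): the src/debt/list.rs change keeps `next` as a raw
`*const Node` because the pointer value is written *before* the release CAS that
publishes the node; doing that store through an `Option<&'static Node>` is what
MIRI's strict reading of the reference rules objects to. A minimal standalone
sketch of the pattern follows; `Node`, `HEAD`, `push` and `traverse` here are
simplified stand-ins for illustration, not the crate's real types:

use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::*};

// `next` is a raw pointer, not a reference: only the pointer *value* is
// manipulated before synchronization; it is never dereferenced until after
// an Acquire load observes the Release CAS.
struct Node {
    value: usize,
    next: *const Node,
}

static HEAD: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

fn push(value: usize) {
    // Leaked so the node lives forever, mirroring the global debt list.
    let node: *mut Node = Box::into_raw(Box::new(Node { value, next: ptr::null() }));
    let mut head = HEAD.load(Relaxed);
    loop {
        // Pre-publication store of the pointer value only; nobody else can
        // see `node` yet, so writing through the raw pointer is fine.
        unsafe { (*node).next = head };
        match HEAD.compare_exchange_weak(head, node, Release, Relaxed) {
            Ok(_) => return,
            Err(old) => head = old,
        }
    }
}

fn traverse(mut f: impl FnMut(usize)) {
    // Acquire pairs with the Release CAS above; dereferencing is allowed now.
    let mut current = HEAD.load(Acquire) as *const Node;
    // SAFETY: every non-null pointer in the list was published by a Release
    // CAS and points to a leaked (hence still live) node.
    while let Some(node) = unsafe { current.as_ref() } {
        f(node.value);
        current = node.next;
    }
}

fn main() {
    push(1);
    push(2);
    traverse(|v| println!("{v}")); // prints 2 then 1 (LIFO order)
}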
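Likewise, the new `as_ptr` implementation in src/ref_cnt.rs can be exercised in
isolation. The sketch below uses a free function `arc_as_ptr` (hypothetical,
for illustration only) that performs the same shallow `ptr::read`, `into_raw`,
then `from_raw` + `forget` bracketing the patch comment describes:

use std::mem;
use std::ptr;
use std::sync::Arc;

fn arc_as_ptr<T>(me: &Arc<T>) -> *mut T {
    // Shallow, non-owning bitwise copy of the Arc; `me` still owns the ref
    // count, so the allocation stays alive while we work.
    // SAFETY: `me` is a valid reference: aligned, initialised, valid for reads.
    let copy: Arc<T> = unsafe { ptr::read(me) };

    // Consuming the copy does not touch the reference count, and the returned
    // pointer carries provenance over the allocation, unlike `&T as *const T`.
    let raw = Arc::into_raw(copy) as *mut T;

    // Close the brackets opened by `ptr::read`/`into_raw`: reconstruct the
    // shallow copy and forget it so no ref count is ever decremented.
    // SAFETY: `raw` came from `Arc::into_raw` just above.
    mem::forget(unsafe { Arc::from_raw(raw) });

    raw
}

fn main() {
    let a = Arc::new(42u32);
    let p = arc_as_ptr(&a);
    // SAFETY: `a` is still alive and the payload is not being mutated.
    assert_eq!(unsafe { *p }, 42);
    assert_eq!(1, Arc::strong_count(&a)); // the dance neither leaked nor dropped a count
}

The final `strong_count` assertion is the point of the exercise: the
read/forget pair is a runtime no-op in today's std, but formally pairs up the
ownership transfers so MIRI (and future std versions) see balanced brackets.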