From 1176693209e12d2df42d1b0ada388d2624e2dfe8 Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Sun, 22 May 2022 13:32:34 +0900
Subject: [PATCH 1/4] Add test for issue 833

---
 crossbeam-utils/tests/atomic_cell.rs | 40 ++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

diff --git a/crossbeam-utils/tests/atomic_cell.rs b/crossbeam-utils/tests/atomic_cell.rs
index da7a6e1fa..93be2bbba 100644
--- a/crossbeam-utils/tests/atomic_cell.rs
+++ b/crossbeam-utils/tests/atomic_cell.rs
@@ -330,3 +330,43 @@ fn issue_748() {
     let x = AtomicCell::new(Test::FieldLess);
     assert_eq!(x.load(), Test::FieldLess);
 }
+
+// https://github.com/crossbeam-rs/crossbeam/issues/833
+#[rustversion::since(1.40)] // const_constructor requires Rust 1.40
+#[test]
+fn issue_833() {
+    use std::num::NonZeroU128;
+    use std::thread;
+
+    const N: usize = 1_000_000;
+
+    #[allow(dead_code)]
+    enum Enum {
+        NeverConstructed,
+        Cell(AtomicCell<NonZeroU128>),
+    }
+
+    static STATIC: Enum = Enum::Cell(AtomicCell::new(match NonZeroU128::new(1) {
+        Some(nonzero) => nonzero,
+        None => unreachable!(),
+    }));
+
+    thread::spawn(|| {
+        let cell = match &STATIC {
+            Enum::NeverConstructed => unreachable!(),
+            Enum::Cell(cell) => cell,
+        };
+        let x = NonZeroU128::new(0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000).unwrap();
+        let y = NonZeroU128::new(0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF).unwrap();
+        loop {
+            cell.store(x);
+            cell.store(y);
+        }
+    });
+
+    for _ in 0..N {
+        if let Enum::NeverConstructed = STATIC {
+            unreachable!(":(");
+        }
+    }
+}
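
Why this test catches the bug: `Enum` relies on rustc's niche optimization. `NonZeroU128` can never be zero, so the compiler stores the `NeverConstructed` discriminant as the all-zero bit pattern inside the cell's own bytes. The values `x` and `y` differ in every bit, so on targets without native 128-bit atomics (where `AtomicCell<NonZeroU128>` takes the seqlock fallback and writes the value non-atomically) a store torn at a 64-bit boundary momentarily passes through all-zero state, which a concurrent `match` on `STATIC` misreads as `NeverConstructed`. A standalone sketch of that layout argument (`Demo` is a hypothetical stand-in for the test's `Enum`, not part of the patch):

```rust
use std::mem::size_of;
use std::num::NonZeroU128;

// Hypothetical stand-in for the test's `Enum`.
#[allow(dead_code)]
enum Demo {
    NeverConstructed,
    Cell(NonZeroU128),
}

fn main() {
    // rustc packs the discriminant into the payload's forbidden zero value,
    // so `Demo` is exactly as large as `NonZeroU128` and the all-zero bit
    // pattern *is* `Demo::NeverConstructed`.
    assert_eq!(size_of::<Demo>(), size_of::<NonZeroU128>());

    // The two values stored by the racing thread are bitwise complements:
    // a store torn into two 64-bit halves momentarily produces either all
    // ones or all zeros -- and all zeros reads back as `NeverConstructed`.
    let x: u128 = 0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000;
    let y: u128 = 0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF;
    assert_eq!(x & y, 0);
    assert_eq!(x | y, u128::MAX);
}
```
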
From d670b12a51c4486ebb566d1dd190a982dd285d1b Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Sun, 22 May 2022 13:32:38 +0900
Subject: [PATCH 2/4] Use `UnsafeCell<MaybeUninit<T>>` in AtomicCell

---
 crossbeam-utils/src/atomic/atomic_cell.rs | 110 +++++++++++-----------
 1 file changed, 56 insertions(+), 54 deletions(-)

diff --git a/crossbeam-utils/src/atomic/atomic_cell.rs b/crossbeam-utils/src/atomic/atomic_cell.rs
index 891425e48..3225ff1cc 100644
--- a/crossbeam-utils/src/atomic/atomic_cell.rs
+++ b/crossbeam-utils/src/atomic/atomic_cell.rs
@@ -5,7 +5,7 @@ use crate::primitive::sync::atomic::{self, AtomicBool};
 use core::cell::UnsafeCell;
 use core::cmp;
 use core::fmt;
-use core::mem;
+use core::mem::{self, MaybeUninit};
 use core::sync::atomic::Ordering;
 
 use core::ptr;
@@ -30,13 +30,16 @@ use super::seq_lock::SeqLock;
 /// [`Acquire`]: std::sync::atomic::Ordering::Acquire
 /// [`Release`]: std::sync::atomic::Ordering::Release
 #[repr(transparent)]
-pub struct AtomicCell<T: ?Sized> {
+pub struct AtomicCell<T> {
     /// The inner value.
     ///
     /// If this value can be transmuted into a primitive atomic type, it will be treated as such.
     /// Otherwise, all potentially concurrent operations on this data will be protected by a global
     /// lock.
-    value: UnsafeCell<T>,
+    ///
+    /// Using MaybeUninit to prevent code outside the cell from observing partially initialized state:
+    /// <https://github.com/crossbeam-rs/crossbeam/issues/833>
+    value: UnsafeCell<MaybeUninit<T>>,
 }
 
 unsafe impl<T: Send> Send for AtomicCell<T> {}
@@ -59,7 +62,7 @@ impl<T> AtomicCell<T> {
     /// ```
     pub const fn new(val: T) -> AtomicCell<T> {
         AtomicCell {
-            value: UnsafeCell::new(val),
+            value: UnsafeCell::new(MaybeUninit::new(val)),
         }
     }
 
@@ -76,7 +79,8 @@ impl<T> AtomicCell<T> {
     /// assert_eq!(v, 7);
     /// ```
     pub fn into_inner(self) -> T {
-        self.value.into_inner()
+        // SAFETY: we'll never store uninitialized `T`
+        unsafe { self.value.into_inner().assume_init() }
     }
 
     /// Returns `true` if operations on values of this type are lock-free.
@@ -129,7 +133,7 @@ impl<T> AtomicCell<T> {
             drop(self.swap(val));
         } else {
             unsafe {
-                atomic_store(self.value.get(), val);
+                atomic_store(self.as_ptr(), val);
             }
         }
     }
@@ -148,11 +152,9 @@ impl<T> AtomicCell<T> {
     /// assert_eq!(a.load(), 8);
     /// ```
     pub fn swap(&self, val: T) -> T {
-        unsafe { atomic_swap(self.value.get(), val) }
+        unsafe { atomic_swap(self.as_ptr(), val) }
     }
-}
 
-impl<T: ?Sized> AtomicCell<T> {
     /// Returns a raw pointer to the underlying data in this atomic cell.
     ///
     /// # Examples
@@ -166,7 +168,7 @@ impl<T: ?Sized> AtomicCell<T> {
     /// ```
     #[inline]
     pub fn as_ptr(&self) -> *mut T {
-        self.value.get()
+        self.value.get() as *mut T
     }
 }
 
@@ -202,7 +204,7 @@ impl<T: Copy> AtomicCell<T> {
     /// assert_eq!(a.load(), 7);
     /// ```
     pub fn load(&self) -> T {
-        unsafe { atomic_load(self.value.get()) }
+        unsafe { atomic_load(self.as_ptr()) }
     }
 }
 
@@ -254,7 +256,7 @@ impl<T: Copy + Eq> AtomicCell<T> {
     /// assert_eq!(a.load(), 2);
     /// ```
     pub fn compare_exchange(&self, current: T, new: T) -> Result<T, T> {
-        unsafe { atomic_compare_exchange_weak(self.value.get(), current, new) }
+        unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) }
     }
 
     /// Fetches the value, and applies a function to it that returns an optional
@@ -311,8 +313,8 @@ macro_rules! impl_arithmetic {
             /// ```
             #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value = value.wrapping_add(val);
                 old
@@ -334,8 +336,8 @@ macro_rules! impl_arithmetic {
            /// ```
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value = value.wrapping_sub(val);
                 old
@@ -355,8 +357,8 @@ macro_rules! impl_arithmetic {
            /// ```
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value &= val;
                 old
@@ -376,8 +378,8 @@ macro_rules! impl_arithmetic {
            /// ```
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value = !(old & val);
                 old
@@ -397,8 +399,8 @@ macro_rules! impl_arithmetic {
            /// ```
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value |= val;
                 old
@@ -418,8 +420,8 @@ macro_rules! impl_arithmetic {
            /// ```
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value ^= val;
                 old
@@ -440,8 +442,8 @@ macro_rules! impl_arithmetic {
            /// ```
            #[inline]
            pub fn fetch_max(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value = cmp::max(old, val);
                 old
@@ -462,8 +464,8 @@ macro_rules! impl_arithmetic {
            /// ```
            #[inline]
            pub fn fetch_min(&self, val: $t) -> $t {
-                let _guard = lock(self.value.get() as usize).write();
-                let value = unsafe { &mut *(self.value.get()) };
+                let _guard = lock(self.as_ptr() as usize).write();
+                let value = unsafe { &mut *(self.as_ptr()) };
                 let old = *value;
                 *value = cmp::min(old, val);
                 old
@@ -489,11 +491,11 @@ macro_rules! impl_arithmetic {
            #[inline]
            pub fn fetch_add(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.value.get() as *const $atomic) };
+                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_add(val, Ordering::AcqRel)
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = value.wrapping_add(val);
                    old
@@ -517,11 +519,11 @@ macro_rules! impl_arithmetic {
            #[inline]
            pub fn fetch_sub(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.value.get() as *const $atomic) };
+                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_sub(val, Ordering::AcqRel)
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = value.wrapping_sub(val);
                    old
@@ -543,11 +545,11 @@ macro_rules! impl_arithmetic {
            #[inline]
            pub fn fetch_and(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.value.get() as *const $atomic) };
+                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_and(val, Ordering::AcqRel)
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value &= val;
                    old
@@ -569,11 +571,11 @@ macro_rules! impl_arithmetic {
            #[inline]
            pub fn fetch_nand(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.value.get() as *const $atomic) };
+                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_nand(val, Ordering::AcqRel)
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = !(old & val);
                    old
@@ -595,11 +597,11 @@ macro_rules! impl_arithmetic {
            #[inline]
            pub fn fetch_or(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.value.get() as *const $atomic) };
+                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_or(val, Ordering::AcqRel)
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value |= val;
                    old
@@ -621,11 +623,11 @@ macro_rules! impl_arithmetic {
            #[inline]
            pub fn fetch_xor(&self, val: $t) -> $t {
                if can_transmute::<$t, $atomic>() {
-                    let a = unsafe { &*(self.value.get() as *const $atomic) };
+                    let a = unsafe { &*(self.as_ptr() as *const $atomic) };
                    a.fetch_xor(val, Ordering::AcqRel)
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value ^= val;
                    old
@@ -651,8 +653,8 @@ macro_rules! impl_arithmetic {
                    // TODO: Atomic*::fetch_max requires Rust 1.45.
                    self.fetch_update(|old| Some(cmp::max(old, val))).unwrap()
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::max(old, val);
                    old
@@ -678,8 +680,8 @@ macro_rules! impl_arithmetic {
                    // TODO: Atomic*::fetch_min requires Rust 1.45.
                    self.fetch_update(|old| Some(cmp::min(old, val))).unwrap()
                } else {
-                    let _guard = lock(self.value.get() as usize).write();
-                    let value = unsafe { &mut *(self.value.get()) };
+                    let _guard = lock(self.as_ptr() as usize).write();
+                    let value = unsafe { &mut *(self.as_ptr()) };
                    let old = *value;
                    *value = cmp::min(old, val);
                    old
@@ -738,7 +740,7 @@ impl AtomicCell<bool> {
     /// ```
     #[inline]
     pub fn fetch_and(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
+        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
         a.fetch_and(val, Ordering::AcqRel)
     }
 
@@ -762,7 +764,7 @@ impl AtomicCell<bool> {
     /// ```
     #[inline]
     pub fn fetch_nand(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
+        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
         a.fetch_nand(val, Ordering::AcqRel)
     }
 
@@ -783,7 +785,7 @@ impl AtomicCell<bool> {
     /// ```
     #[inline]
     pub fn fetch_or(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
+        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
         a.fetch_or(val, Ordering::AcqRel)
     }
 
@@ -804,7 +806,7 @@ impl AtomicCell<bool> {
     /// ```
     #[inline]
     pub fn fetch_xor(&self, val: bool) -> bool {
-        let a = unsafe { &*(self.value.get() as *const AtomicBool) };
+        let a = unsafe { &*(self.as_ptr() as *const AtomicBool) };
         a.fetch_xor(val, Ordering::AcqRel)
     }
 }
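
The point of the `MaybeUninit` wrapper is layout, not initialization: `MaybeUninit<T>` permits every bit pattern, so the cell's bytes no longer carry `T`'s validity invariants (niches) that an enclosing type could hide its discriminant in. A quick standalone sketch of that effect (illustrative, not part of the patch):

```rust
use std::mem::{size_of, MaybeUninit};
use std::num::NonZeroU128;

fn main() {
    // `NonZeroU128` forbids zero, so `Option` can hide its discriminant in
    // that niche and add no extra bytes.
    assert_eq!(size_of::<Option<NonZeroU128>>(), size_of::<NonZeroU128>());

    // `MaybeUninit<T>` allows *any* bit pattern, so the niche is gone and an
    // enclosing enum must keep its discriminant outside the payload, where a
    // torn write inside the cell can no longer corrupt it.
    assert!(size_of::<Option<MaybeUninit<NonZeroU128>>>() > size_of::<NonZeroU128>());
}
```

This is also why the patch drops the struct's `?Sized` bound and folds the separate `as_ptr` impl block into the main one: `MaybeUninit<T>` requires `T: Sized`.
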
From cf35e362d7b44c6b7638b7d7fd0ecc3d3b30661c Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Tue, 24 May 2022 22:08:24 +0900
Subject: [PATCH 3/4] impl Drop for AtomicCell

---
 crossbeam-utils/src/atomic/atomic_cell.rs | 34 +++++++++++++++++++++--
 1 file changed, 31 insertions(+), 3 deletions(-)

diff --git a/crossbeam-utils/src/atomic/atomic_cell.rs b/crossbeam-utils/src/atomic/atomic_cell.rs
index 3225ff1cc..9fed45d4c 100644
--- a/crossbeam-utils/src/atomic/atomic_cell.rs
+++ b/crossbeam-utils/src/atomic/atomic_cell.rs
@@ -5,7 +5,7 @@ use crate::primitive::sync::atomic::{self, AtomicBool};
 use core::cell::UnsafeCell;
 use core::cmp;
 use core::fmt;
-use core::mem::{self, MaybeUninit};
+use core::mem::{self, ManuallyDrop, MaybeUninit};
 use core::sync::atomic::Ordering;
 
 use core::ptr;
@@ -39,6 +39,10 @@ pub struct AtomicCell<T> {
     ///
     /// Using MaybeUninit to prevent code outside the cell from observing partially initialized state:
     /// <https://github.com/crossbeam-rs/crossbeam/issues/833>
+    ///
+    /// Note:
+    /// - we'll never store uninitialized `T` due to our API only using initialized `T`.
+    /// - this `MaybeUninit` does *not* fix <https://github.com/crossbeam-rs/crossbeam/issues/315>.
     value: UnsafeCell<MaybeUninit<T>>,
 }
 
@@ -68,6 +72,9 @@ impl<T> AtomicCell<T> {
 
     /// Consumes the atomic and returns the contained value.
     ///
+    /// This is safe because passing `self` by value guarantees that no other threads are
+    /// concurrently accessing the atomic data.
+    ///
     /// # Examples
     ///
     /// ```
@@ -79,8 +86,13 @@ impl<T> AtomicCell<T> {
     /// assert_eq!(v, 7);
     /// ```
     pub fn into_inner(self) -> T {
-        // SAFETY: we'll never store uninitialized `T`
-        unsafe { self.value.into_inner().assume_init() }
+        let this = ManuallyDrop::new(self);
+        // SAFETY:
+        // - passing `self` by value guarantees that no other threads are concurrently
+        //   accessing the atomic data
+        // - the raw pointer passed in is valid because we got it from an owned value.
+        // - `ManuallyDrop` prevents double dropping `T`
+        unsafe { this.as_ptr().read() }
     }
 
     /// Returns `true` if operations on values of this type are lock-free.
@@ -294,6 +306,22 @@ impl<T: Copy + Eq> AtomicCell<T> {
     }
 }
 
+// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop`
+// for `AtomicCell` to avoid leaks of non-`Copy` types.
+impl<T> Drop for AtomicCell<T> {
+    fn drop(&mut self) {
+        if mem::needs_drop::<T>() {
+            // SAFETY:
+            // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data
+            // - the raw pointer passed in is valid because we got it from a reference
+            // - `MaybeUninit` prevents double dropping `T`
+            unsafe {
+                self.as_ptr().drop_in_place();
+            }
+        }
+    }
+}
+
 macro_rules! impl_arithmetic {
     ($t:ty, fallback, $example:tt) => {
         impl AtomicCell<$t> {
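
With the field wrapped in `MaybeUninit`, the compiler no longer drops `T` automatically, hence the manual `Drop` impl; and `into_inner` must keep `self`'s own destructor from running (via `ManuallyDrop`) while it moves the value out, or the value would be dropped twice. A small usage sketch of the post-patch semantics with a non-`Copy` type (illustrative, not from the patch):

```rust
use crossbeam_utils::atomic::AtomicCell;

fn main() {
    let cell = AtomicCell::new(String::from("hello"));

    // `store` drops the previous `String`; without the manual `Drop` impl,
    // a value still in the cell at end of scope would leak instead.
    cell.store(String::from("world"));

    // `into_inner` moves the value out exactly once: `ManuallyDrop` stops
    // `AtomicCell`'s own `Drop` from running again on the same bytes.
    let s = cell.into_inner();
    assert_eq!(s, "world");
}
```
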
From 012b0c29824029737ad3b40d48a842e9ce15a58e Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Tue, 24 May 2022 22:18:19 +0900
Subject: [PATCH 4/4] Fix miri test

---
 crossbeam-utils/tests/atomic_cell.rs | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/crossbeam-utils/tests/atomic_cell.rs b/crossbeam-utils/tests/atomic_cell.rs
index 93be2bbba..a1d102210 100644
--- a/crossbeam-utils/tests/atomic_cell.rs
+++ b/crossbeam-utils/tests/atomic_cell.rs
@@ -336,8 +336,12 @@ fn issue_748() {
 #[test]
 fn issue_833() {
     use std::num::NonZeroU128;
+    use std::sync::atomic::{AtomicBool, Ordering};
     use std::thread;
 
+    #[cfg(miri)]
+    const N: usize = 10_000;
+    #[cfg(not(miri))]
     const N: usize = 1_000_000;
 
     #[allow(dead_code)]
@@ -350,15 +354,16 @@ fn issue_833() {
         Some(nonzero) => nonzero,
         None => unreachable!(),
     }));
+    static FINISHED: AtomicBool = AtomicBool::new(false);
 
-    thread::spawn(|| {
+    let handle = thread::spawn(|| {
         let cell = match &STATIC {
             Enum::NeverConstructed => unreachable!(),
             Enum::Cell(cell) => cell,
         };
         let x = NonZeroU128::new(0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000).unwrap();
         let y = NonZeroU128::new(0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF).unwrap();
-        loop {
+        while !FINISHED.load(Ordering::Relaxed) {
             cell.store(x);
             cell.store(y);
         }
@@ -369,4 +374,7 @@ fn issue_833() {
             unreachable!(":(");
         }
     }
+
+    FINISHED.store(true, Ordering::Relaxed);
+    handle.join().unwrap();
 }
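
The Miri fix is the standard stop-flag pattern: the worker previously looped forever, and Miri errors out if the main thread exits while other threads are still running, so the test now bounds the worker's lifetime and joins it (and shrinks `N` under Miri, since interpreted execution is far slower). `Relaxed` suffices because the flag only terminates the loop; it publishes no data. The pattern in isolation (a sketch, independent of the patch):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

// Stop-flag pattern used by the fixed test: `Relaxed` is enough because the
// flag only ends the loop; it does not synchronize any other data.
static STOP: AtomicBool = AtomicBool::new(false);

fn main() {
    let worker = thread::spawn(|| {
        let mut iters = 0u64;
        while !STOP.load(Ordering::Relaxed) {
            iters += 1; // stand-in for the test's cell.store(x) / cell.store(y)
        }
        iters
    });
    STOP.store(true, Ordering::Relaxed);
    println!("worker ran {} iterations", worker.join().unwrap());
}
```
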