Skip to content

Commit

Permalink
Merge pull request #86 from vorner/ub-ref
Browse files Browse the repository at this point in the history
UB fixes
  • Loading branch information
vorner committed Dec 25, 2022
2 parents a41840b + 97a65cd commit ca4b62f
Show file tree
Hide file tree
Showing 5 changed files with 50 additions and 11 deletions.
3 changes: 1 addition & 2 deletions .github/workflows/test.yaml
Expand Up @@ -231,8 +231,7 @@ jobs:
- name: Run miri
env:
PROPTEST_CASES: "10"
# TODO: Do something with the stacked borrows. Figure out what it means.
MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-disable-stacked-borrows"
MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-permissive-provenance"
run: cargo miri test --all-features

thread_sanitizer-MacOS:
Expand Down
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,6 @@
* Fix a data race reported by MIRI.
* Avoid violating stacked borrows (AFAIK these are still experimental and not
normative, but better safe than sorry). (#80).
* The `AccessConvert` wrapper is needed less often in practice (#77).

# 1.5.1
Expand Down
13 changes: 9 additions & 4 deletions src/debt/list.rs
Expand Up @@ -58,7 +58,12 @@ pub(crate) struct Node {
fast: FastSlots,
helping: HelpingSlots,
in_use: AtomicUsize,
next: Option<&'static Node>,
// Next node in the list.
//
// It is a pointer because we touch it before synchronization (we don't _dereference_ it before
// synchronization, only manipulate the pointer itself). Doing that through a reference is
// illegal under MIRI's strict interpretation of the rules.
next: *const Node,
active_writers: AtomicUsize,
}

Expand All @@ -68,7 +73,7 @@ impl Default for Node {
fast: FastSlots::default(),
helping: HelpingSlots::default(),
in_use: AtomicUsize::new(NODE_USED),
next: None,
next: ptr::null(),
active_writers: AtomicUsize::new(0),
}
}
Expand All @@ -94,7 +99,7 @@ impl Node {
if result.is_some() {
return result;
}
current = node.next;
current = unsafe { node.next.as_ref() };
}
None
}
Expand Down Expand Up @@ -165,7 +170,7 @@ impl Node {
// compare_exchange below.
let mut head = LIST_HEAD.load(Relaxed);
loop {
node.next = unsafe { head.as_ref() };
node.next = head;
if let Err(old) = LIST_HEAD.compare_exchange_weak(
head, node,
// We need to release *the whole chain* here. For that, we need to
Expand Down
13 changes: 10 additions & 3 deletions src/lib.rs
Expand Up @@ -883,7 +883,7 @@ macro_rules! t {
const WRITER_CNT: usize = 2;
const READER_CNT: usize = 3;
#[cfg(miri)]
const ITERATIONS: usize = 10;
const ITERATIONS: usize = 5;
#[cfg(not(miri))]
const ITERATIONS: usize = 100;
const SEQ: usize = 50;
Expand Down Expand Up @@ -1160,6 +1160,9 @@ macro_rules! t {
#[test]
/// Make sure the reference count and compare_and_swap works as expected.
fn cas_ref_cnt() {
#[cfg(miri)]
const ITERATIONS: usize = 10;
#[cfg(not(miri))]
const ITERATIONS: usize = 50;
let shared = ArcSwap::from(Arc::new(0));
for i in 0..ITERATIONS {
Expand All @@ -1173,7 +1176,7 @@ macro_rules! t {
// Fill up the slots sometimes
let fillup = || {
if i % 2 == 0 {
Some((0..50).map(|_| shared.load()).collect::<Vec<_>>())
Some((0..ITERATIONS).map(|_| shared.load()).collect::<Vec<_>>())
} else {
None
}
Expand Down Expand Up @@ -1272,10 +1275,14 @@ mod tests {
/// created, but contain full Arcs.
#[test]
fn lease_overflow() {
#[cfg(miri)]
const GUARD_COUNT: usize = 100;
#[cfg(not(miri))]
const GUARD_COUNT: usize = 1000;
let a = Arc::new(0);
let shared = ArcSwap::from(Arc::clone(&a));
assert_eq!(2, Arc::strong_count(&a));
let mut guards = (0..1000).map(|_| shared.load()).collect::<Vec<_>>();
let mut guards = (0..GUARD_COUNT).map(|_| shared.load()).collect::<Vec<_>>();
let count = Arc::strong_count(&a);
assert!(count > 2);
let guard = shared.load();
Expand Down
29 changes: 27 additions & 2 deletions src/ref_cnt.rs
@@ -1,3 +1,4 @@
use std::mem;
use std::ptr;
use std::rc::Rc;
use std::sync::Arc;
Expand Down Expand Up @@ -92,7 +93,29 @@ unsafe impl<T> RefCnt for Arc<T> {
Arc::into_raw(me) as *mut T
}
fn as_ptr(me: &Arc<T>) -> *mut T {
me as &T as *const T as *mut T
// Slightly convoluted way to do this, but this avoids stacked borrows violations. The same
// intention as
//
// me as &T as *const T as *mut T
//
// We first create a "shallow copy" of me - one that doesn't really own its ref count
// (that's OK, me _does_ own it, so it can't be destroyed in the meantime).
// Then we can use into_raw (which preserves not having the ref count).
//
// We need to "revert" the changes we made. In the current std implementation, the combination
// of from_raw and forget is a no-op. But formally, into_raw shall be paired with from_raw,
// and that read shall be paired with forget, to properly "close the brackets". In future
// versions of std, these may become something that's not really a no-op (unlikely, but
// possible), so we future-proof it a bit.

// SAFETY: &T cast to *const T will always be aligned, initialised and valid for reads
let ptr = Arc::into_raw(unsafe { std::ptr::read(me) });
let ptr = ptr as *mut T;

// SAFETY: We got the pointer from into_raw just above
mem::forget(unsafe { Arc::from_raw(ptr) });

ptr
}
unsafe fn from_ptr(ptr: *const T) -> Arc<T> {
Arc::from_raw(ptr)
Expand All @@ -105,7 +128,9 @@ unsafe impl<T> RefCnt for Rc<T> {
Rc::into_raw(me) as *mut T
}
fn as_ptr(me: &Rc<T>) -> *mut T {
me as &T as *const T as *mut T
// SAFETY: &T cast to *const T will always be aligned, initialised and valid for reads
let ptr = Rc::into_raw(unsafe { std::ptr::read(me) });
ptr as *mut T
}
unsafe fn from_ptr(ptr: *const T) -> Rc<T> {
Rc::from_raw(ptr)
Expand Down

2 comments on commit ca4b62f

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Track benchmarks

Benchmark suite Current: ca4b62f Previous: a41840b Ratio
uncontended/load 17 ns/iter (± 0) 18 ns/iter (± 0) 0.94
uncontended/load_full 29 ns/iter (± 0) 32 ns/iter (± 0) 0.91
uncontended/load_many 47 ns/iter (± 0) 51 ns/iter (± 0) 0.92
uncontended/store 147 ns/iter (± 1) 158 ns/iter (± 0) 0.93
uncontended/cache 1 ns/iter (± 0) 0 ns/iter (± 0) Infinity
concurrent_loads/load 20 ns/iter (± 8) 22 ns/iter (± 8) 0.91
concurrent_loads/load_full 36 ns/iter (± 13) 41 ns/iter (± 15) 0.88
concurrent_loads/load_many 80 ns/iter (± 24) 87 ns/iter (± 25) 0.92
concurrent_loads/store 1634 ns/iter (± 516) 1252 ns/iter (± 712) 1.31
concurrent_loads/cache 1 ns/iter (± 0) 1 ns/iter (± 0) 1
concurrent_store/load 75 ns/iter (± 2) 49 ns/iter (± 1) 1.53
concurrent_store/load_full 135 ns/iter (± 7) 98 ns/iter (± 17) 1.38
concurrent_store/load_many 158 ns/iter (± 15) 129 ns/iter (± 2) 1.22
concurrent_store/store 944 ns/iter (± 18) 1393 ns/iter (± 30) 0.68
concurrent_store/cache 1 ns/iter (± 0) 1 ns/iter (± 0) 1

This comment was automatically generated by workflow using github-action-benchmark.

@github-actions
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Performance Alert ⚠️

Possible performance regression was detected for benchmark 'Track benchmarks'.
Benchmark result of this commit is worse than the previous benchmark result exceeding threshold 1.50.

Benchmark suite Current: ca4b62f Previous: a41840b Ratio
uncontended/cache 1 ns/iter (± 0) 0 ns/iter (± 0) Infinity
concurrent_store/load 75 ns/iter (± 2) 49 ns/iter (± 1) 1.53

This comment was automatically generated by workflow using github-action-benchmark.

CC: @vorner

Please sign in to comment.