
Commit

Clean up once code
This was originally intended to just change `swap` to `compare_exchange`;
however, other changes were needed too (a minimal sketch of the `swap` vs
`compare_exchange` difference follows this list):

* Load with `Relaxed` ordering seemed wrong because if it loaded `DONE`
  before the while loop it wouldn't synchronize with the storing thread.
* There's no point in repeatedly checking if the value is `NONE` so this
  check was moved above the while loop.
* Due to a mistake I wanted to inspect the code using `miri` but couldn't
  because of the FFI call. Separating the once implementation from the
  consumer allowed writing a test in pure Rust and inspecting it via `miri`.
* The initializing code now skips loading the value again because it
  already knows it's initialized.
* Orderings are now explained in the comments.
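
A minimal standalone sketch of the `swap` vs `compare_exchange` difference (the constants mirror the ones in the diff; the `claim_*` helpers are purely illustrative and not part of the commit):

use core::sync::atomic::{AtomicUsize, Ordering};

const NONE: usize = 0;
const WORKING: usize = 1;
const DONE: usize = 2;

// `swap` stores WORKING unconditionally and returns the previous value, so a
// thread that raced with a finished initializer has just overwritten DONE and
// has to put it back (this is the extra branch the old code needed).
fn claim_with_swap(state: &AtomicUsize) -> bool {
    let prev = state.swap(WORKING, Ordering::AcqRel);
    if prev == DONE {
        state.store(DONE, Ordering::Release);
    }
    prev == NONE
}

// `compare_exchange` only stores WORKING while the value is still NONE, so a
// DONE (or WORKING) state is never clobbered and no repair store is needed.
fn claim_with_compare_exchange(state: &AtomicUsize) -> bool {
    state.compare_exchange(NONE, WORKING, Ordering::Acquire, Ordering::Acquire).is_ok()
}

fn main() {
    let state = AtomicUsize::new(NONE);
    assert!(claim_with_compare_exchange(&state));  // first caller claims the work
    assert!(!claim_with_compare_exchange(&state)); // later callers see WORKING and back off
    assert!(!claim_with_swap(&AtomicUsize::new(DONE))); // the swap variant has to restore DONE
}

The `Once::run` added in the diff follows the `compare_exchange` shape, with the busy-wait loop handling the case where another thread is still `WORKING`.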
Kixunil committed Jan 5, 2022
1 parent f531be3 commit e19209d
Showing 1 changed file with 84 additions and 29 deletions.
113 changes: 84 additions & 29 deletions secp256k1-sys/src/lib.rs
@@ -663,7 +663,6 @@ impl<T> CPtr for [T] {
#[cfg(fuzzing)]
mod fuzz_dummy {
use super::*;
use core::sync::atomic::{AtomicUsize, Ordering};

#[cfg(rust_secp_no_symbol_renaming)] compile_error!("We do not support fuzzing with rust_secp_no_symbol_renaming");

@@ -673,6 +672,80 @@ mod fuzz_dummy {
fn rustsecp256k1_v0_4_1_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context;
}

mod once {
    use core::sync::atomic::{AtomicUsize, Ordering};

    const NONE: usize = 0;
    const WORKING: usize = 1;
    const DONE: usize = 2;

    pub(crate) struct Once(AtomicUsize);

    impl Once {
        pub(crate) const INIT: Once = Once(AtomicUsize::new(NONE));

        pub(crate) fn run(&self, f: impl FnOnce()) {
            // Acquire ordering because if it's DONE the following reads must go after this load.
            let mut have_ctx = self.0.load(Ordering::Acquire);
            if have_ctx == NONE {
                // Ordering: on success we're only signalling to other threads that work is in progress
                // without transferring other data, so it could be Relaxed; on failure the value may be DONE,
                // so we want Acquire to safely proceed in that case.
                // However compare_exchange doesn't allow the failure ordering to be stronger than success,
                // so both are Acquire.
                match self.0.compare_exchange(NONE, WORKING, Ordering::Acquire, Ordering::Acquire) {
                    Ok(_) => {
                        f();
                        // We wrote data in memory that other threads may read so we need Release.
                        self.0.store(DONE, Ordering::Release);
                        return;
                    },
                    Err(value) => have_ctx = value,
                }
            }
            while have_ctx != DONE {
                // Another thread is building, just busy-loop until they're done.
                assert_eq!(have_ctx, WORKING);
                // This thread will read whatever the other thread wrote so this needs to be Acquire.
                have_ctx = self.0.load(Ordering::Acquire);
                #[cfg(feature = "std")]
                ::std::thread::yield_now();
            }
        }
    }

    #[cfg(all(test, feature = "std"))]
    mod tests {
        use super::Once;

        #[test]
        fn test_once() {
            use std::cell::UnsafeCell;
            static ONCE: Once = Once::INIT;
            struct PretendSync(UnsafeCell<u32>);

            static VALUE: PretendSync = PretendSync(UnsafeCell::new(42));
            unsafe impl Sync for PretendSync {}

            let threads = (0..5).map(|_| ::std::thread::spawn(|| {
                ONCE.run(|| unsafe {
                    *VALUE.0.get() = 47;
                });
                unsafe {
                    assert_eq!(*VALUE.0.get(), 47);
                }
            }))
                .collect::<Vec<_>>();
            for thread in threads {
                thread.join().unwrap();
            }
            unsafe {
                assert_eq!(*VALUE.0.get(), 47);
            }
        }
    }
}

#[cfg(feature = "lowmemory")]
const CTX_SIZE: usize = 1024 * 65;
#[cfg(not(feature = "lowmemory"))]
@@ -683,41 +756,23 @@ mod fuzz_dummy {
CTX_SIZE
}

static HAVE_PREALLOCATED_CONTEXT: AtomicUsize = AtomicUsize::new(0);
const HAVE_CONTEXT_NONE: usize = 0;
const HAVE_CONTEXT_WORKING: usize = 1;
const HAVE_CONTEXT_DONE: usize = 2;
static HAVE_PREALLOCATED_CONTEXT: once::Once = once::Once::INIT;
static mut PREALLOCATED_CONTEXT: [u8; CTX_SIZE] = [0; CTX_SIZE];
pub unsafe fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context {
    // While applications should generally avoid creating too many contexts, sometimes fuzzers
    // perform tasks repeatedly which real applications may only do rarely. Thus, we want to
    // avoid being overly slow here. We do so by having a static context and copying it into
    // new buffers instead of recalculating it. Because we shouldn't rely on std, we use a
    // simple hand-written OnceFlag built out of an atomic to gate the global static.
    let mut have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Relaxed);
    while have_ctx != HAVE_CONTEXT_DONE {
        if have_ctx == HAVE_CONTEXT_NONE {
            have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel);
            if have_ctx == HAVE_CONTEXT_NONE {
                assert!(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
                assert_eq!(rustsecp256k1_v0_4_1_context_preallocated_create(
                    PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
                    SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
                    PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
                assert_eq!(HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_DONE, Ordering::AcqRel),
                    HAVE_CONTEXT_WORKING);
            } else if have_ctx == HAVE_CONTEXT_DONE {
                // Another thread finished while we were swapping.
                HAVE_PREALLOCATED_CONTEXT.store(HAVE_CONTEXT_DONE, Ordering::Release);
            }
        } else {
            // Another thread is building, just busy-loop until they're done.
            assert_eq!(have_ctx, HAVE_CONTEXT_WORKING);
            have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Acquire);
            #[cfg(feature = "std")]
            std::thread::yield_now();
        }
    }

    HAVE_PREALLOCATED_CONTEXT.run(|| {
        assert!(rustsecp256k1_v0_4_1_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
        assert_eq!(rustsecp256k1_v0_4_1_context_preallocated_create(
            PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
            SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
            PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
    });

    ptr::copy_nonoverlapping(PREALLOCATED_CONTEXT[..].as_ptr(), prealloc as *mut u8, CTX_SIZE);
    let ptr = (prealloc as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
    (ptr as *mut c_uint).write(flags);
