Auto merge of #2114 - cbeuw:shim-rmw, r=RalfJung
Use atomic RMW for `{mutex, rwlock, cond, srwlock}_get_or_create_id` functions

This is required for #1963

The `{mutex, rwlock, cond, srwlock}_get_or_create_id()` functions currently check whether an ID field is 0 using an atomic read; if it is, they allocate a new ID and store it with a separate atomic write. This is fine without weak memory: for instance, in `pthread_mutex_lock`, which may be called by two threads concurrently, only one thread can read 0, create a new ID, and write it back; the thread scheduled later always sees the newly created ID and never 0.
```rust
    fn pthread_mutex_lock(&mut self, mutex_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> {
        let this = self.eval_context_mut();

        let kind = mutex_get_kind(this, mutex_op)?.check_init()?;
        let id = mutex_get_or_create_id(this, mutex_op)?;
        let active_thread = this.get_active_thread();
```
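
By way of illustration, a minimal user-level Rust analogue of that read-then-write shape might look as follows. The `slot` field and `NEXT_ID` counter are hypothetical stand-ins for the pthread ID field and Miri's internal allocation, not real Miri APIs.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Hypothetical global ID allocator standing in for Miri's internal sync-object table.
static NEXT_ID: AtomicU32 = AtomicU32::new(1);

// The fragile shape: a separate atomic load followed by an independent atomic store.
fn get_or_create_id_racy(slot: &AtomicU32) -> u32 {
    let id = slot.load(Ordering::Relaxed);
    if id == 0 {
        // Allocate a fresh ID and publish it with a second, unrelated store.
        let new_id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
        slot.store(new_id, Ordering::Relaxed);
        new_id
    } else {
        id
    }
}
```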

However, with weak memory behaviour, both threads may read 0: the first thread has to see 0 because nothing else has been written to the field yet, and the second thread is not guaranteed to observe the latest value. This causes a duplicate mutex to be created, with both threads "successfully" acquiring the lock at the same time.

This is a typical pattern that calls for an atomic RMW. An RMW *always* reads the latest value at a location, so only one thread can observe 0 and create the new mutex and ID; all others scheduled later will see that ID.
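
A minimal sketch of the RMW-based shape under the same hypothetical names. Note that the actual shims only allocate the new ID when the compare-exchange succeeds (the closure returns `None` and the caller pushes the new entry), whereas this sketch reserves a candidate eagerly for simplicity.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

// Hypothetical global ID allocator, as in the previous sketch.
static NEXT_ID: AtomicU32 = AtomicU32::new(1);

// The RMW shape: a single compare_exchange both observes the latest value and,
// if the slot still holds 0, installs the new ID in the same atomic step.
fn get_or_create_id(slot: &AtomicU32) -> u32 {
    let candidate = NEXT_ID.fetch_add(1, Ordering::Relaxed);
    match slot.compare_exchange(0, candidate, Ordering::Relaxed, Ordering::Relaxed) {
        Ok(_) => candidate,        // we won: the slot now holds our fresh ID
        Err(existing) => existing, // another thread installed an ID first; use theirs
    }
}
```
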
bors committed May 13, 2022
2 parents d33e7fc + 9e38dc4 commit 3f111c1
Showing 5 changed files with 170 additions and 63 deletions.
20 changes: 5 additions & 15 deletions src/data_race.rs
@@ -73,9 +73,9 @@ use rustc_middle::{mir, ty::layout::TyAndLayout};
use rustc_target::abi::Size;

use crate::{
AllocId, AllocRange, ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind,
MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar,
ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx,
AllocId, AllocRange, HelpersEvalContextExt, ImmTy, Immediate, InterpResult, MPlaceTy,
MemoryKind, MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap,
Scalar, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx,
};

pub type AllocExtra = VClockAlloc;
@@ -450,12 +450,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);

// Ensure that the following read at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
this.read_scalar_atomic(&value_place, atomic)
}

@@ -469,12 +464,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);

// Ensure that the following read at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
this.write_scalar_atomic(value.into(), &value_place, atomic)
}

25 changes: 17 additions & 8 deletions src/helpers.rs
@@ -597,18 +597,31 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
}

fn read_scalar_at_offset(
/// Calculates the MPlaceTy given the offset and layout of an access on an operand
fn deref_operand_and_offset(
&self,
op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
let this = self.eval_context_ref();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);
// Ensure that the following read at an offset is within bounds

// Ensure that the access is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
Ok(value_place)
}

fn read_scalar_at_offset(
&self,
op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
this.read_scalar(&value_place.into())
}

@@ -620,11 +633,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ()> {
let this = self.eval_context_mut();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);
// Ensure that the following read at an offset is within bounds
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
this.write_scalar(value, &value_place.into())
}

96 changes: 66 additions & 30 deletions src/shims/posix/sync.rs
@@ -112,16 +112,28 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, MutexId> {
let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid mutex id. Need to allocate
// a new mutex.
let id = ecx.mutex_create();
mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(MutexId::from_u32(id))
}
let value_place = ecx.deref_operand_and_offset(mutex_op, 4, ecx.machine.layouts.u32)?;

ecx.mutex_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(MutexId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
@@ -156,16 +168,28 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
rwlock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new read-write lock.
let id = ecx.rwlock_create();
rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
}
let value_place = ecx.deref_operand_and_offset(rwlock_op, 4, ecx.machine.layouts.u32)?;

ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

// pthread_condattr_t
@@ -228,16 +252,28 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, CondvarId> {
let id = cond_get_id(ecx, cond_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid conditional variable id.
// Need to allocate a new id.
let id = ecx.condvar_create();
cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(CondvarId::from_u32(id))
}
let value_place = ecx.deref_operand_and_offset(cond_op, 4, ecx.machine.layouts.u32)?;

ecx.condvar_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

fn cond_get_clock_id<'mir, 'tcx: 'mir>(
32 changes: 22 additions & 10 deletions src/shims/windows/sync.rs
@@ -7,16 +7,28 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
lock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new rwlock.
let id = ecx.rwlock_create();
ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
}
let value_place = ecx.deref_operand_and_offset(lock_op, 0, ecx.machine.layouts.u32)?;

ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
60 changes: 60 additions & 0 deletions src/sync.rs
@@ -215,6 +215,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.machine.threads.sync.mutexes.push(Default::default())
}

#[inline]
/// Provides the closure with the next MutexId. Creates that mutex if the closure returns None,
/// otherwise returns the value from the closure
fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
where
F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.mutexes.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.mutexes.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}

#[inline]
/// Get the id of the thread that currently owns this lock.
fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
@@ -297,6 +315,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.machine.threads.sync.rwlocks.push(Default::default())
}

#[inline]
/// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None,
/// otherwise returns the value from the closure
fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
where
F: FnOnce(
&mut MiriEvalContext<'mir, 'tcx>,
RwLockId,
) -> InterpResult<'tcx, Option<RwLockId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.rwlocks.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}

#[inline]
/// Check if locked.
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
@@ -445,6 +484,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
this.machine.threads.sync.condvars.push(Default::default())
}

#[inline]
/// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None,
/// otherwise returns the value from the closure
fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
where
F: FnOnce(
&mut MiriEvalContext<'mir, 'tcx>,
CondvarId,
) -> InterpResult<'tcx, Option<CondvarId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.condvars.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.condvars.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}

#[inline]
/// Is the conditional variable awaited?
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
