sync: add #[must_use] to lock guards (#4886)
xxchan committed Aug 9, 2022
1 parent 199878e commit ff6fbc3
Showing 7 changed files with 20 additions and 17 deletions.
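For context: `#[must_use]` on a guard type makes rustc warn whenever the guard is created and immediately discarded, a pattern that silently releases the lock. A minimal sketch of the effect (our own illustration, assuming a tokio release that includes this commit):

use tokio::sync::Mutex;

#[tokio::main]
async fn main() {
    let mutex = Mutex::new(0u32);

    // The guard returned by lock() is a temporary here, so it is
    // dropped (and the mutex unlocked) at the end of the statement.
    // With the new attribute this line warns: unused `MutexGuard`
    // that must be used, "if unused the Mutex will immediately unlock".
    mutex.lock().await;

    // Binding the guard keeps the lock held for the rest of the scope;
    // the leading underscore marks it as intentionally unread.
    let _guard = mutex.lock().await;
}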
10 changes: 5 additions & 5 deletions benches/sync_rwlock.rs
@@ -14,7 +14,7 @@ fn read_uncontended(b: &mut Bencher) {
         rt.block_on(async move {
             for _ in 0..6 {
                 let read = lock.read().await;
-                black_box(read);
+                let _read = black_box(read);
             }
         })
     });
@@ -28,7 +28,7 @@ fn read_concurrent_uncontended_multi(b: &mut Bencher) {
 
     async fn task(lock: Arc<RwLock<()>>) {
         let read = lock.read().await;
-        black_box(read);
+        let _read = black_box(read);
     }
 
     let lock = Arc::new(RwLock::new(()));
@@ -55,7 +55,7 @@ fn read_concurrent_uncontended(b: &mut Bencher) {
 
     async fn task(lock: Arc<RwLock<()>>) {
         let read = lock.read().await;
-        black_box(read);
+        let _read = black_box(read);
    }
 
     let lock = Arc::new(RwLock::new(()));
@@ -82,7 +82,7 @@ fn read_concurrent_contended_multi(b: &mut Bencher) {
 
     async fn task(lock: Arc<RwLock<()>>) {
         let read = lock.read().await;
-        black_box(read);
+        let _read = black_box(read);
     }
 
     let lock = Arc::new(RwLock::new(()));
@@ -110,7 +110,7 @@ fn read_concurrent_contended(b: &mut Bencher) {
 
     async fn task(lock: Arc<RwLock<()>>) {
         let read = lock.read().await;
-        black_box(read);
+        let _read = black_box(read);
     }
 
     let lock = Arc::new(RwLock::new(()));
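The benchmark edits are needed because `black_box` passes its argument through as its return value, and discarding a value of a `#[must_use]` type also triggers the lint; binding the returned guard to `_read` keeps the old drop timing while silencing the warning. A standalone sketch of the pattern, using `std::hint::black_box` (which has the same pass-through signature as the `bencher` version used in these benchmarks):

use std::hint::black_box;
use tokio::sync::RwLock;

#[tokio::main]
async fn main() {
    let lock = RwLock::new(());

    let read = lock.read().await;
    // black_box returns the guard it was given, so the old
    // `black_box(read);` discarded a #[must_use] value and would now
    // warn. Binding the result preserves behavior without the lint.
    let _read = black_box(read);
    // _read (the guard) is dropped here, at the end of the scope.
}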
3 changes: 2 additions & 1 deletion tokio/src/sync/mutex.rs
@@ -141,6 +141,7 @@ pub struct Mutex<T: ?Sized> {
 ///
 /// The lock is automatically released whenever the guard is dropped, at which
 /// point `lock` will succeed yet again.
+#[must_use = "if unused the Mutex will immediately unlock"]
 pub struct MutexGuard<'a, T: ?Sized> {
     #[cfg(all(tokio_unstable, feature = "tracing"))]
     resource_span: tracing::Span,
@@ -766,7 +767,7 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> {
     /// # async fn main() {
     /// #     let mutex = Mutex::new(0u32);
     /// #     let guard = mutex.lock().await;
-    /// #     unlock_and_relock(guard).await;
+    /// #     let _guard = unlock_and_relock(guard).await;
     /// # }
     /// ```
     #[inline]
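One subtlety when silencing the new warning, which the doctest fix above relies on: an underscore-prefixed binding such as `_guard` keeps the guard alive, while a bare `let _ =` pattern drops it immediately. A short sketch of the difference (our own example, not part of the diff):

use tokio::sync::Mutex;

#[tokio::main]
async fn main() {
    let mutex = Mutex::new(0u32);

    // `_guard` is a real binding: the mutex stays locked until the
    // end of this scope, even though the name is never read.
    let _guard = mutex.lock().await;
    assert!(mutex.try_lock().is_err());
    drop(_guard);

    // A bare `_` pattern never binds, so this guard is dropped at the
    // end of the statement: the lint is silenced, but the mutex is
    // immediately unlocked again.
    let _ = mutex.lock().await;
    assert!(mutex.try_lock().is_ok());
}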
1 change: 1 addition & 0 deletions tokio/src/sync/rwlock/read_guard.rs
@@ -12,6 +12,7 @@ use std::ops;
 ///
 /// [`read`]: method@crate::sync::RwLock::read
 /// [`RwLock`]: struct@crate::sync::RwLock
+#[must_use = "if unused the RwLock will immediately unlock"]
 pub struct RwLockReadGuard<'a, T: ?Sized> {
     #[cfg(all(tokio_unstable, feature = "tracing"))]
     pub(super) resource_span: tracing::Span,
1 change: 1 addition & 0 deletions tokio/src/sync/rwlock/write_guard.rs
@@ -14,6 +14,7 @@ use std::ops;
 ///
 /// [`write`]: method@crate::sync::RwLock::write
 /// [`RwLock`]: struct@crate::sync::RwLock
+#[must_use = "if unused the RwLock will immediately unlock"]
 pub struct RwLockWriteGuard<'a, T: ?Sized> {
     #[cfg(all(tokio_unstable, feature = "tracing"))]
     pub(super) resource_span: tracing::Span,
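The same reasoning applies to both `RwLock` guards: a guard that is never bound is dropped at the end of the statement, releasing the lock at once. A brief illustrative sketch (not part of the diff):

use tokio::sync::RwLock;

#[tokio::main]
async fn main() {
    let lock = RwLock::new(1);

    // This read guard is a discarded temporary, the exact pattern the
    // new attribute flags; the read lock is gone by the next line.
    lock.read().await;

    // Since no reader is actually held, exclusive access succeeds.
    let mut w = lock.try_write().expect("no outstanding readers");
    *w += 1;
}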
10 changes: 5 additions & 5 deletions tokio/tests/sync_mutex.rs
@@ -53,7 +53,7 @@ fn readiness() {
     // But once g unlocks, we can acquire it
     drop(g);
     assert!(t2.is_woken());
-    assert_ready!(t2.poll());
+    let _t2 = assert_ready!(t2.poll());
 }
 
 /*
@@ -103,7 +103,7 @@ async fn aborted_future_1() {
         timeout(Duration::from_millis(1u64), async move {
             let iv = interval(Duration::from_millis(1000));
             tokio::pin!(iv);
-            m2.lock().await;
+            let _g = m2.lock().await;
             iv.as_mut().tick().await;
             iv.as_mut().tick().await;
         })
@@ -112,7 +112,7 @@ async fn aborted_future_1() {
     }
     // This should succeed as there is no lock left for the mutex.
     timeout(Duration::from_millis(1u64), async move {
-        m1.lock().await;
+        let _g = m1.lock().await;
     })
     .await
     .expect("Mutex is locked");
@@ -134,15 +134,15 @@ async fn aborted_future_2() {
             let m2 = m1.clone();
             // Try to lock mutex in a future that is aborted prematurely
             timeout(Duration::from_millis(1u64), async move {
-                m2.lock().await;
+                let _g = m2.lock().await;
             })
             .await
             .unwrap_err();
         }
     }
     // This should succeed as there is no lock left for the mutex.
     timeout(Duration::from_millis(1u64), async move {
-        m1.lock().await;
+        let _g = m1.lock().await;
     })
     .await
     .expect("Mutex is locked");
2 changes: 1 addition & 1 deletion tokio/tests/sync_panic.rs
@@ -30,7 +30,7 @@ fn mutex_blocking_lock_panic_caller() -> Result<(), Box<dyn Error>> {
         let rt = basic();
         rt.block_on(async {
             let mutex = Mutex::new(5_u32);
-            mutex.blocking_lock();
+            let _g = mutex.blocking_lock();
         });
     });
 
10 changes: 5 additions & 5 deletions tokio/tests/sync_rwlock.rs
@@ -31,7 +31,7 @@ fn read_shared() {
     let mut t1 = spawn(rwlock.read());
     let _g1 = assert_ready!(t1.poll());
     let mut t2 = spawn(rwlock.read());
-    assert_ready!(t2.poll());
+    let _g2 = assert_ready!(t2.poll());
 }
 
 // When there is an active shared owner, exclusive access should not be possible
@@ -75,7 +75,7 @@ fn exhaust_reading() {
     let g2 = reads.pop().unwrap();
     drop(g2);
     assert!(t1.is_woken());
-    assert_ready!(t1.poll());
+    let _g1 = assert_ready!(t1.poll());
 }
 
 // When there is an active exclusive owner, subsequent exclusive access should not be possible
@@ -100,7 +100,7 @@ fn write_shared_drop() {
     assert_pending!(t2.poll());
     drop(g1);
     assert!(t2.is_woken());
-    assert_ready!(t2.poll());
+    let _g2 = assert_ready!(t2.poll());
 }
 
 // when there is an active shared owner, and exclusive access is triggered,
@@ -112,7 +112,7 @@ fn write_read_shared_pending() {
     let _g1 = assert_ready!(t1.poll());
 
     let mut t2 = spawn(rwlock.read());
-    assert_ready!(t2.poll());
+    let _g2 = assert_ready!(t2.poll());
 
     let mut t3 = spawn(rwlock.write());
     assert_pending!(t3.poll());
@@ -137,7 +137,7 @@ fn write_read_shared_drop_pending() {
     drop(t2);
 
     assert!(t3.is_woken());
-    assert_ready!(t3.poll());
+    let _t3 = assert_ready!(t3.poll());
 }
 
 // Acquire an RwLock nonexclusively by a single task
