Port "fix race with dropping mpsc::Receiver" to 0.3
taiki-e committed Jan 1, 2021
1 parent 73aaf4a · commit 4b20091
Showing 2 changed files with 194 additions and 17 deletions.
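For context, the race being ported here is between a try_send() that reports success and a Receiver that is dropped concurrently: the receiver's Drop must keep draining until every message a sender has already accounted for actually lands in the queue, otherwise a "successfully sent" item can outlive the channel. The following is a rough sketch of that scenario against the public futures 0.3 mpsc API; it is not part of the commit, and the details (zero buffer, Arc payload) are only chosen to mirror the stress test added below.

use std::sync::Arc;
use std::thread;

use futures::channel::mpsc;

fn main() {
    // Zero-buffer bounded channel, as in the stress test added below.
    let (mut tx, rx) = mpsc::channel::<Arc<()>>(0);

    // Drop the receiver on another thread while try_send() races with it.
    let receiver_thread = thread::spawn(move || drop(rx));

    let item = Arc::new(());
    let weak = Arc::downgrade(&item);

    match tx.try_send(item) {
        // A successful send hands ownership to the channel; the receiver's
        // Drop impl is responsible for draining (and dropping) it, so `weak`
        // should eventually stop upgrading.
        Ok(()) => {}
        // Otherwise the channel was full or already disconnected; the error
        // carries the item back, so nothing is lost either way.
        Err(err) => assert!(err.is_full() || err.is_disconnected()),
    }

    receiver_thread.join().unwrap();
    drop(weak);
}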
75 changes: 59 additions & 16 deletions futures-channel/src/mpsc/mod.rs
@@ -86,6 +86,7 @@ use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::thread;

use crate::mpsc::queue::Queue;

@@ -1047,7 +1048,12 @@ impl<T> Receiver<T> {
}
None => {
let state = decode_state(inner.state.load(SeqCst));
if state.is_open || state.num_messages != 0 {
if state.is_closed() {
// If closed flag is set AND there are no pending messages
// it means end of stream
self.inner = None;
Poll::Ready(None)
} else {
// If queue is open, we need to return Pending
// to be woken up when new messages arrive.
// If queue is closed but num_messages is non-zero,
@@ -1056,11 +1062,6 @@
// so we need to park until sender unparks the task
// after queueing the message.
Poll::Pending
} else {
// If closed flag is set AND there are no pending messages
// it means end of stream
self.inner = None;
Poll::Ready(None)
}
}
}
@@ -1126,8 +1127,26 @@ impl<T> Drop for Receiver<T> {
// Drain the channel of all pending messages
self.close();
if self.inner.is_some() {
while let Poll::Ready(Some(..)) = self.next_message() {
// ...
loop {
match self.next_message() {
Poll::Ready(Some(_)) => {}
Poll::Ready(None) => break,
Poll::Pending => {
let state = decode_state(self.inner.as_ref().unwrap().state.load(SeqCst));

// If the channel is closed, then there is no need to park.
if state.is_closed() {
break;
}

// TODO: Spinning isn't ideal, it might be worth
// investigating using a condvar or some other strategy
// here. That said, if this case is hit, then another thread
// is about to push the value into the queue and this isn't
// the only spinlock in the impl right now.
thread::yield_now();
}
}
}
}
}
@@ -1173,7 +1192,12 @@ impl<T> UnboundedReceiver<T> {
}
None => {
let state = decode_state(inner.state.load(SeqCst));
if state.is_open || state.num_messages != 0 {
if state.is_closed() {
// If closed flag is set AND there are no pending messages
// it means end of stream
self.inner = None;
Poll::Ready(None)
} else {
// If queue is open, we need to return Pending
// to be woken up when new messages arrive.
// If queue is closed but num_messages is non-zero,
@@ -1182,11 +1206,6 @@
// so we need to park until sender unparks the task
// after queueing the message.
Poll::Pending
} else {
// If closed flag is set AND there are no pending messages
// it means end of stream
self.inner = None;
Poll::Ready(None)
}
}
}
@@ -1240,8 +1259,26 @@ impl<T> Drop for UnboundedReceiver<T> {
// Drain the channel of all pending messages
self.close();
if self.inner.is_some() {
while let Poll::Ready(Some(..)) = self.next_message() {
// ...
loop {
match self.next_message() {
Poll::Ready(Some(_)) => {}
Poll::Ready(None) => break,
Poll::Pending => {
let state = decode_state(self.inner.as_ref().unwrap().state.load(SeqCst));

// If the channel is closed, then there is no need to park.
if state.is_closed() {
break;
}

// TODO: Spinning isn't ideal, it might be worth
// investigating using a condvar or some other strategy
// here. That said, if this case is hit, then another thread
// is about to push the value into the queue and this isn't
// the only spinlock in the impl right now.
thread::yield_now();
}
}
}
}
}
@@ -1289,6 +1326,12 @@ unsafe impl<T: Send> Sync for UnboundedInner<T> {}
unsafe impl<T: Send> Send for BoundedInner<T> {}
unsafe impl<T: Send> Sync for BoundedInner<T> {}

impl State {
fn is_closed(&self) -> bool {
!self.is_open && self.num_messages == 0
}
}

/*
*
* ===== Helpers =====
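The Drop implementations above can see Poll::Pending even after close() because a sender bumps num_messages in the shared state before it pushes the message onto the queue; in that window the queue is empty while the state says a message is outstanding, so the receiver has to yield and retry instead of bailing out. Below is a toy model of that window; the names are hypothetical and nothing here is taken from the crate's internals.

use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use std::sync::Mutex;
use std::thread;

// Toy stand-in for the shared channel state: a sender "reserves" a message
// before it actually pushes it, mirroring the two-step protocol that makes
// the draining receiver observe Pending.
struct ToyChannel<T> {
    num_messages: AtomicUsize,
    queue: Mutex<Vec<T>>,
}

impl<T> ToyChannel<T> {
    fn send(&self, value: T) {
        self.num_messages.fetch_add(1, SeqCst); // slot reserved first...
        // ...a receiver draining right here sees a reserved slot with an
        // empty queue, i.e. the Poll::Pending case handled above...
        self.queue.lock().unwrap().push(value); // ...message pushed second.
    }

    // What the Drop impls do in spirit: keep draining until every reserved
    // slot has produced a message, yielding while one is still in flight.
    fn drain(&self) -> Vec<T> {
        let mut drained = Vec::new();
        while drained.len() < self.num_messages.load(SeqCst) {
            match self.queue.lock().unwrap().pop() {
                Some(value) => drained.push(value),
                None => thread::yield_now(), // in-flight message, spin briefly
            }
        }
        drained
    }
}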
136 changes: 135 additions & 1 deletion futures-channel/tests/mpsc-close.rs
@@ -1,9 +1,13 @@
use futures::channel::mpsc;
use futures::executor::block_on;
use futures::future::Future;
use futures::sink::SinkExt;
use futures::stream::StreamExt;
use std::sync::Arc;
use futures::task::{Context, Poll};
use std::pin::Pin;
use std::sync::{Arc, Weak};
use std::thread;
use std::time::{Duration, Instant};

#[test]
fn smoke() {
@@ -142,3 +146,133 @@ fn single_receiver_drop_closes_channel_and_drains() {
assert!(sender.is_closed());
}
}

// Stress test that `try_send()`s occurring concurrently with receiver
// close/drops don't appear as successful sends.
#[test]
fn stress_try_send_as_receiver_closes() {
const AMT: usize = 10000;
// To provide variable timing characteristics (in the hopes of
// reproducing the collision that leads to a race), we busy-re-poll
// the test MPSC receiver a variable number of times before actually
// stopping. We vary this countdown between 1 and the following
// value.
const MAX_COUNTDOWN: usize = 20;
    // When we detect that a successfully sent item is still in the
    // queue after a disconnect, we spin for up to SPIN_TIMEOUT_S seconds
    // to confirm that it is a persistent condition and not a
    // concurrency illusion.
const SPIN_TIMEOUT_S: u64 = 10;
const SPIN_SLEEP_MS: u64 = 10;
struct TestRx {
rx: mpsc::Receiver<Arc<()>>,
// The number of times to query `rx` before dropping it.
poll_count: usize
}
struct TestTask {
command_rx: mpsc::Receiver<TestRx>,
test_rx: Option<mpsc::Receiver<Arc<()>>>,
countdown: usize,
}
impl TestTask {
/// Create a new TestTask
fn new() -> (TestTask, mpsc::Sender<TestRx>) {
let (command_tx, command_rx) = mpsc::channel::<TestRx>(0);
(
TestTask {
command_rx,
test_rx: None,
countdown: 0, // 0 means no countdown is in progress.
},
command_tx,
)
}
}
impl Future for TestTask {
type Output = ();

fn poll(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Self::Output> {
// Poll the test channel, if one is present.
if let Some(ref mut rx) = self.test_rx {
if let Poll::Ready(v) = rx.poll_next_unpin(cx) {
let _ = v.expect("test finished unexpectedly!");
}
self.countdown -= 1;
// Busy-poll until the countdown is finished.
cx.waker().wake_by_ref();
}
// Accept any newly submitted MPSC channels for testing.
match self.command_rx.poll_next_unpin(cx) {
Poll::Ready(Some(TestRx { rx, poll_count })) => {
self.test_rx = Some(rx);
self.countdown = poll_count;
cx.waker().wake_by_ref();
},
Poll::Ready(None) => return Poll::Ready(()),
Poll::Pending => {},
}
if self.countdown == 0 {
// Countdown complete -- drop the Receiver.
self.test_rx = None;
}
Poll::Pending
}
}
let (f, mut cmd_tx) = TestTask::new();
let bg = thread::spawn(move || block_on(f));
for i in 0..AMT {
let (mut test_tx, rx) = mpsc::channel(0);
let poll_count = i % MAX_COUNTDOWN;
cmd_tx.try_send(TestRx { rx, poll_count }).unwrap();
let mut prev_weak: Option<Weak<()>> = None;
let mut attempted_sends = 0;
let mut successful_sends = 0;
loop {
// Create a test item.
let item = Arc::new(());
let weak = Arc::downgrade(&item);
match test_tx.try_send(item) {
Ok(_) => {
prev_weak = Some(weak);
successful_sends += 1;
}
Err(ref e) if e.is_full() => {}
Err(ref e) if e.is_disconnected() => {
// Test for evidence of the race condition.
if let Some(prev_weak) = prev_weak {
if prev_weak.upgrade().is_some() {
// The previously sent item is still allocated.
// However, there appears to be some aspect of the
// concurrency that can legitimately cause the Arc
                            // to be momentarily valid. Spin for up to
                            // SPIN_TIMEOUT_S seconds waiting for the
                            // previously sent item to be dropped.
let t0 = Instant::now();
let mut spins = 0;
loop {
if prev_weak.upgrade().is_none() {
break;
}
assert!(t0.elapsed() < Duration::from_secs(SPIN_TIMEOUT_S),
"item not dropped on iteration {} after \
{} sends ({} successful). spin=({})",
i, attempted_sends, successful_sends, spins
);
spins += 1;
thread::sleep(Duration::from_millis(SPIN_SLEEP_MS));
}
}
}
break;
}
Err(ref e) => panic!("unexpected error: {}", e),
}
attempted_sends += 1;
}
}
drop(cmd_tx);
bg.join()
.expect("background thread join");
}
