From 85da253f540db3c1178d82776acb3787df2eda50 Mon Sep 17 00:00:00 2001 From: CreativeStoic Date: Sun, 11 Jun 2023 09:35:10 -0400 Subject: [PATCH 01/10] add mapped_futures --- futures-util/Cargo.toml | 1 + .../src/stream/mapped_futures/abort.rs | 12 + .../src/stream/mapped_futures/iter.rs | 192 ++++ futures-util/src/stream/mapped_futures/mod.rs | 870 ++++++++++++++++++ .../mapped_futures/ready_to_run_queue.rs | 123 +++ .../src/stream/mapped_futures/task.rs | 180 ++++ futures-util/src/stream/mod.rs | 8 + 7 files changed, 1386 insertions(+) create mode 100644 futures-util/src/stream/mapped_futures/abort.rs create mode 100644 futures-util/src/stream/mapped_futures/iter.rs create mode 100644 futures-util/src/stream/mapped_futures/mod.rs create mode 100644 futures-util/src/stream/mapped_futures/ready_to_run_queue.rs create mode 100644 futures-util/src/stream/mapped_futures/task.rs diff --git a/futures-util/Cargo.toml b/futures-util/Cargo.toml index daf46c26ae..187ebdaaac 100644 --- a/futures-util/Cargo.toml +++ b/futures-util/Cargo.toml @@ -48,6 +48,7 @@ pin-project-lite = "0.2.6" futures = { path = "../futures", features = ["async-await", "thread-pool"] } futures-test = { path = "../futures-test" } tokio = "0.1.11" +futures-timer = "3.0.2" [package.metadata.docs.rs] all-features = true diff --git a/futures-util/src/stream/mapped_futures/abort.rs b/futures-util/src/stream/mapped_futures/abort.rs new file mode 100644 index 0000000000..1a42d24369 --- /dev/null +++ b/futures-util/src/stream/mapped_futures/abort.rs @@ -0,0 +1,12 @@ +pub(super) fn abort(s: &str) -> ! { + struct DoublePanic; + + impl Drop for DoublePanic { + fn drop(&mut self) { + panic!("panicking twice to abort the program"); + } + } + + let _bomb = DoublePanic; + panic!("{}", s); +} diff --git a/futures-util/src/stream/mapped_futures/iter.rs b/futures-util/src/stream/mapped_futures/iter.rs new file mode 100644 index 0000000000..ed7d3758ed --- /dev/null +++ b/futures-util/src/stream/mapped_futures/iter.rs @@ -0,0 +1,192 @@ +use super::task::{HashTask, Task}; +use super::MappedFutures; +use core::hash::Hash; +use core::marker::PhantomData; +use core::pin::Pin; +use core::ptr; +use core::sync::atomic::Ordering::Relaxed; + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IterPinMut<'a, K: Hash + Eq, Fut> { + pub(super) task: *const Task, + pub(super) len: usize, + pub(super) _marker: PhantomData<&'a mut MappedFutures>, +} + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IterMut<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinMut<'a, K, Fut>); + +/// Immutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IterPinRef<'a, K: Hash + Eq, Fut> { + pub(super) task: *const Task, + pub(super) len: usize, + pub(super) pending_next_all: *mut Task, + pub(super) _marker: PhantomData<&'a MappedFutures>, +} + +/// Immutable iterator over all the futures in the unordered set. +#[derive(Debug)] +pub struct Iter<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinRef<'a, K, Fut>); + +/// Owned iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IntoIter { + pub(super) len: usize, + pub(super) inner: MappedFutures, +} + +impl Iterator for IntoIter { + type Item = Fut; + + fn next(&mut self) -> Option { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. 
+ let task = self.inner.head_all.get_mut(); + + if (*task).is_null() { + return None; + } + + unsafe { + // Moving out of the future is safe because it is `Unpin` + let future = (*(**task).future.get()).take().unwrap(); + + // Mutable access to a previously shared `MappedFutures` implies + // that the other threads already released the object before the + // current thread acquired it, so relaxed ordering can be used and + // valid `next_all` checks can be skipped. + let next = (**task).next_all.load(Relaxed); + *task = next; + if !task.is_null() { + *(**task).prev_all.get() = ptr::null_mut(); + } + self.len -= 1; + Some(future) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl ExactSizeIterator for IntoIter {} + +impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { + type Item = Pin<&'a mut Fut>; + + fn next(&mut self) -> Option { + if self.task.is_null() { + return None; + } + + unsafe { + let future = (*(*self.task).future.get()).as_mut().unwrap(); + + // Mutable access to a previously shared `MappedFutures` implies + // that the other threads already released the object before the + // current thread acquired it, so relaxed ordering can be used and + // valid `next_all` checks can be skipped. + let next = (*self.task).next_all.load(Relaxed); + self.task = next; + self.len -= 1; + Some(Pin::new_unchecked(future)) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl ExactSizeIterator for IterPinMut<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for IterMut<'a, K, Fut> { + type Item = &'a mut Fut; + + fn next(&mut self) -> Option { + self.0.next().map(Pin::get_mut) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for IterMut<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { + type Item = Pin<&'a Fut>; + + fn next(&mut self) -> Option { + if self.task.is_null() { + return None; + } + + unsafe { + let future = (*(*self.task).future.get()).as_ref().unwrap(); + + // Relaxed ordering can be used since acquire ordering when + // `head_all` was initially read for this iterator implies acquire + // ordering for all previously inserted nodes (and we don't need to + // read `len_all` again for any other nodes). + let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); + self.task = next; + self.len -= 1; + Some(Pin::new_unchecked(future)) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl ExactSizeIterator for IterPinRef<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for Iter<'a, K, Fut> { + type Item = &'a Fut; + + fn next(&mut self) -> Option { + self.0.next().map(Pin::get_ref) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for Iter<'_, K, Fut> {} + +pub struct Keys<'a, K: Hash + Eq, Fut> { + pub(super) inner: std::iter::Map< + std::collections::hash_set::Iter<'a, HashTask>, + Box) -> &'a K>, + >, +} +impl ExactSizeIterator for Keys<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut> Iterator for Keys<'a, K, Fut> { + type Item = &'a K; + + fn next(&mut self) -> Option { + self.inner.next() + } +} + +// SAFETY: we do nothing thread-local and there is no interior mutability, +// so the usual structural `Send`/`Sync` apply. 
+unsafe impl Send for IterPinRef<'_, K, Fut> {} +unsafe impl Sync for IterPinRef<'_, K, Fut> {} + +unsafe impl Send for IterPinMut<'_, K, Fut> {} +unsafe impl Sync for IterPinMut<'_, K, Fut> {} + +unsafe impl Send for IntoIter {} +unsafe impl Sync for IntoIter {} + +unsafe impl Send for Keys<'_, K, Fut> {} +unsafe impl Sync for Keys<'_, K, Fut> {} diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs new file mode 100644 index 0000000000..bd7cc4ba3e --- /dev/null +++ b/futures-util/src/stream/mapped_futures/mod.rs @@ -0,0 +1,870 @@ +//! An unbounded map of futures. + +use crate::task::AtomicWaker; +use alloc::sync::{Arc, Weak}; +use core::cell::UnsafeCell; +use core::fmt::{self, Debug}; +use core::hash::Hash; +use core::iter::FromIterator; +use core::marker::PhantomData; +use core::mem; +use core::pin::Pin; +use core::ptr; +use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; +use core::sync::atomic::{AtomicBool, AtomicPtr}; +use futures_core::future::Future; +use futures_core::stream::{FusedStream, Stream}; +use futures_core::task::{Context, Poll}; +use std::collections::HashSet; + +mod abort; + +mod iter; +use self::iter::Keys; +#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 +pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; + +mod task; +use self::task::{HashTask, Task}; + +mod ready_to_run_queue; +use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; + +/// A map of futures which may complete in any order. +/// +/// This structure is optimized to manage a large number of futures. +/// Futures managed by [`MappedFutures`] will only be polled when they +/// generate wake-up notifications. This reduces the required amount of work +/// needed to poll large numbers of futures. +/// +/// [`MappedFutures`] can be filled by [`collect`](Iterator::collect)ing an +/// iterator of futures into a [`MappedFutures`], or by +/// [`insert`](MappedFutures::insert)ing futures onto an existing +/// [`MappedFutures`]. When new futures are added, +/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving +/// wake-ups for new futures. +/// +/// Note that you can create a ready-made [`MappedFutures`] via the +/// [`collect`](Iterator::collect) method, or you can start with an empty set +/// with the [`MappedFutures::new`] constructor. +#[must_use = "streams do nothing unless polled"] +pub struct MappedFutures { + hash_set: HashSet>, + ready_to_run_queue: Arc>, + head_all: AtomicPtr>, + is_terminated: AtomicBool, +} + +unsafe impl Send for MappedFutures {} +unsafe impl Sync for MappedFutures {} +impl Unpin for MappedFutures {} + +// MappedFutures is implemented using two linked lists. One which links all +// futures managed by a `MappedFutures` and one that tracks futures that have +// been scheduled for polling. The first linked list allows for thread safe +// insertion of nodes at the head as well as forward iteration, but is otherwise +// not thread safe and is only accessed by the thread that owns the +// `MappedFutures` value for any other operations. The second linked list is +// an implementation of the intrusive MPSC queue algorithm described by +// 1024cores.net. +// +// When a future is submitted to the set, a task is allocated and inserted in +// both linked lists. The next call to `poll_next` will (eventually) see this +// task and call `poll` on the future. 
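+//
+// A rough picture of the two lists (illustrative only):
+//
+//     head_all -> newest task -> ... -> oldest task -> null
+//         (all active tasks, linked via `next_all` with `prev_all` back-links)
+//
+//     tail -> task -> ... -> head
+//         (ready to run queue: producers swap themselves in at `head`, the
+//         consumer dequeues at `tail`)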
+// +// Before a managed future is polled, the current context's waker is replaced +// with one that is aware of the specific future being run. This ensures that +// wake-up notifications generated by that specific future are visible to +// `MappedFutures`. When a wake-up notification is received, the task is +// inserted into the ready to run queue, so that its future can be polled later. +// +// Each task is wrapped in an `Arc` and thereby atomically reference counted. +// Also, each task contains an `AtomicBool` which acts as a flag that indicates +// whether the task is currently inserted in the atomic queue. When a wake-up +// notification is received, the task will only be inserted into the ready to +// run queue if it isn't inserted already. + +impl Default for MappedFutures { + fn default() -> Self { + Self::new() + } +} + +impl MappedFutures { + /// Constructs a new, empty [`MappedFutures`]. + /// + /// The returned [`MappedFutures`] does not contain any futures. + /// In this state, [`MappedFutures::poll_next`](Stream::poll_next) will + /// return [`Poll::Ready(None)`](Poll::Ready). + pub fn new() -> Self { + let stub = Arc::new(Task { + future: UnsafeCell::new(None), + next_all: AtomicPtr::new(ptr::null_mut()), + prev_all: UnsafeCell::new(ptr::null()), + len_all: UnsafeCell::new(0), + next_ready_to_run: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + ready_to_run_queue: Weak::new(), + woken: AtomicBool::new(false), + key: UnsafeCell::new(None), + }); + let stub_ptr = Arc::as_ptr(&stub); + let ready_to_run_queue = Arc::new(ReadyToRunQueue { + waker: AtomicWaker::new(), + head: AtomicPtr::new(stub_ptr as *mut _), + tail: UnsafeCell::new(stub_ptr), + stub, + }); + + Self { + hash_set: HashSet::new(), + head_all: AtomicPtr::new(ptr::null_mut()), + ready_to_run_queue, + is_terminated: AtomicBool::new(false), + } + } + + /// Returns the number of futures contained in the set. + /// + /// This represents the total number of in-flight futures. + pub fn len(&self) -> usize { + let (_, len) = self.atomic_load_head_and_len_all(); + len + } + + /// Returns `true` if the set contains no futures. + pub fn is_empty(&self) -> bool { + // Relaxed ordering can be used here since we don't need to read from + // the head pointer, only check whether it is null. + self.head_all.load(Relaxed).is_null() + } + + /// Insert a future into the set. + /// + /// This method adds the given future to the set. This method will not + /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must + /// ensure that [`MappedFutures::poll_next`](Stream::poll_next) is called + /// in order to receive wake-up notifications for the given future. + /// + /// This method will remove and drop a future that is already mapped to the provided key. + /// Returns true if another future was not removed to make room for the provided future. + pub fn insert(&mut self, key: K, future: Fut) -> bool { + let replacing = self.cancel(&key); + let task = Arc::new(Task { + future: UnsafeCell::new(Some(future)), + next_all: AtomicPtr::new(self.pending_next_all()), + prev_all: UnsafeCell::new(ptr::null_mut()), + len_all: UnsafeCell::new(0), + next_ready_to_run: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue), + woken: AtomicBool::new(false), + key: UnsafeCell::new(Some(key)), + }); + + // Reset the `is_terminated` flag if we've previously marked ourselves + // as terminated. 
+        self.is_terminated.store(false, Relaxed);
+
+        // Right now our task has a strong reference count of 1. We transfer
+        // ownership of this reference count to our internal linked list
+        // and we'll reclaim ownership through the `unlink` method below.
+        let ptr = self.link(task);
+
+        // We'll need to get the future "into the system" to start tracking it,
+        // e.g. getting its wake-up notifications going to us tracking which
+        // futures are ready. To do that we unconditionally enqueue it for
+        // polling here.
+        self.ready_to_run_queue.enqueue(ptr);
+        !replacing
+    }
+
+    /// Insert a future into the set and return the displaced future, if there was one.
+    ///
+    /// This method adds the given future to the set. This method will not
+    /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must
+    /// ensure that [`MappedFutures::poll_next`](Stream::poll_next) is called
+    /// in order to receive wake-up notifications for the given future.
+    /// Returns the displaced future if the key was already mapped to one.
+    pub fn replace(&mut self, key: K, future: Fut) -> Option<Fut>
+    where
+        Fut: Unpin,
+    {
+        let replacing = self.remove(&key);
+        let task = Arc::new(Task {
+            future: UnsafeCell::new(Some(future)),
+            next_all: AtomicPtr::new(self.pending_next_all()),
+            prev_all: UnsafeCell::new(ptr::null_mut()),
+            len_all: UnsafeCell::new(0),
+            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
+            queued: AtomicBool::new(true),
+            ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
+            woken: AtomicBool::new(false),
+            key: UnsafeCell::new(Some(key)),
+        });
+
+        // Reset the `is_terminated` flag if we've previously marked ourselves
+        // as terminated.
+        self.is_terminated.store(false, Relaxed);
+
+        // Right now our task has a strong reference count of 1. We transfer
+        // ownership of this reference count to our internal linked list
+        // and we'll reclaim ownership through the `unlink` method below.
+        let ptr = self.link(task);
+
+        // We'll need to get the future "into the system" to start tracking it,
+        // e.g. getting its wake-up notifications going to us tracking which
+        // futures are ready. To do that we unconditionally enqueue it for
+        // polling here.
+        self.ready_to_run_queue.enqueue(ptr);
+        replacing
+    }
+
+    /// Remove a future from the set, dropping it.
+    ///
+    /// Returns `true` if a future was cancelled.
+    pub fn cancel(&mut self, key: &K) -> bool {
+        if let Some(task) = self.hash_set.get(key) {
+            unsafe {
+                if (*task.future.get()).take().is_some() {
+                    self.unlink(Arc::as_ptr(&task.inner));
+                    return true;
+                }
+            }
+        }
+        false
+    }
+
+    /// Remove a future from the set and return it.
+    pub fn remove(&mut self, key: &K) -> Option<Fut>
+    where
+        Fut: Unpin,
+    {
+        if let Some(task) = self.hash_set.get(key) {
+            unsafe {
+                let fut = (*task.future.get()).take().unwrap();
+                self.unlink(Arc::as_ptr(&task.inner));
+                return Some(fut);
+            }
+        }
+        None
+    }
+
+    /// Returns `true` if the map contains a future for the specified key.
+    pub fn contains(&self, key: &K) -> bool {
+        self.hash_set.contains(key)
+    }
+
+    /// Get a pinned mutable reference to the mapped future.
+    pub fn get_pin_mut(&mut self, key: &K) -> Option<Pin<&mut Fut>> {
+        if let Some(task_ref) = self.hash_set.get(key) {
+            unsafe {
+                if let Some(fut) = &mut *task_ref.inner.future.get() {
+                    return Some(Pin::new_unchecked(fut));
+                }
+            }
+        }
+        None
+    }
+
+    /// Get a mutable reference to the mapped future.
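+    ///
+    /// A usage sketch (it mirrors the `mutate` test below and assumes the
+    /// `futures-timer` dev-dependency added by this patch):
+    ///
+    /// ```ignore
+    /// use futures_timer::Delay;
+    /// use std::time::Duration;
+    ///
+    /// let mut futures: MappedFutures<u32, Delay> = MappedFutures::new();
+    /// futures.insert(1, Delay::new(Duration::from_millis(100)));
+    /// futures.get_mut(&1).unwrap().reset(Duration::from_millis(30));
+    /// ```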
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut Fut> + where + Fut: Unpin, + { + if let Some(task_ref) = self.hash_set.get(key) { + unsafe { + if let Some(fut) = &mut *task_ref.inner.future.get() { + return Some(fut); + } + } + } + None + } + + /// Get a shared reference to the mapped future. + pub fn get(&mut self, key: &K) -> Option<&Fut> + where + Fut: Unpin, + { + if let Some(task_ref) = self.hash_set.get(key) { + unsafe { + if let Some(fut) = &*task_ref.inner.future.get() { + return Some(fut); + } + } + } + None + } + + /// Get a pinned shared reference to the mapped future. + pub fn get_pin(&mut self, key: &K) -> Option> { + if let Some(task_ref) = self.hash_set.get(key) { + unsafe { + if let Some(fut) = &*task_ref.future.get() { + return Some(Pin::new_unchecked(fut)); + } + } + } + None + } + + pub fn keys<'a>(&'a self) -> Keys<'a, K, Fut> { + Keys { + inner: self.hash_set.iter().map(Box::new(|hash_task| HashTask::key_unwrap(hash_task))), + } + } + + /// Returns an iterator that allows inspecting each future in the set. + pub fn iter(&self) -> Iter<'_, K, Fut> + where + Fut: Unpin, + { + Iter(Pin::new(self).iter_pin_ref()) + } + + /// Returns an iterator that allows inspecting each future in the set. + pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, K, Fut> { + let (task, len) = self.atomic_load_head_and_len_all(); + let pending_next_all = self.pending_next_all(); + + IterPinRef { task, len, pending_next_all, _marker: PhantomData } + } + + /// Returns an iterator that allows modifying each future in the set. + pub fn iter_mut(&mut self) -> IterMut<'_, K, Fut> + where + Fut: Unpin, + { + IterMut(Pin::new(self).iter_pin_mut()) + } + + /// Returns an iterator that allows modifying each future in the set. + pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut> { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. + let task = *self.head_all.get_mut(); + let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; + + IterPinMut { task, len, _marker: PhantomData } + } + + /// Returns the current head node and number of futures in the list of all + /// futures within a context where access is shared with other threads + /// (mostly for use with the `len` and `iter_pin_ref` methods). + fn atomic_load_head_and_len_all(&self) -> (*const Task, usize) { + let task = self.head_all.load(Acquire); + let len = if task.is_null() { + 0 + } else { + unsafe { + (*task).spin_next_all(self.pending_next_all(), Acquire); + *(*task).len_all.get() + } + }; + + (task, len) + } + + /// Releases the task. It destroys the future inside and either drops + /// the `Arc` or transfers ownership to the ready to run queue. + /// The task this method is called on must have been unlinked before. + fn release_task(&mut self, task: Arc>) { + // `release_task` must only be called on unlinked tasks + debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); + unsafe { + debug_assert!((*task.prev_all.get()).is_null()); + } + + // The future is done, try to reset the queued flag. This will prevent + // `wake` from doing any work in the future + let prev = task.queued.swap(true, SeqCst); + + // Drop the future, even if it hasn't finished yet. This is safe + // because we're dropping the future on the thread that owns + // `MappedFutures`, which correctly tracks `Fut`'s lifetimes and + // such. + unsafe { + // Set to `None` rather than `take()`ing to prevent moving the + // future. 
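+            // (The future may be pinned; overwriting the slot in place drops
+            // it without first moving it out, which `take()` would do.)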
+            *task.future.get() = None;
+
+            let key = &*task.key.get();
+            if let Some(key) = key {
+                self.hash_set.remove(key);
+            }
+        }
+
+        // If the queued flag was previously set, then it means that this task
+        // is still in our internal ready to run queue. We then transfer
+        // ownership of our reference count to the ready to run queue, and it'll
+        // come along and free it later, noticing that the future is `None`.
+        //
+        // If, however, the queued flag was *not* set then we're safe to
+        // release our reference count on the task. The queued flag was set
+        // above so all future `enqueue` operations will not actually
+        // enqueue the task, so our task will never see the ready to run queue
+        // again. The task itself will be deallocated once all reference counts
+        // have been dropped elsewhere by the various wakers that contain it.
+        if prev {
+            mem::forget(task);
+        }
+    }
+
+    /// Insert a new task into the internal linked list.
+    fn link(&mut self, task: Arc<Task<K, Fut>>) -> *const Task<K, Fut> {
+        // `next_all` should already be reset to the pending state before this
+        // function is called.
+        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
+
+        let hash_task = HashTask { inner: task.clone() };
+        self.hash_set.insert(hash_task);
+
+        let ptr = Arc::into_raw(task);
+
+        // Atomically swap out the old head node to get the node that should be
+        // assigned to `next_all`.
+        let next = self.head_all.swap(ptr as *mut _, AcqRel);
+
+        unsafe {
+            // Store the new list length in the new node.
+            let new_len = if next.is_null() {
+                1
+            } else {
+                // Make sure `next_all` has been written to signal that it is
+                // safe to read `len_all`.
+                (*next).spin_next_all(self.pending_next_all(), Acquire);
+                *(*next).len_all.get() + 1
+            };
+            *(*ptr).len_all.get() = new_len;
+
+            // Write the old head as the next node pointer, signaling to other
+            // threads that `len_all` and `next_all` are ready to read.
+            (*ptr).next_all.store(next, Release);
+
+            // `prev_all` updates don't need to be synchronized, as the field is
+            // only ever used after exclusive access has been acquired.
+            if !next.is_null() {
+                *(*next).prev_all.get() = ptr;
+            }
+        }
+
+        ptr
+    }
+
+    /// Remove the task from the linked list tracking all tasks currently
+    /// managed by `MappedFutures`.
+    /// This method is unsafe because it has to be guaranteed that `task` is a
+    /// valid pointer.
+    unsafe fn unlink(&mut self, task: *const Task<K, Fut>) -> Arc<Task<K, Fut>> {
+        // Compute the new list length now in case we're removing the head node
+        // and won't be able to retrieve the correct length later.
+        let head = *self.head_all.get_mut();
+        debug_assert!(!head.is_null());
+        let new_len = *(*head).len_all.get() - 1;
+
+        if let Some(key) = (*task).key() {
+            self.hash_set.remove(key);
+        }
+
+        let task = Arc::from_raw(task);
+        let next = task.next_all.load(Relaxed);
+        let prev = *task.prev_all.get();
+        task.next_all.store(self.pending_next_all(), Relaxed);
+        *task.prev_all.get() = ptr::null_mut();
+
+        if !next.is_null() {
+            *(*next).prev_all.get() = prev;
+        }
+
+        if !prev.is_null() {
+            (*prev).next_all.store(next, Relaxed);
+        } else {
+            *self.head_all.get_mut() = next;
+        }
+
+        // Store the new list length in the head node.
+        let head = *self.head_all.get_mut();
+        if !head.is_null() {
+            *(*head).len_all.get() = new_len;
+        }
+
+        task
+    }
+
+    /// Returns the reserved value for `Task::next_all` to indicate a pending
+    /// assignment from the thread that inserted the task.
+ /// + /// `MappedFutures::link` needs to update `Task` pointers in an order + /// that ensures any iterators created on other threads can correctly + /// traverse the entire `Task` list using the chain of `next_all` pointers. + /// This could be solved with a compare-exchange loop that stores the + /// current `head_all` in `next_all` and swaps out `head_all` with the new + /// `Task` pointer if the head hasn't already changed. Under heavy thread + /// contention, this compare-exchange loop could become costly. + /// + /// An alternative is to initialize `next_all` to a reserved pending state + /// first, perform an atomic swap on `head_all`, and finally update + /// `next_all` with the old head node. Iterators will then either see the + /// pending state value or the correct next node pointer, and can reload + /// `next_all` as needed until the correct value is loaded. The number of + /// retries needed (if any) would be small and will always be finite, so + /// this should generally perform better than the compare-exchange loop. + /// + /// A valid `Task` pointer in the `head_all` list is guaranteed to never be + /// this value, so it is safe to use as a reserved value until the correct + /// value can be written. + fn pending_next_all(&self) -> *mut Task { + // The `ReadyToRunQueue` stub is never inserted into the `head_all` + // list, and its pointer value will remain valid for the lifetime of + // this `MappedFutures`, so we can make use of its value here. + Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ + } +} + +impl Stream for MappedFutures { + type Item = (K, Fut::Output); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let len = self.len(); + + // Keep track of how many child futures we have polled, + // in case we want to forcibly yield. + let mut polled = 0; + let mut yielded = 0; + + // Ensure `parent` is correctly set. + self.ready_to_run_queue.waker.register(cx.waker()); + + loop { + // Safety: &mut self guarantees the mutual exclusion `dequeue` + // expects + let task = match unsafe { self.ready_to_run_queue.dequeue() } { + Dequeue::Empty => { + if self.is_empty() { + // We can only consider ourselves terminated once we + // have yielded a `None` + *self.is_terminated.get_mut() = true; + return Poll::Ready(None); + } else { + return Poll::Pending; + } + } + Dequeue::Inconsistent => { + // At this point, it may be worth yielding the thread & + // spinning a few times... but for now, just yield using the + // task system. + cx.waker().wake_by_ref(); + return Poll::Pending; + } + Dequeue::Data(task) => task, + }; + + debug_assert!(task != self.ready_to_run_queue.stub()); + + // Safety: + // - `task` is a valid pointer. + // - We are the only thread that accesses the `UnsafeCell` that + // contains the future + let future = match unsafe { &mut *(*task).future.get() } { + Some(future) => future, + + // If the future has already gone away then we're just + // cleaning out this task. See the comment in + // `release_task` for more information, but we're basically + // just taking ownership of our reference count here. + None => { + // This case only happens when `release_task` was called + // for this task before and couldn't drop the task + // because it was already enqueued in the ready to run + // queue. + + // Safety: `task` is a valid pointer + let task = unsafe { Arc::from_raw(task) }; + + // Double check that the call to `release_task` really + // happened. Calling it required the task to be unlinked. 
+ debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); + unsafe { + debug_assert!((*task.prev_all.get()).is_null()); + } + continue; + } + }; + + // Safety: `task` is a valid pointer + let task = unsafe { self.unlink(task) }; + + // Unset queued flag: This must be done before polling to ensure + // that the future's task gets rescheduled if it sends a wake-up + // notification **during** the call to `poll`. + let prev = task.queued.swap(false, SeqCst); + assert!(prev); + + // We're going to need to be very careful if the `poll` + // method below panics. We need to (a) not leak memory and + // (b) ensure that we still don't have any use-after-frees. To + // manage this we do a few things: + // + // * A "bomb" is created which if dropped abnormally will call + // `release_task`. That way we'll be sure the memory management + // of the `task` is managed correctly. In particular + // `release_task` will drop the future. This ensures that it is + // dropped on this thread and not accidentally on a different + // thread (bad). + // * We unlink the task from our internal queue to preemptively + // assume it'll panic, in which case we'll want to discard it + // regardless. + struct Bomb<'a, K: Hash + Eq, Fut> { + queue: &'a mut MappedFutures, + task: Option>>, + } + + impl Drop for Bomb<'_, K, Fut> { + fn drop(&mut self) { + if let Some(task) = self.task.take() { + self.queue.release_task(task); + } + } + } + + let mut bomb = Bomb { task: Some(task), queue: &mut *self }; + + // Poll the underlying future with the appropriate waker + // implementation. This is where a large bit of the unsafety + // starts to stem from internally. The waker is basically just + // our `Arc>` and can schedule the future for polling by + // enqueuing itself in the ready to run queue. + // + // Critically though `Task` won't actually access `Fut`, the + // future, while it's floating around inside of wakers. + // These structs will basically just use `Fut` to size + // the internal allocation, appropriately accessing fields and + // deallocating the task if need be. + let res = { + let task = bomb.task.as_ref().unwrap(); + // We are only interested in whether the future is awoken before it + // finishes polling, so reset the flag here. + task.woken.store(false, Relaxed); + let waker = Task::waker_ref(task); + let mut cx = Context::from_waker(&waker); + + // Safety: We won't move the future ever again + let future = unsafe { Pin::new_unchecked(future) }; + + future.poll(&mut cx) + }; + polled += 1; + + match res { + Poll::Pending => { + let task = bomb.task.take().unwrap(); + // If the future was awoken during polling, we assume + // the future wanted to explicitly yield. + yielded += task.woken.load(Relaxed) as usize; + bomb.queue.link(task); + + // If a future yields, we respect it and yield here. + // If all futures have been polled, we also yield here to + // avoid starving other tasks waiting on the executor. + // (polling the same future twice per iteration may cause + // the problem: https://github.com/rust-lang/futures-rs/pull/2333) + if yielded >= 2 || polled == len { + cx.waker().wake_by_ref(); + return Poll::Pending; + } + continue; + } + Poll::Ready(output) => { + return Poll::Ready(Some((bomb.task.as_ref().unwrap().take_key(), output))); + } + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } +} + +impl Debug for MappedFutures { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "MappedFutures {{ ... 
}}") + } +} + +impl MappedFutures { + /// Clears the set, removing all futures. + pub fn clear(&mut self) { + self.clear_head_all(); + self.hash_set.clear(); + + // we just cleared all the tasks, and we have &mut self, so this is safe. + unsafe { self.ready_to_run_queue.clear() }; + + self.is_terminated.store(false, Relaxed); + } + + fn clear_head_all(&mut self) { + while !self.head_all.get_mut().is_null() { + let head = *self.head_all.get_mut(); + let task = unsafe { self.unlink(head) }; + self.release_task(task); + } + } +} + +impl Drop for MappedFutures { + fn drop(&mut self) { + // When a `MappedFutures` is dropped we want to drop all futures + // associated with it. At the same time though there may be tons of + // wakers flying around which contain `Task` references + // inside them. We'll let those naturally get deallocated. + self.clear_head_all(); + + // Note that at this point we could still have a bunch of tasks in the + // ready to run queue. None of those tasks, however, have futures + // associated with them so they're safe to destroy on any thread. At + // this point the `MappedFutures` struct, the owner of the one strong + // reference to the ready to run queue will drop the strong reference. + // At that point whichever thread releases the strong refcount last (be + // it this thread or some other thread as part of an `upgrade`) will + // clear out the ready to run queue and free all remaining tasks. + // + // While that freeing operation isn't guaranteed to happen here, it's + // guaranteed to happen "promptly" as no more "blocking work" will + // happen while there's a strong refcount held. + } +} + +impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a MappedFutures { + type Item = &'a Fut; + type IntoIter = Iter<'a, K, Fut>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a mut MappedFutures { + type Item = &'a mut Fut; + type IntoIter = IterMut<'a, K, Fut>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl IntoIterator for MappedFutures { + type Item = Fut; + type IntoIter = IntoIter; + + fn into_iter(mut self) -> Self::IntoIter { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. 
+ let task = *self.head_all.get_mut(); + let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; + + IntoIter { len, inner: self } + } +} + +impl FromIterator<(K, Fut)> for MappedFutures { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let acc = Self::new(); + iter.into_iter().fold(acc, |mut acc, (key, item)| { + acc.insert(key, item); + acc + }) + } +} + +impl FusedStream for MappedFutures { + fn is_terminated(&self) -> bool { + self.is_terminated.load(Relaxed) + } +} + +impl Extend<(K, Fut)> for MappedFutures { + fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for (key, item) in iter { + self.insert(key, item); + } + } +} + +#[cfg(test)] +pub mod tests { + use crate::stream::*; + use futures::executor::block_on; + use futures::future::LocalBoxFuture; + use futures_timer::Delay; + // use futures_util::StreamExt; + // use crate::StreamExt; + use std::time::Duration; + + fn insert_millis(futs: &mut MappedFutures, key: u32, millis: u64) { + futs.insert(key, Delay::new(Duration::from_millis(millis))); + } + + fn insert_millis_pinned( + futs: &mut MappedFutures>, + key: u32, + millis: u64, + ) { + futs.insert(key, Box::pin(Delay::new(Duration::from_millis(millis)))); + } + + #[test] + fn map_futures() { + let mut futures: MappedFutures = MappedFutures::new(); + insert_millis(&mut futures, 1, 50); + insert_millis(&mut futures, 2, 50); + insert_millis(&mut futures, 3, 150); + insert_millis(&mut futures, 4, 200); + + assert_eq!(block_on(futures.next()).unwrap().0, 1); + assert_eq!(futures.cancel(&3), true); + assert_eq!(block_on(futures.next()).unwrap().0, 2); + assert_eq!(block_on(futures.next()).unwrap().0, 4); + assert_eq!(block_on(futures.next()), None); + } + + #[test] + fn remove_pinned() { + let mut futures: MappedFutures> = MappedFutures::new(); + insert_millis_pinned(&mut futures, 1, 50); + insert_millis_pinned(&mut futures, 3, 150); + insert_millis_pinned(&mut futures, 4, 200); + + assert_eq!(block_on(futures.next()).unwrap().0, 1); + assert_eq!(block_on(futures.remove(&3).unwrap()), ()); + insert_millis_pinned(&mut futures, 2, 60); + assert_eq!(block_on(futures.next()).unwrap().0, 4); + assert_eq!(block_on(futures.next()).unwrap().0, 2); + assert_eq!(block_on(futures.next()), None); + } + + #[test] + fn mutate() { + let mut futures: MappedFutures = MappedFutures::new(); + insert_millis(&mut futures, 1, 50); + insert_millis(&mut futures, 2, 100); + insert_millis(&mut futures, 3, 150); + insert_millis(&mut futures, 4, 200); + + assert_eq!(block_on(futures.next()).unwrap().0, 1); + futures.get_mut(&3).unwrap().reset(Duration::from_millis(30)); + assert_eq!(block_on(futures.next()).unwrap().0, 3); + assert_eq!(block_on(futures.next()).unwrap().0, 2); + assert_eq!(block_on(futures.next()).unwrap().0, 4); + assert_eq!(block_on(futures.next()), None); + } +} diff --git a/futures-util/src/stream/mapped_futures/ready_to_run_queue.rs b/futures-util/src/stream/mapped_futures/ready_to_run_queue.rs new file mode 100644 index 0000000000..b607044723 --- /dev/null +++ b/futures-util/src/stream/mapped_futures/ready_to_run_queue.rs @@ -0,0 +1,123 @@ +use crate::task::AtomicWaker; +use alloc::sync::Arc; +use core::cell::UnsafeCell; +use core::hash::Hash; +use core::ptr; +use core::sync::atomic::AtomicPtr; +use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; + +use super::abort::abort; +use super::task::Task; + +pub(super) enum Dequeue { + Data(*const Task), + Empty, + Inconsistent, +} + +pub(super) struct ReadyToRunQueue { + // The 
waker of the task using `MappedFutures`.
+    pub(super) waker: AtomicWaker,
+
+    // Head/tail of the readiness queue
+    pub(super) head: AtomicPtr<Task<K, Fut>>,
+    pub(super) tail: UnsafeCell<*const Task<K, Fut>>,
+    pub(super) stub: Arc<Task<K, Fut>>,
+}
+
+/// An MPSC queue into which the tasks containing the futures are inserted
+/// whenever the future inside is scheduled for polling.
+impl<K: Hash + Eq, Fut> ReadyToRunQueue<K, Fut> {
+    /// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
+    pub(super) fn enqueue(&self, task: *const Task<K, Fut>) {
+        unsafe {
+            debug_assert!((*task).queued.load(Relaxed));
+
+            // This action does not require any coordination
+            (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed);
+
+            // Note that these atomic orderings come from 1024cores
+            let task = task as *mut _;
+            let prev = self.head.swap(task, AcqRel);
+            (*prev).next_ready_to_run.store(task, Release);
+        }
+    }
+
+    /// The dequeue function from the 1024cores intrusive MPSC queue algorithm
+    ///
+    /// Note that this is unsafe as it requires mutual exclusion (only one
+    /// thread can call this) to be guaranteed elsewhere.
+    pub(super) unsafe fn dequeue(&self) -> Dequeue<K, Fut> {
+        let mut tail = *self.tail.get();
+        let mut next = (*tail).next_ready_to_run.load(Acquire);
+
+        if tail == self.stub() {
+            if next.is_null() {
+                return Dequeue::Empty;
+            }
+
+            *self.tail.get() = next;
+            tail = next;
+            next = (*next).next_ready_to_run.load(Acquire);
+        }
+
+        if !next.is_null() {
+            *self.tail.get() = next;
+            debug_assert!(tail != self.stub());
+            return Dequeue::Data(tail);
+        }
+
+        if self.head.load(Acquire) as *const _ != tail {
+            return Dequeue::Inconsistent;
+        }
+
+        self.enqueue(self.stub());
+
+        next = (*tail).next_ready_to_run.load(Acquire);
+
+        if !next.is_null() {
+            *self.tail.get() = next;
+            return Dequeue::Data(tail);
+        }
+
+        Dequeue::Inconsistent
+    }
+
+    pub(super) fn stub(&self) -> *const Task<K, Fut> {
+        Arc::as_ptr(&self.stub)
+    }
+
+    // Clear the queue of tasks.
+    //
+    // Note that each task has a strong reference count associated with it
+    // which is owned by the ready to run queue. This method just pulls out
+    // tasks and drops their refcounts.
+    //
+    // # Safety
+    //
+    // - All tasks **must** have had their futures dropped already (by MappedFutures::clear)
+    // - The caller **must** guarantee unique access to `self`
+    pub(crate) unsafe fn clear(&self) {
+        loop {
+            // SAFETY: We have the guarantee of mutual exclusion required by `dequeue`.
+            match self.dequeue() {
+                Dequeue::Empty => break,
+                Dequeue::Inconsistent => abort("inconsistent in drop"),
+                Dequeue::Data(ptr) => drop(Arc::from_raw(ptr)),
+            }
+        }
+    }
+}
+
+impl<K: Hash + Eq, Fut> Drop for ReadyToRunQueue<K, Fut> {
+    fn drop(&mut self) {
+        // Once we're in the destructor for `ReadyToRunQueue` we need to clear
+        // out the ready to run queue of tasks if there's anything left in there.
+
+        // All tasks have had their futures dropped already by the `MappedFutures`
+        // destructor above, and we have &mut self, so this is safe.
+        unsafe {
+            self.clear();
+        }
+    }
+}
diff --git a/futures-util/src/stream/mapped_futures/task.rs b/futures-util/src/stream/mapped_futures/task.rs
new file mode 100644
index 0000000000..0c1387c467
--- /dev/null
+++ b/futures-util/src/stream/mapped_futures/task.rs
@@ -0,0 +1,180 @@
+use alloc::sync::{Arc, Weak};
+use core::cell::UnsafeCell;
+use core::sync::atomic::Ordering::{self, Relaxed, SeqCst};
+use core::sync::atomic::{AtomicBool, AtomicPtr};
+use std::borrow::Borrow;
+use std::ops::Deref;
+
+use super::abort::abort;
+use super::ReadyToRunQueue;
+use crate::task::{waker_ref, ArcWake, WakerRef};
+use core::hash::Hash;
+
+pub(super) struct Task<K: Hash + Eq, Fut> {
+    // The future
+    pub(super) future: UnsafeCell<Option<Fut>>,
+
+    // Next pointer for linked list tracking all active tasks (use
+    // `spin_next_all` to read when access is shared across threads)
+    pub(super) next_all: AtomicPtr<Task<K, Fut>>,
+
+    // Previous task in linked list tracking all active tasks
+    pub(super) prev_all: UnsafeCell<*const Task<K, Fut>>,
+
+    // Length of the linked list tracking all active tasks when this node was
+    // inserted (use `spin_next_all` to synchronize before reading when access
+    // is shared across threads)
+    pub(super) len_all: UnsafeCell<usize>,
+
+    // Next pointer in ready to run queue
+    pub(super) next_ready_to_run: AtomicPtr<Task<K, Fut>>,
+
+    // Queue that we'll be enqueued to when woken
+    pub(super) ready_to_run_queue: Weak<ReadyToRunQueue<K, Fut>>,
+
+    // Whether or not this task is currently in the ready to run queue
+    pub(super) queued: AtomicBool,
+
+    // Whether the future was awoken during polling
+    // It is possible for this flag to be set to true after the polling,
+    // but it will be ignored.
+    pub(super) woken: AtomicBool,
+
+    // The key mapped to this task's future
+    pub(super) key: UnsafeCell<Option<K>>,
+}
+
+// Wrapper struct; exists effectively to implement hash on the type `Arc<Task<K, Fut>>`
+pub(super) struct HashTask<K: Hash + Eq, Fut> {
+    pub(super) inner: Arc<Task<K, Fut>>,
+}
+
+impl<K: Hash + Eq, Fut> HashTask<K, Fut> {
+    fn key(&self) -> Option<&K> {
+        Task::key(self)
+    }
+
+    pub(super) fn key_unwrap(&self) -> &K {
+        unsafe { (*self.key.get()).as_ref().unwrap() }
+    }
+}
+
+impl<K: Hash + Eq, Fut> Deref for HashTask<K, Fut> {
+    type Target = Task<K, Fut>;
+
+    fn deref(&self) -> &Self::Target {
+        &self.inner
+    }
+}
+
+impl<K: Hash + Eq, Fut> Borrow<K> for HashTask<K, Fut> {
+    fn borrow(&self) -> &K {
+        // Never use Task in a context where this method may be called,
+        // i.e. the stub task never goes into the HashSet.
+        unsafe { (*self.key.get()).as_ref().unwrap() }
+    }
+}
+
+impl<K: Hash + Eq, Fut> Hash for HashTask<K, Fut> {
+    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
+        unsafe { (*self.key.get()).as_ref() }.unwrap().hash(state)
+    }
+}
+
+impl<K: Hash + Eq, Fut> PartialEq for HashTask<K, Fut> {
+    fn eq(&self, other: &Self) -> bool {
+        self.key() == other.key()
+    }
+}
+impl<K: Hash + Eq, Fut> Eq for HashTask<K, Fut> {}
+unsafe impl<K: Hash + Eq, Fut> Send for HashTask<K, Fut> {}
+unsafe impl<K: Hash + Eq, Fut> Sync for HashTask<K, Fut> {}
+
+// `Task` can be sent across threads safely because it ensures that
+// the underlying `Fut` type isn't touched from any of its methods.
+//
+// The parent (`super`) module is trusted not to access `future`
+// across different threads.
+unsafe impl<K: Hash + Eq, Fut> Send for Task<K, Fut> {}
+unsafe impl<K: Hash + Eq, Fut> Sync for Task<K, Fut> {}
+
+impl<K: Hash + Eq, Fut> ArcWake for Task<K, Fut> {
+    fn wake_by_ref(arc_self: &Arc<Self>) {
+        let inner = match arc_self.ready_to_run_queue.upgrade() {
+            Some(inner) => inner,
+            None => return,
+        };
+
+        arc_self.woken.store(true, Relaxed);
+
+        // It's our job to enqueue this task into the ready to run queue. To
+        // do this we set the `queued` flag, and if successful we then do the
+        // actual queueing operation, ensuring that we're only queued once.
+        //
+        // Once the task is inserted call `wake` to notify the parent task,
+        // as it'll want to come along and run our task later.
+ // + // Note that we don't change the reference count of the task here, + // we merely enqueue the raw pointer. The `FuturesUnordered` + // implementation guarantees that if we set the `queued` flag that + // there's a reference count held by the main `FuturesUnordered` queue + // still. + let prev = arc_self.queued.swap(true, SeqCst); + if !prev { + inner.enqueue(Arc::as_ptr(arc_self)); + inner.waker.wake(); + } + } +} + +impl Task { + /// Returns a waker reference for this task without cloning the Arc. + pub(super) fn waker_ref(this: &Arc) -> WakerRef<'_> { + waker_ref(this) + } + + /// Spins until `next_all` is no longer set to `pending_next_all`. + /// + /// The temporary `pending_next_all` value is typically overwritten fairly + /// quickly after a node is inserted into the list of all futures, so this + /// should rarely spin much. + /// + /// When it returns, the correct `next_all` value is returned. + /// + /// `Relaxed` or `Acquire` ordering can be used. `Acquire` ordering must be + /// used before `len_all` can be safely read. + #[inline] + pub(super) fn spin_next_all( + &self, + pending_next_all: *mut Self, + ordering: Ordering, + ) -> *const Self { + loop { + let next = self.next_all.load(ordering); + if next != pending_next_all { + return next; + } + } + } + pub(super) fn key(&self) -> Option<&K> { + unsafe { (&*self.key.get()).as_ref() } + } + pub(super) fn take_key(&self) -> K { + unsafe { (*self.key.get()).take().unwrap() } + } +} + +impl Drop for Task { + fn drop(&mut self) { + // Since `Task` is sent across all threads for any lifetime, + // regardless of `Fut`, we, to guarantee memory safety, can't actually + // touch `Fut` at any time except when we have a reference to the + // `FuturesUnordered` itself . + // + // Consequently it *should* be the case that we always drop futures from + // the `FuturesUnordered` instance. This is a bomb, just in case there's + // a bug in that logic. + unsafe { + if (*self.future.get()).is_some() { + abort("future still here when dropping"); + } + } + } +} diff --git a/futures-util/src/stream/mod.rs b/futures-util/src/stream/mod.rs index 5a1f766aaa..65d6d38a82 100644 --- a/futures-util/src/stream/mod.rs +++ b/futures-util/src/stream/mod.rs @@ -123,6 +123,14 @@ pub mod futures_unordered; #[doc(inline)] pub use self::futures_unordered::FuturesUnordered; +#[cfg(not(futures_no_atomic_cas))] +#[cfg(feature = "alloc")] +pub mod mapped_futures; +#[cfg(not(futures_no_atomic_cas))] +#[cfg(feature = "alloc")] +#[doc(inline)] +pub use self::mapped_futures::MappedFutures; + #[cfg(not(futures_no_atomic_cas))] #[cfg(feature = "alloc")] pub mod select_all; From 373f965d0412825a9fe1e4aacd36273fc95bccfa Mon Sep 17 00:00:00 2001 From: CreativeStoic Date: Sun, 11 Jun 2023 10:08:04 -0400 Subject: [PATCH 02/10] add comments --- futures-util/src/stream/mapped_futures/iter.rs | 14 ++++++++------ futures-util/src/stream/mapped_futures/mod.rs | 1 + 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/futures-util/src/stream/mapped_futures/iter.rs b/futures-util/src/stream/mapped_futures/iter.rs index ed7d3758ed..0ec0ff4898 100644 --- a/futures-util/src/stream/mapped_futures/iter.rs +++ b/futures-util/src/stream/mapped_futures/iter.rs @@ -38,6 +38,14 @@ pub struct IntoIter { pub(super) inner: MappedFutures, } +/// Immutable iterator over all keys in the mapping. 
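+///
+/// Returned by [`MappedFutures::keys`]. A usage sketch:
+///
+/// ```ignore
+/// let keys: Vec<u32> = futures.keys().copied().collect();
+/// ```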
+pub struct Keys<'a, K: Hash + Eq, Fut> { + pub(super) inner: std::iter::Map< + std::collections::hash_set::Iter<'a, HashTask>, + Box) -> &'a K>, + >, +} + impl Iterator for IntoIter { type Item = Fut; @@ -161,12 +169,6 @@ impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for Iter<'a, K, Fut> { impl ExactSizeIterator for Iter<'_, K, Fut> {} -pub struct Keys<'a, K: Hash + Eq, Fut> { - pub(super) inner: std::iter::Map< - std::collections::hash_set::Iter<'a, HashTask>, - Box) -> &'a K>, - >, -} impl ExactSizeIterator for Keys<'_, K, Fut> {} impl<'a, K: Hash + Eq, Fut> Iterator for Keys<'a, K, Fut> { diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs index bd7cc4ba3e..f70d98ad03 100644 --- a/futures-util/src/stream/mapped_futures/mod.rs +++ b/futures-util/src/stream/mapped_futures/mod.rs @@ -309,6 +309,7 @@ impl MappedFutures { None } + /// Returns an iterator of keys in the mapping. pub fn keys<'a>(&'a self) -> Keys<'a, K, Fut> { Keys { inner: self.hash_set.iter().map(Box::new(|hash_task| HashTask::key_unwrap(hash_task))), From 24082a0e230a92c3594232cc9f8cb8a6c865b99a Mon Sep 17 00:00:00 2001 From: CreativeStoic Date: Sun, 24 Sep 2023 21:15:45 -0400 Subject: [PATCH 03/10] futures_unordered consumes futures_keyed --- .../abort.rs | 0 futures-util/src/stream/futures_keyed/iter.rs | 173 +++++ futures-util/src/stream/futures_keyed/mod.rs | 677 ++++++++++++++++++ .../ready_to_run_queue.rs | 29 +- .../task.rs | 34 +- .../src/stream/futures_unordered/iter.rs | 83 +-- .../src/stream/futures_unordered/mod.rs | 463 +----------- futures-util/src/stream/mod.rs | 1 + 8 files changed, 926 insertions(+), 534 deletions(-) rename futures-util/src/stream/{futures_unordered => futures_keyed}/abort.rs (100%) create mode 100644 futures-util/src/stream/futures_keyed/iter.rs create mode 100644 futures-util/src/stream/futures_keyed/mod.rs rename futures-util/src/stream/{futures_unordered => futures_keyed}/ready_to_run_queue.rs (83%) rename futures-util/src/stream/{futures_unordered => futures_keyed}/task.rs (80%) diff --git a/futures-util/src/stream/futures_unordered/abort.rs b/futures-util/src/stream/futures_keyed/abort.rs similarity index 100% rename from futures-util/src/stream/futures_unordered/abort.rs rename to futures-util/src/stream/futures_keyed/abort.rs diff --git a/futures-util/src/stream/futures_keyed/iter.rs b/futures-util/src/stream/futures_keyed/iter.rs new file mode 100644 index 0000000000..847aba0f64 --- /dev/null +++ b/futures-util/src/stream/futures_keyed/iter.rs @@ -0,0 +1,173 @@ +use super::task::Task; +use super::FuturesKeyed; +use core::hash::Hash; +use core::marker::PhantomData; +use core::pin::Pin; +use core::ptr; +use core::sync::atomic::Ordering::Relaxed; + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub(crate) struct IterPinMut<'a, K: Hash + Eq, Fut> { + pub(super) task: *const Task, + pub(crate) len: usize, + pub(super) _marker: PhantomData<&'a mut FuturesKeyed>, +} + +/// Mutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub struct IterMut<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinMut<'a, K, Fut>); + +/// Immutable iterator over all futures in the unordered set. +#[derive(Debug)] +pub(crate) struct IterPinRef<'a, K: Hash + Eq, Fut> { + pub(super) task: *const Task, + pub(crate) len: usize, + pub(super) pending_next_all: *mut Task, + pub(super) _marker: PhantomData<&'a FuturesKeyed>, +} + +/// Immutable iterator over all the futures in the unordered set. 
+#[derive(Debug)] +pub struct Iter<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinRef<'a, K, Fut>); + +/// Owned iterator over all futures in the unordered set. +#[derive(Debug)] +pub(crate) struct IntoIter { + pub(crate) len: usize, + pub(super) inner: FuturesKeyed, +} + +impl Iterator for IntoIter { + type Item = Fut; + + fn next(&mut self) -> Option { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. + let task = self.inner.head_all.get_mut(); + + if (*task).is_null() { + return None; + } + + unsafe { + // Moving out of the future is safe because it is `Unpin` + let future = (*(**task).future.get()).take().unwrap(); + + // Mutable access to a previously shared `FuturesKeyed` implies + // that the other threads already released the object before the + // current thread acquired it, so relaxed ordering can be used and + // valid `next_all` checks can be skipped. + let next = (**task).next_all.load(Relaxed); + *task = next; + if !task.is_null() { + *(**task).prev_all.get() = ptr::null_mut(); + } + self.len -= 1; + Some(future) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl ExactSizeIterator for IntoIter {} + +impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { + type Item = Pin<&'a mut Fut>; + + fn next(&mut self) -> Option { + if self.task.is_null() { + return None; + } + + unsafe { + let future = (*(*self.task).future.get()).as_mut().unwrap(); + + // Mutable access to a previously shared `FuturesKeyed` implies + // that the other threads already released the object before the + // current thread acquired it, so relaxed ordering can be used and + // valid `next_all` checks can be skipped. + let next = (*self.task).next_all.load(Relaxed); + self.task = next; + self.len -= 1; + Some(Pin::new_unchecked(future)) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl ExactSizeIterator for IterPinMut<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for IterMut<'a, K, Fut> { + type Item = &'a mut Fut; + + fn next(&mut self) -> Option { + self.0.next().map(Pin::get_mut) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for IterMut<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { + type Item = Pin<&'a Fut>; + + fn next(&mut self) -> Option { + if self.task.is_null() { + return None; + } + + unsafe { + let future = (*(*self.task).future.get()).as_ref().unwrap(); + + // Relaxed ordering can be used since acquire ordering when + // `head_all` was initially read for this iterator implies acquire + // ordering for all previously inserted nodes (and we don't need to + // read `len_all` again for any other nodes). + let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); + self.task = next; + self.len -= 1; + Some(Pin::new_unchecked(future)) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.len, Some(self.len)) + } +} + +impl ExactSizeIterator for IterPinRef<'_, K, Fut> {} + +impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for Iter<'a, K, Fut> { + type Item = &'a Fut; + + fn next(&mut self) -> Option { + self.0.next().map(Pin::get_ref) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +impl ExactSizeIterator for Iter<'_, K, Fut> {} + +// SAFETY: we do nothing thread-local and there is no interior mutability, +// so the usual structural `Send`/`Sync` apply. 
+unsafe impl<K: Hash + Eq, Fut> Send for IterPinRef<'_, K, Fut> {}
+unsafe impl<K: Hash + Eq, Fut> Sync for IterPinRef<'_, K, Fut> {}
+
+unsafe impl<K: Hash + Eq, Fut> Send for IterPinMut<'_, K, Fut> {}
+unsafe impl<K: Hash + Eq, Fut> Sync for IterPinMut<'_, K, Fut> {}
+
+unsafe impl<K: Hash + Eq, Fut> Send for IntoIter<K, Fut> {}
+unsafe impl<K: Hash + Eq, Fut> Sync for IntoIter<K, Fut> {}
diff --git a/futures-util/src/stream/futures_keyed/mod.rs b/futures-util/src/stream/futures_keyed/mod.rs
new file mode 100644
index 0000000000..080cdf7f8c
--- /dev/null
+++ b/futures-util/src/stream/futures_keyed/mod.rs
@@ -0,0 +1,677 @@
+//! An unbounded set of futures.
+//!
+//! This module is only available when the `std` or `alloc` feature of this
+//! library is activated, and it is activated by default.
+//!
+//! This replaces `futures_unordered` as the main runner of unordered futures;
+//! the `futures_unordered` API is now a consumer of this module. This module
+//! differs from `futures_unordered` only in that `Task` structs contain a
+//! hashable key, and in that a `Task` is returned together with its future's
+//! output when each future completes. The whole `Task` must be returned, since
+//! we don't want the key to need to be `Copy` or `Clone`, and so the key used
+//! to track the tasks in the consuming module must be the same instance of the
+//! key that is contained in the returned `Task`.
+
+use crate::task::AtomicWaker;
+use alloc::sync::{Arc, Weak};
+use core::cell::UnsafeCell;
+use core::fmt::{self, Debug};
+use core::hash::Hash;
+use core::iter::FromIterator;
+use core::marker::PhantomData;
+use core::mem;
+use core::pin::Pin;
+use core::ptr;
+use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
+use core::sync::atomic::{AtomicBool, AtomicPtr};
+use futures_core::future::Future;
+use futures_core::stream::{FusedStream, Stream};
+use futures_core::task::{Context, Poll};
+// use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError};
+
+mod abort;
+
+mod iter;
+#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352
+pub(super) use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef};
+
+mod task;
+use self::task::Task;
+
+mod ready_to_run_queue;
+use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue};
+
+/// A set of futures which may complete in any order.
+///
+/// See [`FuturesOrdered`](crate::stream::FuturesOrdered) for a version of this
+/// type that preserves a FIFO order.
+///
+/// This structure is optimized to manage a large number of futures.
+/// Futures managed by [`FuturesKeyed`] will only be polled when they
+/// generate wake-up notifications. This reduces the required amount of work
+/// needed to poll large numbers of futures.
+///
+/// [`FuturesKeyed`] can be filled by [`collect`](Iterator::collect)ing an
+/// iterator of futures into a [`FuturesKeyed`], or by
+/// [`push`](FuturesKeyed::push)ing futures onto an existing
+/// [`FuturesKeyed`]. When new futures are added,
+/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving
+/// wake-ups for new futures.
+///
+/// Note that you can create a ready-made [`FuturesKeyed`] via the
+/// [`collect`](Iterator::collect) method, or you can start with an empty set
+/// with the [`FuturesKeyed::new`] constructor.
+///
+/// This type is only available when the `std` or `alloc` feature of this
+/// library is activated, and it is activated by default.
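+///
+/// Unlike [`FuturesUnordered`](crate::stream::FuturesUnordered), a completed
+/// future is handed back together with its `Task`, which owns the caller's
+/// key, so consuming wrappers (such as `MappedFutures`) can track entries
+/// without requiring `K: Clone` (see the module comment above).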
+#[must_use = "streams do nothing unless polled"] +pub(super) struct FuturesKeyed { + ready_to_run_queue: Arc>, + head_all: AtomicPtr>, + is_terminated: AtomicBool, +} + +unsafe impl Send for FuturesKeyed {} +unsafe impl Sync for FuturesKeyed {} +impl Unpin for FuturesKeyed {} + +// impl Spawn for FuturesKeyed> { +// fn spawn_obj(&self, key: K, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> { +// self.push(key, future_obj); +// Ok(()) +// } +// } +// +// impl LocalSpawn for FuturesKeyed> { +// fn spawn_local_obj( +// &self, +// key: K, +// future_obj: LocalFutureObj<'static, ()>, +// ) -> Result<(), SpawnError> { +// self.push(key, future_obj); +// Ok(()) +// } +// } + +// FuturesKeyed is implemented using two linked lists. One which links all +// futures managed by a `FuturesKeyed` and one that tracks futures that have +// been scheduled for polling. The first linked list allows for thread safe +// insertion of nodes at the head as well as forward iteration, but is otherwise +// not thread safe and is only accessed by the thread that owns the +// `FuturesKeyed` value for any other operations. The second linked list is +// an implementation of the intrusive MPSC queue algorithm described by +// 1024cores.net. +// +// When a future is submitted to the set, a task is allocated and inserted in +// both linked lists. The next call to `poll_next` will (eventually) see this +// task and call `poll` on the future. +// +// Before a managed future is polled, the current context's waker is replaced +// with one that is aware of the specific future being run. This ensures that +// wake-up notifications generated by that specific future are visible to +// `FuturesKeyed`. When a wake-up notification is received, the task is +// inserted into the ready to run queue, so that its future can be polled later. +// +// Each task is wrapped in an `Arc` and thereby atomically reference counted. +// Also, each task contains an `AtomicBool` which acts as a flag that indicates +// whether the task is currently inserted in the atomic queue. When a wake-up +// notification is received, the task will only be inserted into the ready to +// run queue if it isn't inserted already. + +impl Default for FuturesKeyed { + fn default() -> Self { + Self::new() + } +} + +impl FuturesKeyed { + /// Constructs a new, empty [`FuturesKeyed`]. + /// + /// The returned [`FuturesKeyed`] does not contain any futures. + /// In this state, [`FuturesKeyed::poll_next`](Stream::poll_next) will + /// return [`Poll::Ready(None)`](Poll::Ready). + pub(super) fn new() -> Self { + let stub = Arc::new(Task { + future: UnsafeCell::new(None), + key: UnsafeCell::new(None), + next_all: AtomicPtr::new(ptr::null_mut()), + prev_all: UnsafeCell::new(ptr::null()), + len_all: UnsafeCell::new(0), + next_ready_to_run: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + ready_to_run_queue: Weak::new(), + woken: AtomicBool::new(false), + }); + let stub_ptr = Arc::as_ptr(&stub); + let ready_to_run_queue = Arc::new(ReadyToRunQueue { + waker: AtomicWaker::new(), + head: AtomicPtr::new(stub_ptr as *mut _), + tail: UnsafeCell::new(stub_ptr), + stub, + }); + + Self { + head_all: AtomicPtr::new(ptr::null_mut()), + ready_to_run_queue, + is_terminated: AtomicBool::new(false), + } + } + + /// Returns the number of futures contained in the set. + /// + /// This represents the total number of in-flight futures. 
+ pub(super) fn len(&self) -> usize { + let (_, len) = self.atomic_load_head_and_len_all(); + len + } + + /// Returns `true` if the set contains no futures. + pub(super) fn is_empty(&self) -> bool { + // Relaxed ordering can be used here since we don't need to read from + // the head pointer, only check whether it is null. + self.head_all.load(Relaxed).is_null() + } + + /// Push a future into the set. + /// + /// This method adds the given future to the set. This method will not + /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must + /// ensure that [`FuturesKeyed::poll_next`](Stream::poll_next) is called + /// in order to receive wake-up notifications for the given future. + pub(super) fn push(&self, key: K, future: Fut) { + let task = Arc::new(Task { + future: UnsafeCell::new(Some(future)), + key: UnsafeCell::new(Some(key)), + next_all: AtomicPtr::new(self.pending_next_all()), + prev_all: UnsafeCell::new(ptr::null_mut()), + len_all: UnsafeCell::new(0), + next_ready_to_run: AtomicPtr::new(ptr::null_mut()), + queued: AtomicBool::new(true), + ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue), + woken: AtomicBool::new(false), + }); + + // Reset the `is_terminated` flag if we've previously marked ourselves + // as terminated. + self.is_terminated.store(false, Relaxed); + + // Right now our task has a strong reference count of 1. We transfer + // ownership of this reference count to our internal linked list + // and we'll reclaim ownership through the `unlink` method below. + let ptr = self.link(task); + + // We'll need to get the future "into the system" to start tracking it, + // e.g. getting its wake-up notifications going to us tracking which + // futures are ready. To do that we unconditionally enqueue it for + // polling here. + self.ready_to_run_queue.enqueue(ptr); + } + + /// Returns an iterator that allows inspecting each future in the set. + pub(super) fn iter(&self) -> Iter<'_, K, Fut> + where + Fut: Unpin, + { + Iter(Pin::new(self).iter_pin_ref()) + } + + /// Returns an iterator that allows inspecting each future in the set. + pub(super) fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, K, Fut> { + let (task, len) = self.atomic_load_head_and_len_all(); + let pending_next_all = self.pending_next_all(); + + IterPinRef { task, len, pending_next_all, _marker: PhantomData } + } + + /// Returns an iterator that allows modifying each future in the set. + pub(super) fn iter_mut(&mut self) -> IterMut<'_, K, Fut> + where + Fut: Unpin, + { + IterMut(Pin::new(self).iter_pin_mut()) + } + + /// Returns an iterator that allows modifying each future in the set. + pub(super) fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut> { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. + let task = *self.head_all.get_mut(); + let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; + + IterPinMut { task, len, _marker: PhantomData } + } + + /// Returns the current head node and number of futures in the list of all + /// futures within a context where access is shared with other threads + /// (mostly for use with the `len` and `iter_pin_ref` methods). 
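The `push` contract documented above (nothing is polled until the stream itself is polled) is easy to observe through the public `FuturesUnordered` wrapper, which delegates to this module in the later patches of this series:

```rust
use futures::executor::block_on;
use futures::stream::{FuturesUnordered, StreamExt};

fn main() {
    block_on(async {
        let mut set = FuturesUnordered::new();
        set.push(async { 1 + 1 }); // not polled yet: only linked and enqueued
        // The first poll happens inside `next`, which drives the future.
        assert_eq!(set.next().await, Some(2));
        assert_eq!(set.next().await, None); // the set is now terminated
    });
}
```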
+    fn atomic_load_head_and_len_all(&self) -> (*const Task<K, Fut>, usize) {
+        let task = self.head_all.load(Acquire);
+        let len = if task.is_null() {
+            0
+        } else {
+            unsafe {
+                (*task).spin_next_all(self.pending_next_all(), Acquire);
+                *(*task).len_all.get()
+            }
+        };
+
+        (task, len)
+    }
+
+    /// Releases the task. It destroys the future inside and either drops
+    /// the `Arc` or transfers ownership to the ready to run queue.
+    /// The task this method is called on must have been unlinked before.
+    fn release_task(&mut self, task: Arc<Task<K, Fut>>) {
+        // `release_task` must only be called on unlinked tasks
+        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
+        unsafe {
+            debug_assert!((*task.prev_all.get()).is_null());
+        }
+
+        // The future is done, try to reset the queued flag. This will prevent
+        // `wake` from doing any work in the future
+        let prev = task.queued.swap(true, SeqCst);
+
+        // Drop the future, even if it hasn't finished yet. This is safe
+        // because we're dropping the future on the thread that owns
+        // `FuturesKeyed`, which correctly tracks `Fut`'s lifetimes and
+        // such.
+        unsafe {
+            // Set to `None` rather than `take()`ing to prevent moving the
+            // future.
+            *task.future.get() = None;
+        }
+
+        // If the queued flag was previously set, then it means that this task
+        // is still in our internal ready to run queue. We then transfer
+        // ownership of our reference count to the ready to run queue, and it'll
+        // come along and free it later, noticing that the future is `None`.
+        //
+        // If, however, the queued flag was *not* set then we're safe to
+        // release our reference count on the task. The queued flag was set
+        // above so all future `enqueue` operations will not actually
+        // enqueue the task, so our task will never see the ready to run queue
+        // again. The task itself will be deallocated once all reference counts
+        // have been dropped elsewhere by the various wakers that contain it.
+        if prev {
+            mem::forget(task);
+        }
+    }
+
+    /// Insert a new task into the internal linked list.
+    fn link(&self, task: Arc<Task<K, Fut>>) -> *const Task<K, Fut> {
+        // `next_all` should already be reset to the pending state before this
+        // function is called.
+        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
+        let ptr = Arc::into_raw(task);
+
+        // Atomically swap out the old head node to get the node that should be
+        // assigned to `next_all`.
+        let next = self.head_all.swap(ptr as *mut _, AcqRel);
+
+        unsafe {
+            // Store the new list length in the new node.
+            let new_len = if next.is_null() {
+                1
+            } else {
+                // Make sure `next_all` has been written to signal that it is
+                // safe to read `len_all`.
+                (*next).spin_next_all(self.pending_next_all(), Acquire);
+                *(*next).len_all.get() + 1
+            };
+            *(*ptr).len_all.get() = new_len;
+
+            // Write the old head as the next node pointer, signaling to other
+            // threads that `len_all` and `next_all` are ready to read.
+            (*ptr).next_all.store(next, Release);
+
+            // `prev_all` updates don't need to be synchronized, as the field is
+            // only ever used after exclusive access has been acquired.
+            if !next.is_null() {
+                *(*next).prev_all.get() = ptr;
+            }
+        }
+
+        ptr
+    }
+
+    /// Remove the task from the linked list tracking all tasks currently
+    /// managed by `FuturesKeyed`.
+    /// This method is unsafe because it must be guaranteed that `task` is a
+    /// valid pointer.
+ unsafe fn unlink(&mut self, task: *const Task) -> Arc> { + // Compute the new list length now in case we're removing the head node + // and won't be able to retrieve the correct length later. + let head = *self.head_all.get_mut(); + debug_assert!(!head.is_null()); + let new_len = *(*head).len_all.get() - 1; + + let task = Arc::from_raw(task); + let next = task.next_all.load(Relaxed); + let prev = *task.prev_all.get(); + task.next_all.store(self.pending_next_all(), Relaxed); + *task.prev_all.get() = ptr::null_mut(); + + if !next.is_null() { + *(*next).prev_all.get() = prev; + } + + if !prev.is_null() { + (*prev).next_all.store(next, Relaxed); + } else { + *self.head_all.get_mut() = next; + } + + // Store the new list length in the head node. + let head = *self.head_all.get_mut(); + if !head.is_null() { + *(*head).len_all.get() = new_len; + } + + task + } + + /// Returns the reserved value for `Task::next_all` to indicate a pending + /// assignment from the thread that inserted the task. + /// + /// `FuturesKeyed::link` needs to update `Task` pointers in an order + /// that ensures any iterators created on other threads can correctly + /// traverse the entire `Task` list using the chain of `next_all` pointers. + /// This could be solved with a compare-exchange loop that stores the + /// current `head_all` in `next_all` and swaps out `head_all` with the new + /// `Task` pointer if the head hasn't already changed. Under heavy thread + /// contention, this compare-exchange loop could become costly. + /// + /// An alternative is to initialize `next_all` to a reserved pending state + /// first, perform an atomic swap on `head_all`, and finally update + /// `next_all` with the old head node. Iterators will then either see the + /// pending state value or the correct next node pointer, and can reload + /// `next_all` as needed until the correct value is loaded. The number of + /// retries needed (if any) would be small and will always be finite, so + /// this should generally perform better than the compare-exchange loop. + /// + /// A valid `Task` pointer in the `head_all` list is guaranteed to never be + /// this value, so it is safe to use as a reserved value until the correct + /// value can be written. + fn pending_next_all(&self) -> *mut Task { + // The `ReadyToRunQueue` stub is never inserted into the `head_all` + // list, and its pointer value will remain valid for the lifetime of + // this `FuturesKeyed`, so we can make use of its value here. + Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ + } +} + +impl Stream for FuturesKeyed { + type Item = (Arc>, Fut::Output); + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let len = self.len(); + + // Keep track of how many child futures we have polled, + // in case we want to forcibly yield. + let mut polled = 0; + let mut yielded = 0; + + // Ensure `parent` is correctly set. + self.ready_to_run_queue.waker.register(cx.waker()); + + loop { + // Safety: &mut self guarantees the mutual exclusion `dequeue` + // expects + let task = match unsafe { self.ready_to_run_queue.dequeue() } { + Dequeue::Empty => { + if self.is_empty() { + // We can only consider ourselves terminated once we + // have yielded a `None` + *self.is_terminated.get_mut() = true; + return Poll::Ready(None); + } else { + return Poll::Pending; + } + } + Dequeue::Inconsistent => { + // At this point, it may be worth yielding the thread & + // spinning a few times... but for now, just yield using the + // task system. 
+ cx.waker().wake_by_ref(); + return Poll::Pending; + } + Dequeue::Data(task) => task, + }; + + debug_assert!(task != self.ready_to_run_queue.stub()); + + // Safety: + // - `task` is a valid pointer. + // - We are the only thread that accesses the `UnsafeCell` that + // contains the future + let future = match unsafe { &mut *(*task).future.get() } { + Some(future) => future, + + // If the future has already gone away then we're just + // cleaning out this task. See the comment in + // `release_task` for more information, but we're basically + // just taking ownership of our reference count here. + None => { + // This case only happens when `release_task` was called + // for this task before and couldn't drop the task + // because it was already enqueued in the ready to run + // queue. + + // Safety: `task` is a valid pointer + let task = unsafe { Arc::from_raw(task) }; + + // Double check that the call to `release_task` really + // happened. Calling it required the task to be unlinked. + debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); + unsafe { + debug_assert!((*task.prev_all.get()).is_null()); + } + continue; + } + }; + + // Safety: `task` is a valid pointer + let task = unsafe { self.unlink(task) }; + + // Unset queued flag: This must be done before polling to ensure + // that the future's task gets rescheduled if it sends a wake-up + // notification **during** the call to `poll`. + let prev = task.queued.swap(false, SeqCst); + assert!(prev); + + // We're going to need to be very careful if the `poll` + // method below panics. We need to (a) not leak memory and + // (b) ensure that we still don't have any use-after-frees. To + // manage this we do a few things: + // + // * A "bomb" is created which if dropped abnormally will call + // `release_task`. That way we'll be sure the memory management + // of the `task` is managed correctly. In particular + // `release_task` will drop the future. This ensures that it is + // dropped on this thread and not accidentally on a different + // thread (bad). + // * We unlink the task from our internal queue to preemptively + // assume it'll panic, in which case we'll want to discard it + // regardless. + struct Bomb<'a, K: Hash + Eq, Fut> { + queue: &'a mut FuturesKeyed, + task: Option>>, + } + + impl Drop for Bomb<'_, K, Fut> { + fn drop(&mut self) { + if let Some(task) = self.task.take() { + self.queue.release_task(task); + } + } + } + + let mut bomb = Bomb { task: Some(task), queue: &mut *self }; + + // Poll the underlying future with the appropriate waker + // implementation. This is where a large bit of the unsafety + // starts to stem from internally. The waker is basically just + // our `Arc>` and can schedule the future for polling by + // enqueuing itself in the ready to run queue. + // + // Critically though `Task` won't actually access `Fut`, the + // future, while it's floating around inside of wakers. + // These structs will basically just use `Fut` to size + // the internal allocation, appropriately accessing fields and + // deallocating the task if need be. + let res = { + let task = bomb.task.as_ref().unwrap(); + // We are only interested in whether the future is awoken before it + // finishes polling, so reset the flag here. 
+ task.woken.store(false, Relaxed); + let waker = Task::waker_ref(task); + let mut cx = Context::from_waker(&waker); + + // Safety: We won't move the future ever again + let future = unsafe { Pin::new_unchecked(future) }; + + future.poll(&mut cx) + }; + polled += 1; + + match res { + Poll::Pending => { + let task = bomb.task.take().unwrap(); + // If the future was awoken during polling, we assume + // the future wanted to explicitly yield. + yielded += task.woken.load(Relaxed) as usize; + bomb.queue.link(task); + + // If a future yields, we respect it and yield here. + // If all futures have been polled, we also yield here to + // avoid starving other tasks waiting on the executor. + // (polling the same future twice per iteration may cause + // the problem: https://github.com/rust-lang/futures-rs/pull/2333) + if yielded >= 2 || polled == len { + cx.waker().wake_by_ref(); + return Poll::Pending; + } + continue; + } + Poll::Ready(output) => { + return Poll::Ready(Some((bomb.task.take().unwrap(), output))) + } + } + } + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.len(); + (len, Some(len)) + } +} + +impl Debug for FuturesKeyed { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "FuturesKeyed {{ ... }}") + } +} + +impl FuturesKeyed { + /// Clears the set, removing all futures. + pub(super) fn clear(&mut self) { + self.clear_head_all(); + + // we just cleared all the tasks, and we have &mut self, so this is safe. + unsafe { self.ready_to_run_queue.clear() }; + + self.is_terminated.store(false, Relaxed); + } + + fn clear_head_all(&mut self) { + while !self.head_all.get_mut().is_null() { + let head = *self.head_all.get_mut(); + let task = unsafe { self.unlink(head) }; + self.release_task(task); + } + } +} + +impl Drop for FuturesKeyed { + fn drop(&mut self) { + // When a `FuturesKeyed` is dropped we want to drop all futures + // associated with it. At the same time though there may be tons of + // wakers flying around which contain `Task` references + // inside them. We'll let those naturally get deallocated. + self.clear_head_all(); + + // Note that at this point we could still have a bunch of tasks in the + // ready to run queue. None of those tasks, however, have futures + // associated with them so they're safe to destroy on any thread. At + // this point the `FuturesKeyed` struct, the owner of the one strong + // reference to the ready to run queue will drop the strong reference. + // At that point whichever thread releases the strong refcount last (be + // it this thread or some other thread as part of an `upgrade`) will + // clear out the ready to run queue and free all remaining tasks. + // + // While that freeing operation isn't guaranteed to happen here, it's + // guaranteed to happen "promptly" as no more "blocking work" will + // happen while there's a strong refcount held. 
+ } +} + +impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a FuturesKeyed { + type Item = &'a Fut; + type IntoIter = Iter<'a, K, Fut>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a mut FuturesKeyed { + type Item = &'a mut Fut; + type IntoIter = IterMut<'a, K, Fut>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +impl IntoIterator for FuturesKeyed { + type Item = Fut; + type IntoIter = IntoIter; + + fn into_iter(mut self) -> Self::IntoIter { + // `head_all` can be accessed directly and we don't need to spin on + // `Task::next_all` since we have exclusive access to the set. + let task = *self.head_all.get_mut(); + let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; + + IntoIter { len, inner: self } + } +} + +impl FromIterator<(K, Fut)> for FuturesKeyed { + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let acc = Self::new(); + iter.into_iter().fold(acc, |acc, (key, fut)| { + acc.push(key, fut); + acc + }) + } +} + +impl FusedStream for FuturesKeyed { + fn is_terminated(&self) -> bool { + self.is_terminated.load(Relaxed) + } +} + +impl Extend<(K, Fut)> for FuturesKeyed { + fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for (key, fut) in iter { + self.push(key, fut); + } + } +} diff --git a/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs b/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs similarity index 83% rename from futures-util/src/stream/futures_unordered/ready_to_run_queue.rs rename to futures-util/src/stream/futures_keyed/ready_to_run_queue.rs index 4518705320..6a0803f060 100644 --- a/futures-util/src/stream/futures_unordered/ready_to_run_queue.rs +++ b/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs @@ -1,6 +1,7 @@ use crate::task::AtomicWaker; use alloc::sync::Arc; use core::cell::UnsafeCell; +use core::hash::Hash; use core::ptr; use core::sync::atomic::AtomicPtr; use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; @@ -8,27 +9,27 @@ use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; use super::abort::abort; use super::task::Task; -pub(super) enum Dequeue { - Data(*const Task), +pub(super) enum Dequeue { + Data(*const Task), Empty, Inconsistent, } -pub(super) struct ReadyToRunQueue { - // The waker of the task using `FuturesUnordered`. +pub(super) struct ReadyToRunQueue { + // The waker of the task using `FuturesKeyed`. pub(super) waker: AtomicWaker, // Head/tail of the readiness queue - pub(super) head: AtomicPtr>, - pub(super) tail: UnsafeCell<*const Task>, - pub(super) stub: Arc>, + pub(super) head: AtomicPtr>, + pub(super) tail: UnsafeCell<*const Task>, + pub(super) stub: Arc>, } /// An MPSC queue into which the tasks containing the futures are inserted /// whenever the future inside is scheduled for polling. -impl ReadyToRunQueue { +impl ReadyToRunQueue { /// The enqueue function from the 1024cores intrusive MPSC queue algorithm. - pub(super) fn enqueue(&self, task: *const Task) { + pub(super) fn enqueue(&self, task: *const Task) { unsafe { debug_assert!((*task).queued.load(Relaxed)); @@ -46,7 +47,7 @@ impl ReadyToRunQueue { /// /// Note that this is unsafe as it required mutual exclusion (only one /// thread can call this) to be guaranteed elsewhere. 
- pub(super) unsafe fn dequeue(&self) -> Dequeue { + pub(super) unsafe fn dequeue(&self) -> Dequeue { let mut tail = *self.tail.get(); let mut next = (*tail).next_ready_to_run.load(Acquire); @@ -82,7 +83,7 @@ impl ReadyToRunQueue { Dequeue::Inconsistent } - pub(super) fn stub(&self) -> *const Task { + pub(super) fn stub(&self) -> *const Task { Arc::as_ptr(&self.stub) } @@ -94,7 +95,7 @@ impl ReadyToRunQueue { // // # Safety // - // - All tasks **must** have had their futures dropped already (by FuturesUnordered::clear) + // - All tasks **must** have had their futures dropped already (by FuturesKeyed::clear) // - The caller **must** guarantee unique access to `self` pub(crate) unsafe fn clear(&self) { loop { @@ -108,12 +109,12 @@ impl ReadyToRunQueue { } } -impl Drop for ReadyToRunQueue { +impl Drop for ReadyToRunQueue { fn drop(&mut self) { // Once we're in the destructor for `Inner` we need to clear out // the ready to run queue of tasks if there's anything left in there. - // All tasks have had their futures dropped already by the `FuturesUnordered` + // All tasks have had their futures dropped already by the `FuturesKeyed` // destructor above, and we have &mut self, so this is safe. unsafe { self.clear(); diff --git a/futures-util/src/stream/futures_unordered/task.rs b/futures-util/src/stream/futures_keyed/task.rs similarity index 80% rename from futures-util/src/stream/futures_unordered/task.rs rename to futures-util/src/stream/futures_keyed/task.rs index ec2114effa..c175f4150f 100644 --- a/futures-util/src/stream/futures_unordered/task.rs +++ b/futures-util/src/stream/futures_keyed/task.rs @@ -6,17 +6,21 @@ use core::sync::atomic::{AtomicBool, AtomicPtr}; use super::abort::abort; use super::ReadyToRunQueue; use crate::task::{waker_ref, ArcWake, WakerRef}; +use core::hash::Hash; -pub(super) struct Task { +pub(crate) struct Task { // The future pub(super) future: UnsafeCell>, + // The key + pub(super) key: UnsafeCell>, + // Next pointer for linked list tracking all active tasks (use // `spin_next_all` to read when access is shared across threads) - pub(super) next_all: AtomicPtr>, + pub(super) next_all: AtomicPtr>, // Previous task in linked list tracking all active tasks - pub(super) prev_all: UnsafeCell<*const Task>, + pub(super) prev_all: UnsafeCell<*const Task>, // Length of the linked list tracking all active tasks when this node was // inserted (use `spin_next_all` to synchronize before reading when access @@ -24,10 +28,10 @@ pub(super) struct Task { pub(super) len_all: UnsafeCell, // Next pointer in ready to run queue - pub(super) next_ready_to_run: AtomicPtr>, + pub(super) next_ready_to_run: AtomicPtr>, // Queue that we'll be enqueued to when woken - pub(super) ready_to_run_queue: Weak>, + pub(super) ready_to_run_queue: Weak>, // Whether or not this task is currently in the ready to run queue pub(super) queued: AtomicBool, @@ -43,10 +47,10 @@ pub(super) struct Task { // // The parent (`super`) module is trusted not to access `future` // across different threads. -unsafe impl Send for Task {} -unsafe impl Sync for Task {} +unsafe impl Send for Task {} +unsafe impl Sync for Task {} -impl ArcWake for Task { +impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc) { let inner = match arc_self.ready_to_run_queue.upgrade() { Some(inner) => inner, @@ -63,9 +67,9 @@ impl ArcWake for Task { // as it'll want to come along and run our task later. // // Note that we don't change the reference count of the task here, - // we merely enqueue the raw pointer. 
The `FuturesUnordered` + // we merely enqueue the raw pointer. The `FuturesKeyed` // implementation guarantees that if we set the `queued` flag that - // there's a reference count held by the main `FuturesUnordered` queue + // there's a reference count held by the main `FuturesKeyed` queue // still. let prev = arc_self.queued.swap(true, SeqCst); if !prev { @@ -75,7 +79,7 @@ impl ArcWake for Task { } } -impl Task { +impl Task { /// Returns a waker reference for this task without cloning the Arc. pub(super) fn waker_ref(this: &Arc) -> WakerRef<'_> { waker_ref(this) @@ -106,15 +110,15 @@ impl Task { } } -impl Drop for Task { +impl Drop for Task { fn drop(&mut self) { - // Since `Task` is sent across all threads for any lifetime, + // Since `Task` is sent across all threads for any lifetime, // regardless of `Fut`, we, to guarantee memory safety, can't actually // touch `Fut` at any time except when we have a reference to the - // `FuturesUnordered` itself . + // `FuturesKeyed` itself . // // Consequently it *should* be the case that we always drop futures from - // the `FuturesUnordered` instance. This is a bomb, just in case there's + // the `FuturesKeyed` instance. This is a bomb, just in case there's // a bug in that logic. unsafe { if (*self.future.get()).is_some() { diff --git a/futures-util/src/stream/futures_unordered/iter.rs b/futures-util/src/stream/futures_unordered/iter.rs index 20248c70fe..913a115b2d 100644 --- a/futures-util/src/stream/futures_unordered/iter.rs +++ b/futures-util/src/stream/futures_unordered/iter.rs @@ -1,16 +1,10 @@ -use super::task::Task; -use super::FuturesUnordered; -use core::marker::PhantomData; +use crate::stream::futures_keyed; use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::Relaxed; /// Mutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinMut<'a, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) _marker: PhantomData<&'a mut FuturesUnordered>, + pub(super) inner: futures_keyed::IterPinMut<'a, (), Fut>, } /// Mutable iterator over all futures in the unordered set. @@ -20,10 +14,7 @@ pub struct IterMut<'a, Fut: Unpin>(pub(super) IterPinMut<'a, Fut>); /// Immutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinRef<'a, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) pending_next_all: *mut Task, - pub(super) _marker: PhantomData<&'a FuturesUnordered>, + pub(super) inner: futures_keyed::IterPinRef<'a, (), Fut>, } /// Immutable iterator over all the futures in the unordered set. @@ -33,42 +24,18 @@ pub struct Iter<'a, Fut: Unpin>(pub(super) IterPinRef<'a, Fut>); /// Owned iterator over all futures in the unordered set. #[derive(Debug)] pub struct IntoIter { - pub(super) len: usize, - pub(super) inner: FuturesUnordered, + pub(super) inner: futures_keyed::IntoIter<(), Fut>, } impl Iterator for IntoIter { type Item = Fut; fn next(&mut self) -> Option { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. 
- let task = self.inner.head_all.get_mut(); - - if (*task).is_null() { - return None; - } - - unsafe { - // Moving out of the future is safe because it is `Unpin` - let future = (*(**task).future.get()).take().unwrap(); - - // Mutable access to a previously shared `FuturesUnordered` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. - let next = (**task).next_all.load(Relaxed); - *task = next; - if !task.is_null() { - *(**task).prev_all.get() = ptr::null_mut(); - } - self.len -= 1; - Some(future) - } + self.inner.next() } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + (self.inner.len, Some(self.inner.len)) } } @@ -78,26 +45,11 @@ impl<'a, Fut> Iterator for IterPinMut<'a, Fut> { type Item = Pin<&'a mut Fut>; fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_mut().unwrap(); - - // Mutable access to a previously shared `FuturesUnordered` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. - let next = (*self.task).next_all.load(Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } + self.inner.next() } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + (self.inner.len, Some(self.inner.len)) } } @@ -121,26 +73,11 @@ impl<'a, Fut> Iterator for IterPinRef<'a, Fut> { type Item = Pin<&'a Fut>; fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_ref().unwrap(); - - // Relaxed ordering can be used since acquire ordering when - // `head_all` was initially read for this iterator implies acquire - // ordering for all previously inserted nodes (and we don't need to - // read `len_all` again for any other nodes). - let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } + self.inner.next() } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + (self.inner.len, Some(self.inner.len)) } } diff --git a/futures-util/src/stream/futures_unordered/mod.rs b/futures-util/src/stream/futures_unordered/mod.rs index 6b5804dc41..29fbea56c3 100644 --- a/futures-util/src/stream/futures_unordered/mod.rs +++ b/futures-util/src/stream/futures_unordered/mod.rs @@ -2,34 +2,21 @@ //! //! This module is only available when the `std` or `alloc` feature of this //! library is activated, and it is activated by default. +//! 
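The hunks that follow reduce `FuturesUnordered` to a thin wrapper over the keyed set with `()` as its key. A toy sketch of that delegation shape; the `Keyed` stand-in here is hypothetical so the snippet compiles on its own, and unlike the real module its `push` takes `&mut self`:

```rust
// Stand-in for the internal keyed set; only the delegation shape matters.
struct Keyed<K, Fut> {
    items: Vec<(K, Fut)>,
}

impl<K, Fut> Keyed<K, Fut> {
    fn push(&mut self, key: K, future: Fut) {
        self.items.push((key, future));
    }
}

// The unordered set becomes a newtype that forwards with the unit key.
struct Unordered<Fut> {
    inner: Keyed<(), Fut>,
}

impl<Fut> Unordered<Fut> {
    fn push(&mut self, future: Fut) {
        self.inner.push((), future); // unit key: no per-future bookkeeping
    }
}

fn main() {
    let mut set = Unordered { inner: Keyed { items: Vec::new() } };
    set.push(async { 1 });
    assert_eq!(set.inner.items.len(), 1);
}
```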
-use crate::task::AtomicWaker; -use alloc::sync::{Arc, Weak}; -use core::cell::UnsafeCell; use core::fmt::{self, Debug}; use core::iter::FromIterator; -use core::marker::PhantomData; -use core::mem; use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; -use core::sync::atomic::{AtomicBool, AtomicPtr}; use futures_core::future::Future; use futures_core::stream::{FusedStream, Stream}; use futures_core::task::{Context, Poll}; use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError}; -mod abort; - mod iter; #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; -mod task; -use self::task::Task; - -mod ready_to_run_queue; -use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; +use crate::stream::futures_keyed::FuturesKeyed; /// A set of futures which may complete in any order. /// @@ -56,9 +43,7 @@ use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; /// library is activated, and it is activated by default. #[must_use = "streams do nothing unless polled"] pub struct FuturesUnordered { - ready_to_run_queue: Arc>, - head_all: AtomicPtr>, - is_terminated: AtomicBool, + inner: FuturesKeyed<(), Fut>, } unsafe impl Send for FuturesUnordered {} @@ -67,14 +52,14 @@ impl Unpin for FuturesUnordered {} impl Spawn for FuturesUnordered> { fn spawn_obj(&self, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> { - self.push(future_obj); + self.inner.push((), future_obj); Ok(()) } } impl LocalSpawn for FuturesUnordered> { fn spawn_local_obj(&self, future_obj: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { - self.push(future_obj); + self.inner.push((), future_obj); Ok(()) } } @@ -117,44 +102,21 @@ impl FuturesUnordered { /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will /// return [`Poll::Ready(None)`](Poll::Ready). pub fn new() -> Self { - let stub = Arc::new(Task { - future: UnsafeCell::new(None), - next_all: AtomicPtr::new(ptr::null_mut()), - prev_all: UnsafeCell::new(ptr::null()), - len_all: UnsafeCell::new(0), - next_ready_to_run: AtomicPtr::new(ptr::null_mut()), - queued: AtomicBool::new(true), - ready_to_run_queue: Weak::new(), - woken: AtomicBool::new(false), - }); - let stub_ptr = Arc::as_ptr(&stub); - let ready_to_run_queue = Arc::new(ReadyToRunQueue { - waker: AtomicWaker::new(), - head: AtomicPtr::new(stub_ptr as *mut _), - tail: UnsafeCell::new(stub_ptr), - stub, - }); - - Self { - head_all: AtomicPtr::new(ptr::null_mut()), - ready_to_run_queue, - is_terminated: AtomicBool::new(false), - } + Self { inner: FuturesKeyed::new() } } /// Returns the number of futures contained in the set. /// /// This represents the total number of in-flight futures. pub fn len(&self) -> usize { - let (_, len) = self.atomic_load_head_and_len_all(); - len + self.inner.len() } /// Returns `true` if the set contains no futures. pub fn is_empty(&self) -> bool { // Relaxed ordering can be used here since we don't need to read from // the head pointer, only check whether it is null. - self.head_all.load(Relaxed).is_null() + self.inner.is_empty() } /// Push a future into the set. @@ -164,31 +126,7 @@ impl FuturesUnordered { /// ensure that [`FuturesUnordered::poll_next`](Stream::poll_next) is called /// in order to receive wake-up notifications for the given future. 
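Both `iter_pin_ref` and `iter_pin_mut` in the hunks below must project a pinned `FuturesUnordered` through to its pinned `inner` field. A minimal, self-contained sketch of that structural-pinning pattern, with hypothetical `Outer`/`Inner` types standing in for the real ones:

```rust
use std::pin::Pin;

struct Inner {
    value: u32,
}

struct Outer {
    inner: Inner,
}

impl Outer {
    fn project(self: Pin<&mut Self>) -> Pin<&mut Inner> {
        // Safety: `inner` is treated as structurally pinned; it is never
        // moved out of `Outer` once `Outer` has been pinned.
        unsafe { self.map_unchecked_mut(|s| &mut s.inner) }
    }
}

fn main() {
    let mut outer = Outer { inner: Inner { value: 7 } };
    // Everything here is Unpin, so `Pin::new` suffices for the demo.
    let projected = Outer::project(Pin::new(&mut outer));
    assert_eq!(projected.value, 7);
}
```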
pub fn push(&self, future: Fut) { - let task = Arc::new(Task { - future: UnsafeCell::new(Some(future)), - next_all: AtomicPtr::new(self.pending_next_all()), - prev_all: UnsafeCell::new(ptr::null_mut()), - len_all: UnsafeCell::new(0), - next_ready_to_run: AtomicPtr::new(ptr::null_mut()), - queued: AtomicBool::new(true), - ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue), - woken: AtomicBool::new(false), - }); - - // Reset the `is_terminated` flag if we've previously marked ourselves - // as terminated. - self.is_terminated.store(false, Relaxed); - - // Right now our task has a strong reference count of 1. We transfer - // ownership of this reference count to our internal linked list - // and we'll reclaim ownership through the `unlink` method below. - let ptr = self.link(task); - - // We'll need to get the future "into the system" to start tracking it, - // e.g. getting its wake-up notifications going to us tracking which - // futures are ready. To do that we unconditionally enqueue it for - // polling here. - self.ready_to_run_queue.enqueue(ptr); + self.inner.push((), future); } /// Returns an iterator that allows inspecting each future in the set. @@ -201,10 +139,10 @@ impl FuturesUnordered { /// Returns an iterator that allows inspecting each future in the set. pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> { - let (task, len) = self.atomic_load_head_and_len_all(); - let pending_next_all = self.pending_next_all(); - - IterPinRef { task, len, pending_next_all, _marker: PhantomData } + // IterPinRef { inner: Pin::new(&self.inner).iter_pin_ref() } + IterPinRef { + inner: unsafe { Pin::map_unchecked(self, |thing| &thing.inner) }.iter_pin_ref(), + } } /// Returns an iterator that allows modifying each future in the set. @@ -216,172 +154,11 @@ impl FuturesUnordered { } /// Returns an iterator that allows modifying each future in the set. - pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - - IterPinMut { task, len, _marker: PhantomData } - } - - /// Returns the current head node and number of futures in the list of all - /// futures within a context where access is shared with other threads - /// (mostly for use with the `len` and `iter_pin_ref` methods). - fn atomic_load_head_and_len_all(&self) -> (*const Task, usize) { - let task = self.head_all.load(Acquire); - let len = if task.is_null() { - 0 - } else { - unsafe { - (*task).spin_next_all(self.pending_next_all(), Acquire); - *(*task).len_all.get() - } - }; - - (task, len) - } - - /// Releases the task. It destroys the future inside and either drops - /// the `Arc` or transfers ownership to the ready to run queue. - /// The task this method is called on must have been unlinked before. - fn release_task(&mut self, task: Arc>) { - // `release_task` must only be called on unlinked tasks - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); - } - - // The future is done, try to reset the queued flag. This will prevent - // `wake` from doing any work in the future - let prev = task.queued.swap(true, SeqCst); - - // Drop the future, even if it hasn't finished yet. 
This is safe - // because we're dropping the future on the thread that owns - // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and - // such. - unsafe { - // Set to `None` rather than `take()`ing to prevent moving the - // future. - *task.future.get() = None; - } - - // If the queued flag was previously set, then it means that this task - // is still in our internal ready to run queue. We then transfer - // ownership of our reference count to the ready to run queue, and it'll - // come along and free it later, noticing that the future is `None`. - // - // If, however, the queued flag was *not* set then we're safe to - // release our reference count on the task. The queued flag was set - // above so all future `enqueue` operations will not actually - // enqueue the task, so our task will never see the ready to run queue - // again. The task itself will be deallocated once all reference counts - // have been dropped elsewhere by the various wakers that contain it. - if prev { - mem::forget(task); - } - } - - /// Insert a new task into the internal linked list. - fn link(&self, task: Arc>) -> *const Task { - // `next_all` should already be reset to the pending state before this - // function is called. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - let ptr = Arc::into_raw(task); - - // Atomically swap out the old head node to get the node that should be - // assigned to `next_all`. - let next = self.head_all.swap(ptr as *mut _, AcqRel); - - unsafe { - // Store the new list length in the new node. - let new_len = if next.is_null() { - 1 - } else { - // Make sure `next_all` has been written to signal that it is - // safe to read `len_all`. - (*next).spin_next_all(self.pending_next_all(), Acquire); - *(*next).len_all.get() + 1 - }; - *(*ptr).len_all.get() = new_len; - - // Write the old head as the next node pointer, signaling to other - // threads that `len_all` and `next_all` are ready to read. - (*ptr).next_all.store(next, Release); - - // `prev_all` updates don't need to be synchronized, as the field is - // only ever used after exclusive access has been acquired. - if !next.is_null() { - *(*next).prev_all.get() = ptr; - } - } - - ptr - } - - /// Remove the task from the linked list tracking all tasks currently - /// managed by `FuturesUnordered`. - /// This method is unsafe because it has be guaranteed that `task` is a - /// valid pointer. - unsafe fn unlink(&mut self, task: *const Task) -> Arc> { - // Compute the new list length now in case we're removing the head node - // and won't be able to retrieve the correct length later. - let head = *self.head_all.get_mut(); - debug_assert!(!head.is_null()); - let new_len = *(*head).len_all.get() - 1; - - let task = Arc::from_raw(task); - let next = task.next_all.load(Relaxed); - let prev = *task.prev_all.get(); - task.next_all.store(self.pending_next_all(), Relaxed); - *task.prev_all.get() = ptr::null_mut(); - - if !next.is_null() { - *(*next).prev_all.get() = prev; - } - - if !prev.is_null() { - (*prev).next_all.store(next, Relaxed); - } else { - *self.head_all.get_mut() = next; - } - - // Store the new list length in the head node. 
- let head = *self.head_all.get_mut(); - if !head.is_null() { - *(*head).len_all.get() = new_len; + pub fn iter_pin_mut(self: Pin<&mut Self>) -> IterPinMut<'_, Fut> { + // IterPinMut { inner: Pin::new(&mut self.inner).iter_pin_mut() } + IterPinMut { + inner: unsafe { Pin::map_unchecked_mut(self, |thing| &mut thing.inner) }.iter_pin_mut(), } - - task - } - - /// Returns the reserved value for `Task::next_all` to indicate a pending - /// assignment from the thread that inserted the task. - /// - /// `FuturesUnordered::link` needs to update `Task` pointers in an order - /// that ensures any iterators created on other threads can correctly - /// traverse the entire `Task` list using the chain of `next_all` pointers. - /// This could be solved with a compare-exchange loop that stores the - /// current `head_all` in `next_all` and swaps out `head_all` with the new - /// `Task` pointer if the head hasn't already changed. Under heavy thread - /// contention, this compare-exchange loop could become costly. - /// - /// An alternative is to initialize `next_all` to a reserved pending state - /// first, perform an atomic swap on `head_all`, and finally update - /// `next_all` with the old head node. Iterators will then either see the - /// pending state value or the correct next node pointer, and can reload - /// `next_all` as needed until the correct value is loaded. The number of - /// retries needed (if any) would be small and will always be finite, so - /// this should generally perform better than the compare-exchange loop. - /// - /// A valid `Task` pointer in the `head_all` list is guaranteed to never be - /// this value, so it is safe to use as a reserved value until the correct - /// value can be written. - fn pending_next_all(&self) -> *mut Task { - // The `ReadyToRunQueue` stub is never inserted into the `head_all` - // list, and its pointer value will remain valid for the lifetime of - // this `FuturesUnordered`, so we can make use of its value here. - Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ } } @@ -389,157 +166,9 @@ impl Stream for FuturesUnordered { type Item = Fut::Output; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let len = self.len(); - - // Keep track of how many child futures we have polled, - // in case we want to forcibly yield. - let mut polled = 0; - let mut yielded = 0; - - // Ensure `parent` is correctly set. - self.ready_to_run_queue.waker.register(cx.waker()); - - loop { - // Safety: &mut self guarantees the mutual exclusion `dequeue` - // expects - let task = match unsafe { self.ready_to_run_queue.dequeue() } { - Dequeue::Empty => { - if self.is_empty() { - // We can only consider ourselves terminated once we - // have yielded a `None` - *self.is_terminated.get_mut() = true; - return Poll::Ready(None); - } else { - return Poll::Pending; - } - } - Dequeue::Inconsistent => { - // At this point, it may be worth yielding the thread & - // spinning a few times... but for now, just yield using the - // task system. - cx.waker().wake_by_ref(); - return Poll::Pending; - } - Dequeue::Data(task) => task, - }; - - debug_assert!(task != self.ready_to_run_queue.stub()); - - // Safety: - // - `task` is a valid pointer. - // - We are the only thread that accesses the `UnsafeCell` that - // contains the future - let future = match unsafe { &mut *(*task).future.get() } { - Some(future) => future, - - // If the future has already gone away then we're just - // cleaning out this task. 
See the comment in - // `release_task` for more information, but we're basically - // just taking ownership of our reference count here. - None => { - // This case only happens when `release_task` was called - // for this task before and couldn't drop the task - // because it was already enqueued in the ready to run - // queue. - - // Safety: `task` is a valid pointer - let task = unsafe { Arc::from_raw(task) }; - - // Double check that the call to `release_task` really - // happened. Calling it required the task to be unlinked. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); - } - continue; - } - }; - - // Safety: `task` is a valid pointer - let task = unsafe { self.unlink(task) }; - - // Unset queued flag: This must be done before polling to ensure - // that the future's task gets rescheduled if it sends a wake-up - // notification **during** the call to `poll`. - let prev = task.queued.swap(false, SeqCst); - assert!(prev); - - // We're going to need to be very careful if the `poll` - // method below panics. We need to (a) not leak memory and - // (b) ensure that we still don't have any use-after-frees. To - // manage this we do a few things: - // - // * A "bomb" is created which if dropped abnormally will call - // `release_task`. That way we'll be sure the memory management - // of the `task` is managed correctly. In particular - // `release_task` will drop the future. This ensures that it is - // dropped on this thread and not accidentally on a different - // thread (bad). - // * We unlink the task from our internal queue to preemptively - // assume it'll panic, in which case we'll want to discard it - // regardless. - struct Bomb<'a, Fut> { - queue: &'a mut FuturesUnordered, - task: Option>>, - } - - impl Drop for Bomb<'_, Fut> { - fn drop(&mut self) { - if let Some(task) = self.task.take() { - self.queue.release_task(task); - } - } - } - - let mut bomb = Bomb { task: Some(task), queue: &mut *self }; - - // Poll the underlying future with the appropriate waker - // implementation. This is where a large bit of the unsafety - // starts to stem from internally. The waker is basically just - // our `Arc>` and can schedule the future for polling by - // enqueuing itself in the ready to run queue. - // - // Critically though `Task` won't actually access `Fut`, the - // future, while it's floating around inside of wakers. - // These structs will basically just use `Fut` to size - // the internal allocation, appropriately accessing fields and - // deallocating the task if need be. - let res = { - let task = bomb.task.as_ref().unwrap(); - // We are only interested in whether the future is awoken before it - // finishes polling, so reset the flag here. - task.woken.store(false, Relaxed); - let waker = Task::waker_ref(task); - let mut cx = Context::from_waker(&waker); - - // Safety: We won't move the future ever again - let future = unsafe { Pin::new_unchecked(future) }; - - future.poll(&mut cx) - }; - polled += 1; - - match res { - Poll::Pending => { - let task = bomb.task.take().unwrap(); - // If the future was awoken during polling, we assume - // the future wanted to explicitly yield. - yielded += task.woken.load(Relaxed) as usize; - bomb.queue.link(task); - - // If a future yields, we respect it and yield here. - // If all futures have been polled, we also yield here to - // avoid starving other tasks waiting on the executor. 
- // (polling the same future twice per iteration may cause - // the problem: https://github.com/rust-lang/futures-rs/pull/2333) - if yielded >= 2 || polled == len { - cx.waker().wake_by_ref(); - return Poll::Pending; - } - continue; - } - Poll::Ready(output) => return Poll::Ready(Some(output)), - } + match std::task::ready!(Pin::new(&mut self.inner).poll_next(cx)) { + Some((_, output)) => Poll::Ready(Some(output)), + None => Poll::Ready(None), } } @@ -558,45 +187,15 @@ impl Debug for FuturesUnordered { impl FuturesUnordered { /// Clears the set, removing all futures. pub fn clear(&mut self) { - self.clear_head_all(); - - // we just cleared all the tasks, and we have &mut self, so this is safe. - unsafe { self.ready_to_run_queue.clear() }; - - self.is_terminated.store(false, Relaxed); - } - - fn clear_head_all(&mut self) { - while !self.head_all.get_mut().is_null() { - let head = *self.head_all.get_mut(); - let task = unsafe { self.unlink(head) }; - self.release_task(task); - } + self.inner.clear() } } -impl Drop for FuturesUnordered { - fn drop(&mut self) { - // When a `FuturesUnordered` is dropped we want to drop all futures - // associated with it. At the same time though there may be tons of - // wakers flying around which contain `Task` references - // inside them. We'll let those naturally get deallocated. - self.clear_head_all(); - - // Note that at this point we could still have a bunch of tasks in the - // ready to run queue. None of those tasks, however, have futures - // associated with them so they're safe to destroy on any thread. At - // this point the `FuturesUnordered` struct, the owner of the one strong - // reference to the ready to run queue will drop the strong reference. - // At that point whichever thread releases the strong refcount last (be - // it this thread or some other thread as part of an `upgrade`) will - // clear out the ready to run queue and free all remaining tasks. - // - // While that freeing operation isn't guaranteed to happen here, it's - // guaranteed to happen "promptly" as no more "blocking work" will - // happen while there's a strong refcount held. - } -} +// impl Drop for FuturesUnordered { +// fn drop(&mut self) { +// drop(self.inner) +// } +// } impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered { type Item = &'a Fut; @@ -620,13 +219,13 @@ impl IntoIterator for FuturesUnordered { type Item = Fut; type IntoIter = IntoIter; - fn into_iter(mut self) -> Self::IntoIter { + fn into_iter(self) -> Self::IntoIter { // `head_all` can be accessed directly and we don't need to spin on // `Task::next_all` since we have exclusive access to the set. 
- let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; + // let task = *self.head_all.get_mut(); + // let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - IntoIter { len, inner: self } + IntoIter { inner: self.inner.into_iter() } } } @@ -645,7 +244,7 @@ impl FromIterator for FuturesUnordered { impl FusedStream for FuturesUnordered { fn is_terminated(&self) -> bool { - self.is_terminated.load(Relaxed) + self.inner.is_terminated() } } diff --git a/futures-util/src/stream/mod.rs b/futures-util/src/stream/mod.rs index 65d6d38a82..08ae0362f2 100644 --- a/futures-util/src/stream/mod.rs +++ b/futures-util/src/stream/mod.rs @@ -115,6 +115,7 @@ mod futures_ordered; #[cfg(feature = "alloc")] pub use self::futures_ordered::FuturesOrdered; +mod futures_keyed; #[cfg(not(futures_no_atomic_cas))] #[cfg(feature = "alloc")] pub mod futures_unordered; From 642140128fceced69aaf4c28154f24031e235a35 Mon Sep 17 00:00:00 2001 From: CreativeStoic Date: Sat, 30 Sep 2023 14:38:05 -0400 Subject: [PATCH 04/10] implement mapped_futures and futures_unordered as consumer of futures_keyed --- futures-util/src/stream/futures_keyed/iter.rs | 77 +-- futures-util/src/stream/futures_keyed/mod.rs | 92 +-- .../futures_keyed/ready_to_run_queue.rs | 9 +- futures-util/src/stream/futures_keyed/task.rs | 76 ++- .../src/stream/futures_unordered/iter.rs | 14 +- .../src/stream/futures_unordered/mod.rs | 20 +- .../src/stream/mapped_futures/abort.rs | 12 - .../src/stream/mapped_futures/iter.rs | 126 ++-- futures-util/src/stream/mapped_futures/mod.rs | 594 +++--------------- .../mapped_futures/ready_to_run_queue.rs | 123 ---- .../src/stream/mapped_futures/task.rs | 180 ------ 11 files changed, 329 insertions(+), 994 deletions(-) delete mode 100644 futures-util/src/stream/mapped_futures/abort.rs delete mode 100644 futures-util/src/stream/mapped_futures/ready_to_run_queue.rs delete mode 100644 futures-util/src/stream/mapped_futures/task.rs diff --git a/futures-util/src/stream/futures_keyed/iter.rs b/futures-util/src/stream/futures_keyed/iter.rs index 847aba0f64..5caff4b37e 100644 --- a/futures-util/src/stream/futures_keyed/iter.rs +++ b/futures-util/src/stream/futures_keyed/iter.rs @@ -1,6 +1,5 @@ use super::task::Task; -use super::FuturesKeyed; -use core::hash::Hash; +use super::{FuturesKeyed, ReleasesTask}; use core::marker::PhantomData; use core::pin::Pin; use core::ptr; @@ -8,38 +7,40 @@ use core::sync::atomic::Ordering::Relaxed; /// Mutable iterator over all futures in the unordered set. #[derive(Debug)] -pub(crate) struct IterPinMut<'a, K: Hash + Eq, Fut> { +pub(crate) struct IterPinMut<'a, K, Fut, S: ReleasesTask> { pub(super) task: *const Task, pub(crate) len: usize, - pub(super) _marker: PhantomData<&'a mut FuturesKeyed>, + pub(super) _marker: PhantomData<&'a mut FuturesKeyed>, } /// Mutable iterator over all futures in the unordered set. #[derive(Debug)] -pub struct IterMut<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinMut<'a, K, Fut>); +pub(crate) struct IterMut<'a, K, Fut: Unpin, S: ReleasesTask>( + pub(super) IterPinMut<'a, K, Fut, S>, +); /// Immutable iterator over all futures in the unordered set. 
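The reworked iterators below unwrap pinned references for `Unpin` futures via `Pin::get_mut` and `Pin::get_ref`. A tiny compile-checked reminder of why the `Unpin` bound makes that safe, with a plain `i32` standing in for the future type:

```rust
use std::pin::Pin;

fn main() {
    let mut x = 5i32; // i32: Unpin, like the `Fut: Unpin` bound on IterMut
    let pinned: Pin<&mut i32> = Pin::new(&mut x);
    // `get_mut` exists only because the target is Unpin; without that bound,
    // handing out `&mut` would let the pinned value be moved.
    *Pin::get_mut(pinned) += 1;
    assert_eq!(x, 6);
}
```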
#[derive(Debug)] -pub(crate) struct IterPinRef<'a, K: Hash + Eq, Fut> { +pub(crate) struct IterPinRef<'a, K, Fut, S: ReleasesTask> { pub(super) task: *const Task, pub(crate) len: usize, pub(super) pending_next_all: *mut Task, - pub(super) _marker: PhantomData<&'a FuturesKeyed>, + pub(super) _marker: PhantomData<&'a FuturesKeyed>, } /// Immutable iterator over all the futures in the unordered set. #[derive(Debug)] -pub struct Iter<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinRef<'a, K, Fut>); +pub(crate) struct Iter<'a, K, Fut: Unpin, S: ReleasesTask>(pub(super) IterPinRef<'a, K, Fut, S>); /// Owned iterator over all futures in the unordered set. #[derive(Debug)] -pub(crate) struct IntoIter { +pub(crate) struct IntoIter> { pub(crate) len: usize, - pub(super) inner: FuturesKeyed, + pub(super) inner: FuturesKeyed, } -impl Iterator for IntoIter { - type Item = Fut; +impl> Iterator for IntoIter { + type Item = (K, Fut); fn next(&mut self) -> Option { // `head_all` can be accessed directly and we don't need to spin on @@ -53,6 +54,7 @@ impl Iterator for IntoIter { unsafe { // Moving out of the future is safe because it is `Unpin` let future = (*(**task).future.get()).take().unwrap(); + let key = (**task).take_key(); // Mutable access to a previously shared `FuturesKeyed` implies // that the other threads already released the object before the @@ -64,7 +66,7 @@ impl Iterator for IntoIter { *(**task).prev_all.get() = ptr::null_mut(); } self.len -= 1; - Some(future) + Some((key, future)) } } @@ -73,10 +75,10 @@ impl Iterator for IntoIter { } } -impl ExactSizeIterator for IntoIter {} +impl> ExactSizeIterator for IntoIter {} -impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { - type Item = Pin<&'a mut Fut>; +impl<'a, K, Fut, S: ReleasesTask> Iterator for IterPinMut<'a, K, Fut, S> { + type Item = (&'a K, Pin<&'a mut Fut>); fn next(&mut self) -> Option { if self.task.is_null() { @@ -85,6 +87,7 @@ impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { unsafe { let future = (*(*self.task).future.get()).as_mut().unwrap(); + let key = (*(*self.task).key.get()).as_ref().unwrap(); // Mutable access to a previously shared `FuturesKeyed` implies // that the other threads already released the object before the @@ -93,7 +96,7 @@ impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { let next = (*self.task).next_all.load(Relaxed); self.task = next; self.len -= 1; - Some(Pin::new_unchecked(future)) + Some((key, Pin::new_unchecked(future))) } } @@ -102,13 +105,13 @@ impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { } } -impl ExactSizeIterator for IterPinMut<'_, K, Fut> {} +impl> ExactSizeIterator for IterPinMut<'_, K, Fut, S> {} -impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for IterMut<'a, K, Fut> { - type Item = &'a mut Fut; +impl<'a, K, Fut: Unpin, S: ReleasesTask> Iterator for IterMut<'a, K, Fut, S> { + type Item = (&'a K, &'a mut Fut); fn next(&mut self) -> Option { - self.0.next().map(Pin::get_mut) + self.0.next().map(|opt| (opt.0, Pin::get_mut(opt.1))) } fn size_hint(&self) -> (usize, Option) { @@ -116,10 +119,10 @@ impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for IterMut<'a, K, Fut> { } } -impl ExactSizeIterator for IterMut<'_, K, Fut> {} +impl> ExactSizeIterator for IterMut<'_, K, Fut, S> {} -impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { - type Item = Pin<&'a Fut>; +impl<'a, K, Fut, S: ReleasesTask> Iterator for IterPinRef<'a, K, Fut, S> { + type Item = (&'a K, Pin<&'a Fut>); fn next(&mut self) -> Option { if self.task.is_null() { @@ 
-128,6 +131,7 @@ impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { unsafe { let future = (*(*self.task).future.get()).as_ref().unwrap(); + let key = (*(*self.task).key.get()).as_ref().unwrap(); // Relaxed ordering can be used since acquire ordering when // `head_all` was initially read for this iterator implies acquire @@ -136,7 +140,7 @@ impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); self.task = next; self.len -= 1; - Some(Pin::new_unchecked(future)) + Some((key, Pin::new_unchecked(future))) } } @@ -145,13 +149,14 @@ impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { } } -impl ExactSizeIterator for IterPinRef<'_, K, Fut> {} +impl> ExactSizeIterator for IterPinRef<'_, K, Fut, S> {} -impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for Iter<'a, K, Fut> { - type Item = &'a Fut; +impl<'a, K, Fut: Unpin, S: ReleasesTask> Iterator for Iter<'a, K, Fut, S> { + type Item = (&'a K, &'a Fut); fn next(&mut self) -> Option { - self.0.next().map(Pin::get_ref) + // self.0.next().map(Pin::get_ref) + self.0.next().map(|opt| (opt.0, Pin::get_ref(opt.1))) } fn size_hint(&self) -> (usize, Option) { @@ -159,15 +164,15 @@ impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for Iter<'a, K, Fut> { } } -impl ExactSizeIterator for Iter<'_, K, Fut> {} +impl> ExactSizeIterator for Iter<'_, K, Fut, S> {} // SAFETY: we do nothing thread-local and there is no interior mutability, // so the usual structural `Send`/`Sync` apply. -unsafe impl Send for IterPinRef<'_, K, Fut> {} -unsafe impl Sync for IterPinRef<'_, K, Fut> {} +unsafe impl> Send for IterPinRef<'_, K, Fut, S> {} +unsafe impl> Sync for IterPinRef<'_, K, Fut, S> {} -unsafe impl Send for IterPinMut<'_, K, Fut> {} -unsafe impl Sync for IterPinMut<'_, K, Fut> {} +unsafe impl> Send for IterPinMut<'_, K, Fut, S> {} +unsafe impl> Sync for IterPinMut<'_, K, Fut, S> {} -unsafe impl Send for IntoIter {} -unsafe impl Sync for IntoIter {} +unsafe impl> Send for IntoIter {} +unsafe impl> Sync for IntoIter {} diff --git a/futures-util/src/stream/futures_keyed/mod.rs b/futures-util/src/stream/futures_keyed/mod.rs index 080cdf7f8c..c7f9432185 100644 --- a/futures-util/src/stream/futures_keyed/mod.rs +++ b/futures-util/src/stream/futures_keyed/mod.rs @@ -14,7 +14,6 @@ use crate::task::AtomicWaker; use alloc::sync::{Arc, Weak}; use core::cell::UnsafeCell; use core::fmt::{self, Debug}; -use core::hash::Hash; use core::iter::FromIterator; use core::marker::PhantomData; use core::mem; @@ -33,7 +32,7 @@ mod iter; #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 pub(super) use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; -mod task; +pub(crate) mod task; use self::task::Task; mod ready_to_run_queue; @@ -63,24 +62,30 @@ use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; /// This type is only available when the `std` or `alloc` feature of this /// library is activated, and it is activated by default. 
 #[must_use = "streams do nothing unless polled"]
-pub(super) struct FuturesKeyed<K: Hash + Eq, Fut> {
+pub(super) struct FuturesKeyed<K, Fut, S: ReleasesTask<K>> {
     ready_to_run_queue: Arc<ReadyToRunQueue<K, Fut>>,
     head_all: AtomicPtr<Task<K, Fut>>,
     is_terminated: AtomicBool,
+    pub(crate) inner: S,
 }
 
-unsafe impl<K: Hash + Eq, Fut: Send> Send for FuturesKeyed<K, Fut> {}
-unsafe impl<K: Hash + Eq, Fut: Sync> Sync for FuturesKeyed<K, Fut> {}
-impl<K: Hash + Eq, Fut> Unpin for FuturesKeyed<K, Fut> {}
+pub(crate) trait ReleasesTask<K> {
+    fn release_task(&mut self, key: &K);
+    fn new() -> Self;
+}
+
+unsafe impl<K: Send, Fut: Send, S: ReleasesTask<K>> Send for FuturesKeyed<K, Fut, S> {}
+unsafe impl<K: Sync, Fut: Sync, S: ReleasesTask<K>> Sync for FuturesKeyed<K, Fut, S> {}
+impl<K, Fut, S: ReleasesTask<K>> Unpin for FuturesKeyed<K, Fut, S> {}
 
-// impl<K: Hash + Eq> Spawn for FuturesKeyed<K, FutureObj<'static, ()>> {
+// impl<K, S: ReleasesTask<K>> Spawn for FuturesKeyed<K, FutureObj<'static, ()>, S> {
 //     fn spawn_obj(&self, key: K, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> {
 //         self.push(key, future_obj);
 //         Ok(())
 //     }
 // }
 //
-// impl<K: Hash + Eq> LocalSpawn for FuturesKeyed<K, LocalFutureObj<'static, ()>> {
+// impl<K, S: ReleasesTask<K>> LocalSpawn for FuturesKeyed<K, LocalFutureObj<'static, ()>, S> {
 //     fn spawn_local_obj(
 //         &self,
 //         key: K,
@@ -116,18 +121,24 @@ impl<K: Hash + Eq, Fut> Unpin for FuturesKeyed<K, Fut> {}
 // notification is received, the task will only be inserted into the ready to
 // run queue if it isn't inserted already.
 
-impl<K: Hash + Eq, Fut> Default for FuturesKeyed<K, Fut> {
+impl<K, Fut, S: ReleasesTask<K>> Default for FuturesKeyed<K, Fut, S> {
     fn default() -> Self {
         Self::new()
     }
 }
 
-impl<K: Hash + Eq, Fut> FuturesKeyed<K, Fut> {
+impl<K, Fut, S: ReleasesTask<K>> FuturesKeyed<K, Fut, S> {
     /// Constructs a new, empty [`FuturesKeyed`].
     ///
     /// The returned [`FuturesKeyed`] does not contain any futures.
     /// In this state, [`FuturesKeyed::poll_next`](Stream::poll_next) will
     /// return [`Poll::Ready(None)`](Poll::Ready).
+    ///
+    /// Note that the key type `K` carries no trait bounds here. The intended use is as a key
+    /// into hash-map-like structs, which would require `K: Hash + Eq`, but `K` can serve any
+    /// other purpose. What matters is that if tasks are also stored in some other struct, then
+    /// when `release_task` is called the task should be removed from that other struct as
+    /// well; `K` exists to make that possible.
     pub(super) fn new() -> Self {
         let stub = Arc::new(Task {
             future: UnsafeCell::new(None),
@@ -152,6 +163,7 @@ impl<K: Hash + Eq, Fut> FuturesKeyed<K, Fut> {
             head_all: AtomicPtr::new(ptr::null_mut()),
             ready_to_run_queue,
             is_terminated: AtomicBool::new(false),
+            inner: S::new(),
         }
     }
 
@@ -176,7 +188,7 @@ impl<K: Hash + Eq, Fut> FuturesKeyed<K, Fut> {
     /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must
     /// ensure that [`FuturesKeyed::poll_next`](Stream::poll_next) is called
     /// in order to receive wake-up notifications for the given future.
-    pub(super) fn push(&self, key: K, future: Fut) {
+    pub(super) fn push(&self, key: K, future: Fut) -> Arc<Task<K, Fut>> {
         let task = Arc::new(Task {
             future: UnsafeCell::new(Some(future)),
             key: UnsafeCell::new(Some(key)),
@@ -196,17 +208,18 @@ impl<K: Hash + Eq, Fut> FuturesKeyed<K, Fut> {
         // Right now our task has a strong reference count of 1. We transfer
         // ownership of this reference count to our internal linked list
         // and we'll reclaim ownership through the `unlink` method below.
-        let ptr = self.link(task);
+        let ptr = self.link(task.clone());
 
         // We'll need to get the future "into the system" to start tracking it,
         // e.g. getting its wake-up notifications going to us tracking which
         // futures are ready. To do that we unconditionally enqueue it for
         // polling here.
         self.ready_to_run_queue.enqueue(ptr);
+        task
     }
 
     /// Returns an iterator that allows inspecting each future in the set.
-    pub(super) fn iter(&self) -> Iter<'_, K, Fut>
+    pub(super) fn iter(&self) -> Iter<'_, K, Fut, S>
     where
         Fut: Unpin,
     {
@@ -214,7 +227,7 @@ impl<K: Hash + Eq, Fut> FuturesKeyed<K, Fut> {
     }
 
     /// Returns an iterator that allows inspecting each future in the set.
- pub(super) fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, K, Fut> { + pub(super) fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, K, Fut, S> { let (task, len) = self.atomic_load_head_and_len_all(); let pending_next_all = self.pending_next_all(); @@ -222,7 +235,7 @@ impl FuturesKeyed { } /// Returns an iterator that allows modifying each future in the set. - pub(super) fn iter_mut(&mut self) -> IterMut<'_, K, Fut> + pub(super) fn iter_mut(&mut self) -> IterMut<'_, K, Fut, S> where Fut: Unpin, { @@ -230,7 +243,7 @@ impl FuturesKeyed { } /// Returns an iterator that allows modifying each future in the set. - pub(super) fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut> { + pub(super) fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut, S> { // `head_all` can be accessed directly and we don't need to spin on // `Task::next_all` since we have exclusive access to the set. let task = *self.head_all.get_mut(); @@ -260,6 +273,9 @@ impl FuturesKeyed { /// the `Arc` or transfers ownership to the ready to run queue. /// The task this method is called on must have been unlinked before. fn release_task(&mut self, task: Arc>) { + if let Some(key) = task.key() { + self.inner.release_task(key); + } // `release_task` must only be called on unlinked tasks debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); unsafe { @@ -337,7 +353,7 @@ impl FuturesKeyed { /// managed by `FuturesKeyed`. /// This method is unsafe because it has be guaranteed that `task` is a /// valid pointer. - unsafe fn unlink(&mut self, task: *const Task) -> Arc> { + pub(crate) unsafe fn unlink(&mut self, task: *const Task) -> Arc> { // Compute the new list length now in case we're removing the head node // and won't be able to retrieve the correct length later. let head = *self.head_all.get_mut(); @@ -399,7 +415,7 @@ impl FuturesKeyed { } } -impl Stream for FuturesKeyed { +impl> Stream for FuturesKeyed { type Item = (Arc>, Fut::Output); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -492,12 +508,12 @@ impl Stream for FuturesKeyed { // * We unlink the task from our internal queue to preemptively // assume it'll panic, in which case we'll want to discard it // regardless. - struct Bomb<'a, K: Hash + Eq, Fut> { - queue: &'a mut FuturesKeyed, + struct Bomb<'a, K, Fut, S: ReleasesTask> { + queue: &'a mut FuturesKeyed, task: Option>>, } - impl Drop for Bomb<'_, K, Fut> { + impl> Drop for Bomb<'_, K, Fut, S> { fn drop(&mut self) { if let Some(task) = self.task.take() { self.queue.release_task(task); @@ -553,7 +569,9 @@ impl Stream for FuturesKeyed { continue; } Poll::Ready(output) => { - return Poll::Ready(Some((bomb.task.take().unwrap(), output))) + let task = bomb.task.take().unwrap(); + unsafe { (*task.future.get()).take() }; + return Poll::Ready(Some((task, output))); } } } @@ -565,13 +583,13 @@ impl Stream for FuturesKeyed { } } -impl Debug for FuturesKeyed { +impl> Debug for FuturesKeyed { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "FuturesKeyed {{ ... }}") } } -impl FuturesKeyed { +impl> FuturesKeyed { /// Clears the set, removing all futures. pub(super) fn clear(&mut self) { self.clear_head_all(); @@ -591,7 +609,7 @@ impl FuturesKeyed { } } -impl Drop for FuturesKeyed { +impl> Drop for FuturesKeyed { fn drop(&mut self) { // When a `FuturesKeyed` is dropped we want to drop all futures // associated with it. 
At the same time though there may be tons of @@ -614,27 +632,27 @@ impl Drop for FuturesKeyed { } } -impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a FuturesKeyed { - type Item = &'a Fut; - type IntoIter = Iter<'a, K, Fut>; +impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator for &'a FuturesKeyed { + type Item = (&'a K, &'a Fut); + type IntoIter = Iter<'a, K, Fut, S>; fn into_iter(self) -> Self::IntoIter { self.iter() } } -impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a mut FuturesKeyed { - type Item = &'a mut Fut; - type IntoIter = IterMut<'a, K, Fut>; +impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator for &'a mut FuturesKeyed { + type Item = (&'a K, &'a mut Fut); + type IntoIter = IterMut<'a, K, Fut, S>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } -impl IntoIterator for FuturesKeyed { - type Item = Fut; - type IntoIter = IntoIter; +impl> IntoIterator for FuturesKeyed { + type Item = (K, Fut); + type IntoIter = IntoIter; fn into_iter(mut self) -> Self::IntoIter { // `head_all` can be accessed directly and we don't need to spin on @@ -646,7 +664,7 @@ impl IntoIterator for FuturesKeyed { } } -impl FromIterator<(K, Fut)> for FuturesKeyed { +impl> FromIterator<(K, Fut)> for FuturesKeyed { fn from_iter(iter: I) -> Self where I: IntoIterator, @@ -659,13 +677,13 @@ impl FromIterator<(K, Fut)> for FuturesKeyed { } } -impl FusedStream for FuturesKeyed { +impl> FusedStream for FuturesKeyed { fn is_terminated(&self) -> bool { self.is_terminated.load(Relaxed) } } -impl Extend<(K, Fut)> for FuturesKeyed { +impl> Extend<(K, Fut)> for FuturesKeyed { fn extend(&mut self, iter: I) where I: IntoIterator, diff --git a/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs b/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs index 6a0803f060..574ac50b31 100644 --- a/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs +++ b/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs @@ -1,7 +1,6 @@ use crate::task::AtomicWaker; use alloc::sync::Arc; use core::cell::UnsafeCell; -use core::hash::Hash; use core::ptr; use core::sync::atomic::AtomicPtr; use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; @@ -9,13 +8,13 @@ use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; use super::abort::abort; use super::task::Task; -pub(super) enum Dequeue { +pub(super) enum Dequeue { Data(*const Task), Empty, Inconsistent, } -pub(super) struct ReadyToRunQueue { +pub(super) struct ReadyToRunQueue { // The waker of the task using `FuturesKeyed`. pub(super) waker: AtomicWaker, @@ -27,7 +26,7 @@ pub(super) struct ReadyToRunQueue { /// An MPSC queue into which the tasks containing the futures are inserted /// whenever the future inside is scheduled for polling. -impl ReadyToRunQueue { +impl ReadyToRunQueue { /// The enqueue function from the 1024cores intrusive MPSC queue algorithm. pub(super) fn enqueue(&self, task: *const Task) { unsafe { @@ -109,7 +108,7 @@ impl ReadyToRunQueue { } } -impl Drop for ReadyToRunQueue { +impl Drop for ReadyToRunQueue { fn drop(&mut self) { // Once we're in the destructor for `Inner` we need to clear out // the ready to run queue of tasks if there's anything left in there. 
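
To make the new hook concrete before moving on: the sketch below restates `ReleasesTask` as a standalone trait (in the patch it is a `pub(crate)` item inside `futures_keyed`) and pairs it with a hypothetical implementor, `KeyIndex`, that mirrors what an embedder is expected to do. Everything except the trait's shape is illustrative and not part of the patch.

use std::collections::HashSet;
use std::hash::Hash;

// Standalone restatement of the `ReleasesTask` hook introduced above.
trait ReleasesTask<K> {
    fn release_task(&mut self, key: &K);
    fn new() -> Self;
}

// Hypothetical embedder state: a side index of live keys. `FuturesKeyed`
// calls `release_task` whenever it destroys a task, so the index can drop
// its entry for that key at the same moment.
struct KeyIndex<K: Hash + Eq> {
    live: HashSet<K>,
}

impl<K: Hash + Eq> ReleasesTask<K> for KeyIndex<K> {
    fn release_task(&mut self, key: &K) {
        // Forget the key so no lookup can reach a task whose future is gone.
        self.live.remove(key);
    }

    fn new() -> Self {
        Self { live: HashSet::new() }
    }
}

fn main() {
    let mut index: KeyIndex<u32> = ReleasesTask::new();
    index.live.insert(7);
    index.release_task(&7);
    assert!(!index.live.contains(&7));
}

This is the contract that `TaskSet` fulfils for `MappedFutures` later in the series, and it is why `FuturesKeyed::release_task` now calls `self.inner.release_task(key)` before tearing the task down.
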
diff --git a/futures-util/src/stream/futures_keyed/task.rs b/futures-util/src/stream/futures_keyed/task.rs index c175f4150f..70ed3d6b19 100644 --- a/futures-util/src/stream/futures_keyed/task.rs +++ b/futures-util/src/stream/futures_keyed/task.rs @@ -1,5 +1,7 @@ use alloc::sync::{Arc, Weak}; +use core::borrow::Borrow; use core::cell::UnsafeCell; +use core::ops::Deref; use core::sync::atomic::Ordering::{self, Relaxed, SeqCst}; use core::sync::atomic::{AtomicBool, AtomicPtr}; @@ -8,12 +10,12 @@ use super::ReadyToRunQueue; use crate::task::{waker_ref, ArcWake, WakerRef}; use core::hash::Hash; -pub(crate) struct Task { +pub(crate) struct Task { // The future - pub(super) future: UnsafeCell>, + pub(crate) future: UnsafeCell>, // The key - pub(super) key: UnsafeCell>, + pub(crate) key: UnsafeCell>, // Next pointer for linked list tracking all active tasks (use // `spin_next_all` to read when access is shared across threads) @@ -47,10 +49,10 @@ pub(crate) struct Task { // // The parent (`super`) module is trusted not to access `future` // across different threads. -unsafe impl Send for Task {} -unsafe impl Sync for Task {} +unsafe impl Send for Task {} +unsafe impl Sync for Task {} -impl ArcWake for Task { +impl ArcWake for Task { fn wake_by_ref(arc_self: &Arc) { let inner = match arc_self.ready_to_run_queue.upgrade() { Some(inner) => inner, @@ -79,7 +81,7 @@ impl ArcWake for Task { } } -impl Task { +impl Task { /// Returns a waker reference for this task without cloning the Arc. pub(super) fn waker_ref(this: &Arc) -> WakerRef<'_> { waker_ref(this) @@ -108,9 +110,15 @@ impl Task { } } } + pub(crate) fn key(&self) -> Option<&K> { + unsafe { (&*self.key.get()).as_ref() } + } + pub(crate) fn take_key(&self) -> K { + unsafe { (*self.key.get()).take().unwrap() } + } } -impl Drop for Task { +impl Drop for Task { fn drop(&mut self) { // Since `Task` is sent across all threads for any lifetime, // regardless of `Fut`, we, to guarantee memory safety, can't actually @@ -127,3 +135,55 @@ impl Drop for Task { } } } + +// Wrapper struct; exists effectively to implement hash on the type Arc +pub(crate) struct HashTask { + pub(crate) inner: Arc>, +} + +impl From>> for HashTask { + fn from(inner: Arc>) -> Self { + HashTask { inner } + } +} + +impl HashTask { + fn key(&self) -> Option<&K> { + Task::key(&*self) + } + // pub(crate) fn key_unwrap(&self) -> &K { + // unsafe { (&*self.key.get()).as_ref().unwrap() } + // } +} + +impl Deref for HashTask { + type Target = Task; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl Borrow for HashTask { + fn borrow(&self) -> &K { + // Never use the borrowed form after the key has been removed from the task + // Never use Task in a context where this method may be called + // IE. 
The Stub task never goes into the HashSet + unsafe { (*self.key.get()).as_ref().unwrap() } + } +} + +impl Hash for HashTask { + fn hash(&self, state: &mut H) { + unsafe { (*self.key.get()).as_ref() }.unwrap().hash(state) + } +} + +impl PartialEq for HashTask { + fn eq(&self, other: &Self) -> bool { + self.key() == other.key() + } +} +impl Eq for HashTask {} +unsafe impl Send for HashTask {} +unsafe impl Sync for HashTask {} diff --git a/futures-util/src/stream/futures_unordered/iter.rs b/futures-util/src/stream/futures_unordered/iter.rs index 913a115b2d..9365074a41 100644 --- a/futures-util/src/stream/futures_unordered/iter.rs +++ b/futures-util/src/stream/futures_unordered/iter.rs @@ -1,10 +1,12 @@ use crate::stream::futures_keyed; use core::pin::Pin; +use super::DummyStruct; + /// Mutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinMut<'a, Fut> { - pub(super) inner: futures_keyed::IterPinMut<'a, (), Fut>, + pub(super) inner: futures_keyed::IterPinMut<'a, (), Fut, DummyStruct>, } /// Mutable iterator over all futures in the unordered set. @@ -14,7 +16,7 @@ pub struct IterMut<'a, Fut: Unpin>(pub(super) IterPinMut<'a, Fut>); /// Immutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinRef<'a, Fut> { - pub(super) inner: futures_keyed::IterPinRef<'a, (), Fut>, + pub(super) inner: futures_keyed::IterPinRef<'a, (), Fut, DummyStruct>, } /// Immutable iterator over all the futures in the unordered set. @@ -24,14 +26,14 @@ pub struct Iter<'a, Fut: Unpin>(pub(super) IterPinRef<'a, Fut>); /// Owned iterator over all futures in the unordered set. #[derive(Debug)] pub struct IntoIter { - pub(super) inner: futures_keyed::IntoIter<(), Fut>, + pub(super) inner: futures_keyed::IntoIter<(), Fut, DummyStruct>, } impl Iterator for IntoIter { type Item = Fut; fn next(&mut self) -> Option { - self.inner.next() + self.inner.next().map(|opt| opt.1) } fn size_hint(&self) -> (usize, Option) { @@ -45,7 +47,7 @@ impl<'a, Fut> Iterator for IterPinMut<'a, Fut> { type Item = Pin<&'a mut Fut>; fn next(&mut self) -> Option { - self.inner.next() + self.inner.next().map(|opt| opt.1) } fn size_hint(&self) -> (usize, Option) { @@ -73,7 +75,7 @@ impl<'a, Fut> Iterator for IterPinRef<'a, Fut> { type Item = Pin<&'a Fut>; fn next(&mut self) -> Option { - self.inner.next() + self.inner.next().map(|opt| opt.1) } fn size_hint(&self) -> (usize, Option) { diff --git a/futures-util/src/stream/futures_unordered/mod.rs b/futures-util/src/stream/futures_unordered/mod.rs index 29fbea56c3..a733be4ecd 100644 --- a/futures-util/src/stream/futures_unordered/mod.rs +++ b/futures-util/src/stream/futures_unordered/mod.rs @@ -18,6 +18,8 @@ pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; use crate::stream::futures_keyed::FuturesKeyed; +use super::futures_keyed::ReleasesTask; + /// A set of futures which may complete in any order. /// /// See [`FuturesOrdered`](crate::stream::FuturesOrdered) for a version of this @@ -43,7 +45,23 @@ use crate::stream::futures_keyed::FuturesKeyed; /// library is activated, and it is activated by default. 
#[must_use = "streams do nothing unless polled"] pub struct FuturesUnordered { - inner: FuturesKeyed<(), Fut>, + inner: FuturesKeyed<(), Fut, DummyStruct>, +} + +struct DummyStruct {} + +impl fmt::Debug for DummyStruct { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "") + } +} + +impl ReleasesTask<()> for DummyStruct { + fn release_task(&mut self, _key: &()) {} + + fn new() -> Self { + DummyStruct {} + } } unsafe impl Send for FuturesUnordered {} diff --git a/futures-util/src/stream/mapped_futures/abort.rs b/futures-util/src/stream/mapped_futures/abort.rs deleted file mode 100644 index 1a42d24369..0000000000 --- a/futures-util/src/stream/mapped_futures/abort.rs +++ /dev/null @@ -1,12 +0,0 @@ -pub(super) fn abort(s: &str) -> ! { - struct DoublePanic; - - impl Drop for DoublePanic { - fn drop(&mut self) { - panic!("panicking twice to abort the program"); - } - } - - let _bomb = DoublePanic; - panic!("{}", s); -} diff --git a/futures-util/src/stream/mapped_futures/iter.rs b/futures-util/src/stream/mapped_futures/iter.rs index 0ec0ff4898..d8bbc8ef0c 100644 --- a/futures-util/src/stream/mapped_futures/iter.rs +++ b/futures-util/src/stream/mapped_futures/iter.rs @@ -1,122 +1,82 @@ -use super::task::{HashTask, Task}; -use super::MappedFutures; +use super::TaskSet; +use crate::stream::futures_keyed; use core::hash::Hash; -use core::marker::PhantomData; use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::Relaxed; /// Mutable iterator over all futures in the unordered set. -#[derive(Debug)] +// #[derive(Debug)] pub struct IterPinMut<'a, K: Hash + Eq, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) _marker: PhantomData<&'a mut MappedFutures>, + pub(super) inner: futures_keyed::IterPinMut<'a, K, Fut, TaskSet>, + // pub(super) task: *const Task, + // pub(super) len: usize, + // pub(super) _marker: PhantomData<&'a mut MappedFutures>, } /// Mutable iterator over all futures in the unordered set. -#[derive(Debug)] +// #[derive(Debug)] pub struct IterMut<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinMut<'a, K, Fut>); /// Immutable iterator over all futures in the unordered set. -#[derive(Debug)] +// #[derive(Debug)] pub struct IterPinRef<'a, K: Hash + Eq, Fut> { - pub(super) task: *const Task, - pub(super) len: usize, - pub(super) pending_next_all: *mut Task, - pub(super) _marker: PhantomData<&'a MappedFutures>, + // pub(super) task: *const Task, + // pub(super) len: usize, + // pub(super) pending_next_all: *mut Task, + // pub(super) _marker: PhantomData<&'a MappedFutures>, + pub(super) inner: futures_keyed::IterPinRef<'a, K, Fut, TaskSet>, } /// Immutable iterator over all the futures in the unordered set. -#[derive(Debug)] +// #[derive(Debug)] pub struct Iter<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinRef<'a, K, Fut>); /// Owned iterator over all futures in the unordered set. -#[derive(Debug)] +// #[derive(Debug)] pub struct IntoIter { - pub(super) len: usize, - pub(super) inner: MappedFutures, + pub(super) inner: futures_keyed::IntoIter>, } /// Immutable iterator over all keys in the mapping. 
pub struct Keys<'a, K: Hash + Eq, Fut> { - pub(super) inner: std::iter::Map< - std::collections::hash_set::Iter<'a, HashTask>, - Box) -> &'a K>, - >, + pub(super) inner: futures_keyed::IterPinRef<'a, K, Fut, TaskSet>, // pub(super) inner: std::iter::Map< + // std::collections::hash_set::Iter<'a, HashTask>, + // Box) -> &'a K>, + // >, } impl Iterator for IntoIter { - type Item = Fut; + type Item = (K, Fut); fn next(&mut self) -> Option { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - let task = self.inner.head_all.get_mut(); - - if (*task).is_null() { - return None; - } - - unsafe { - // Moving out of the future is safe because it is `Unpin` - let future = (*(**task).future.get()).take().unwrap(); - - // Mutable access to a previously shared `MappedFutures` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. - let next = (**task).next_all.load(Relaxed); - *task = next; - if !task.is_null() { - *(**task).prev_all.get() = ptr::null_mut(); - } - self.len -= 1; - Some(future) - } + self.inner.next() } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + self.inner.size_hint() } } impl ExactSizeIterator for IntoIter {} impl<'a, K: Hash + Eq, Fut> Iterator for IterPinMut<'a, K, Fut> { - type Item = Pin<&'a mut Fut>; + type Item = (&'a K, Pin<&'a mut Fut>); fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_mut().unwrap(); - - // Mutable access to a previously shared `MappedFutures` implies - // that the other threads already released the object before the - // current thread acquired it, so relaxed ordering can be used and - // valid `next_all` checks can be skipped. - let next = (*self.task).next_all.load(Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } + self.inner.next() } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + (self.inner.len, Some(self.inner.len)) } } impl ExactSizeIterator for IterPinMut<'_, K, Fut> {} impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for IterMut<'a, K, Fut> { - type Item = &'a mut Fut; + type Item = (&'a K, &'a mut Fut); fn next(&mut self) -> Option { - self.0.next().map(Pin::get_mut) + self.0.next().map(|(key, fut_pin)| (key, Pin::get_mut(fut_pin))) } fn size_hint(&self) -> (usize, Option) { @@ -127,39 +87,25 @@ impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for IterMut<'a, K, Fut> { impl ExactSizeIterator for IterMut<'_, K, Fut> {} impl<'a, K: Hash + Eq, Fut> Iterator for IterPinRef<'a, K, Fut> { - type Item = Pin<&'a Fut>; + type Item = (&'a K, Pin<&'a Fut>); fn next(&mut self) -> Option { - if self.task.is_null() { - return None; - } - - unsafe { - let future = (*(*self.task).future.get()).as_ref().unwrap(); - - // Relaxed ordering can be used since acquire ordering when - // `head_all` was initially read for this iterator implies acquire - // ordering for all previously inserted nodes (and we don't need to - // read `len_all` again for any other nodes). 
- let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed); - self.task = next; - self.len -= 1; - Some(Pin::new_unchecked(future)) - } + self.inner.next() } fn size_hint(&self) -> (usize, Option) { - (self.len, Some(self.len)) + self.inner.size_hint() } } impl ExactSizeIterator for IterPinRef<'_, K, Fut> {} impl<'a, K: Hash + Eq, Fut: Unpin> Iterator for Iter<'a, K, Fut> { - type Item = &'a Fut; + type Item = (&'a K, &'a Fut); fn next(&mut self) -> Option { - self.0.next().map(Pin::get_ref) + // self.0.next() + self.0.next().map(|(key, fut_pin)| (key, Pin::get_ref(fut_pin))) } fn size_hint(&self) -> (usize, Option) { @@ -175,7 +121,7 @@ impl<'a, K: Hash + Eq, Fut> Iterator for Keys<'a, K, Fut> { type Item = &'a K; fn next(&mut self) -> Option { - self.inner.next() + self.inner.next().map(|opt| opt.0) } } diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs index f70d98ad03..5287c79af8 100644 --- a/futures-util/src/stream/mapped_futures/mod.rs +++ b/futures-util/src/stream/mapped_futures/mod.rs @@ -1,34 +1,22 @@ //! An unbounded map of futures. -use crate::task::AtomicWaker; -use alloc::sync::{Arc, Weak}; -use core::cell::UnsafeCell; +use alloc::sync::Arc; use core::fmt::{self, Debug}; use core::hash::Hash; use core::iter::FromIterator; -use core::marker::PhantomData; -use core::mem; use core::pin::Pin; -use core::ptr; -use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst}; -use core::sync::atomic::{AtomicBool, AtomicPtr}; use futures_core::future::Future; use futures_core::stream::{FusedStream, Stream}; use futures_core::task::{Context, Poll}; use std::collections::HashSet; -mod abort; - mod iter; use self::iter::Keys; #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; +use crate::stream::futures_keyed::task::HashTask; -mod task; -use self::task::{HashTask, Task}; - -mod ready_to_run_queue; -use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; +use super::futures_keyed::{FuturesKeyed, ReleasesTask}; /// A map of futures which may complete in any order. /// @@ -49,10 +37,34 @@ use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; /// with the [`MappedFutures::new`] constructor. #[must_use = "streams do nothing unless polled"] pub struct MappedFutures { - hash_set: HashSet>, - ready_to_run_queue: Arc>, - head_all: AtomicPtr>, - is_terminated: AtomicBool, + inner: FuturesKeyed>, +} + +struct TaskSet { + inner: HashSet>, +} + +// impl Deref for TaskSet { +// type Target = HashSet>; +// +// fn deref(&self) -> &Self::Target { +// &self.inner +// } +// } +// impl DerefMut for TaskSet { +// fn deref_mut(&mut self) -> &Self::Target { +// &mut self.inner +// } +// } + +impl ReleasesTask for TaskSet { + fn release_task(&mut self, key: &K) { + self.inner.remove(key); + } + + fn new() -> Self { + Self { inner: HashSet::new() } + } } unsafe impl Send for MappedFutures {} @@ -97,46 +109,26 @@ impl MappedFutures { /// In this state, [`MappedFutures::poll_next`](Stream::poll_next) will /// return [`Poll::Ready(None)`](Poll::Ready). 
     pub fn new() -> Self {
-        let stub = Arc::new(Task {
-            future: UnsafeCell::new(None),
-            next_all: AtomicPtr::new(ptr::null_mut()),
-            prev_all: UnsafeCell::new(ptr::null()),
-            len_all: UnsafeCell::new(0),
-            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
-            queued: AtomicBool::new(true),
-            ready_to_run_queue: Weak::new(),
-            woken: AtomicBool::new(false),
-            key: UnsafeCell::new(None),
-        });
-        let stub_ptr = Arc::as_ptr(&stub);
-        let ready_to_run_queue = Arc::new(ReadyToRunQueue {
-            waker: AtomicWaker::new(),
-            head: AtomicPtr::new(stub_ptr as *mut _),
-            tail: UnsafeCell::new(stub_ptr),
-            stub,
-        });
-
-        Self {
-            hash_set: HashSet::new(),
-            head_all: AtomicPtr::new(ptr::null_mut()),
-            ready_to_run_queue,
-            is_terminated: AtomicBool::new(false),
-        }
+        Self { inner: FuturesKeyed::new() }
     }
 
     /// Returns the number of futures contained in the set.
     ///
     /// This represents the total number of in-flight futures.
     pub fn len(&self) -> usize {
-        let (_, len) = self.atomic_load_head_and_len_all();
-        len
+        self.inner.len()
     }
 
     /// Returns `true` if the set contains no futures.
     pub fn is_empty(&self) -> bool {
         // Relaxed ordering can be used here since we don't need to read from
         // the head pointer, only check whether it is null.
-        self.head_all.load(Relaxed).is_null()
+        self.inner.is_empty()
+    }
+
+    /// Helper to reach the backing hash set; the `.inner.inner` chain is admittedly a code smell.
+    fn set(&mut self) -> &mut HashSet<HashTask<K, Fut>> {
+        &mut self.inner.inner.inner
     }
 
     /// Insert a future into the set.
@@ -150,32 +142,9 @@ impl<K: Hash + Eq, Fut> MappedFutures<K, Fut> {
     /// Returns `true` if no existing future had to be cancelled to make room for the provided one.
     pub fn insert(&mut self, key: K, future: Fut) -> bool {
         let replacing = self.cancel(&key);
-        let task = Arc::new(Task {
-            future: UnsafeCell::new(Some(future)),
-            next_all: AtomicPtr::new(self.pending_next_all()),
-            prev_all: UnsafeCell::new(ptr::null_mut()),
-            len_all: UnsafeCell::new(0),
-            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
-            queued: AtomicBool::new(true),
-            ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
-            woken: AtomicBool::new(false),
-            key: UnsafeCell::new(Some(key)),
-        });
-
-        // Reset the `is_terminated` flag if we've previously marked ourselves
-        // as terminated.
-        self.is_terminated.store(false, Relaxed);
-
-        // Right now our task has a strong reference count of 1. We transfer
-        // ownership of this reference count to our internal linked list
-        // and we'll reclaim ownership through the `unlink` method below.
-        let ptr = self.link(task);
-
-        // We'll need to get the future "into the system" to start tracking it,
-        // e.g. getting its wake-up notifications going to us tracking which
-        // futures are ready. To do that we unconditionally enqueue it for
-        // polling here.
-        self.ready_to_run_queue.enqueue(ptr);
+        let task = self.inner.push(key, future);
+        // self.inner.self.hash_set.insert(task.into());
+        self.set().insert(task.into());
         !replacing
     }
 
@@ -191,32 +160,9 @@ impl<K: Hash + Eq, Fut> MappedFutures<K, Fut> {
         Fut: Unpin,
     {
         let replacing = self.remove(&key);
-        let task = Arc::new(Task {
-            future: UnsafeCell::new(Some(future)),
-            next_all: AtomicPtr::new(self.pending_next_all()),
-            prev_all: UnsafeCell::new(ptr::null_mut()),
-            len_all: UnsafeCell::new(0),
-            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
-            queued: AtomicBool::new(true),
-            ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
-            woken: AtomicBool::new(false),
-            key: UnsafeCell::new(Some(key)),
-        });
-
-        // Reset the `is_terminated` flag if we've previously marked ourselves
-        // as terminated.
- self.is_terminated.store(false, Relaxed); - - // Right now our task has a strong reference count of 1. We transfer - // ownership of this reference count to our internal linked list - // and we'll reclaim ownership through the `unlink` method below. - let ptr = self.link(task); - - // We'll need to get the future "into the system" to start tracking it, - // e.g. getting its wake-up notifications going to us tracking which - // futures are ready. To do that we unconditionally enqueue it for - // polling here. - self.ready_to_run_queue.enqueue(ptr); + let task = self.inner.push(key, future); + // self.hash_set.insert(task.into()); + self.set().insert(task.into()); replacing } @@ -224,10 +170,11 @@ impl MappedFutures { /// /// Returns true if a future was cancelled. pub fn cancel(&mut self, key: &K) -> bool { - if let Some(task) = self.hash_set.get(key) { + // if let Some(task) = self.hash_set.get(key) { + if let Some(task) = self.set().take(key) { unsafe { if let Some(_) = (*task.future.get()).take() { - self.unlink(Arc::as_ptr(&task.inner)); + self.inner.unlink(Arc::as_ptr(&task.inner)); return true; } } @@ -240,10 +187,11 @@ impl MappedFutures { where Fut: Unpin, { - if let Some(task) = self.hash_set.get(key) { + // if let Some(task) = self.hash_set.get(key) { + if let Some(task) = self.set().take(key) { unsafe { let fut = (*task.future.get()).take().unwrap(); - self.unlink(Arc::as_ptr(&task.inner)); + self.inner.unlink(Arc::as_ptr(&task.inner)); return Some(fut); } } @@ -252,12 +200,13 @@ impl MappedFutures { /// Returns `true` if the map contains a future for the specified key. pub fn contains(&mut self, key: &K) -> bool { - self.hash_set.contains(key) + self.set().contains(key) } /// Get a pinned mutable reference to the mapped future. pub fn get_pin_mut(&mut self, key: &K) -> Option> { - if let Some(task_ref) = self.hash_set.get(key) { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { unsafe { if let Some(fut) = &mut *task_ref.inner.future.get() { return Some(Pin::new_unchecked(fut)); @@ -272,7 +221,8 @@ impl MappedFutures { where Fut: Unpin, { - if let Some(task_ref) = self.hash_set.get(key) { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { unsafe { if let Some(fut) = &mut *task_ref.inner.future.get() { return Some(fut); @@ -287,7 +237,8 @@ impl MappedFutures { where Fut: Unpin, { - if let Some(task_ref) = self.hash_set.get(key) { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { unsafe { if let Some(fut) = &*task_ref.inner.future.get() { return Some(fut); @@ -299,7 +250,8 @@ impl MappedFutures { /// Get a pinned shared reference to the mapped future. pub fn get_pin(&mut self, key: &K) -> Option> { - if let Some(task_ref) = self.hash_set.get(key) { + // if let Some(task_ref) = self.hash_set.get(key) { + if let Some(task_ref) = self.set().get(key) { unsafe { if let Some(fut) = &*task_ref.future.get() { return Some(Pin::new_unchecked(fut)); @@ -312,7 +264,10 @@ impl MappedFutures { /// Returns an iterator of keys in the mapping. 
pub fn keys<'a>(&'a self) -> Keys<'a, K, Fut> { Keys { - inner: self.hash_set.iter().map(Box::new(|hash_task| HashTask::key_unwrap(hash_task))), + // inner: self.hash_set.iter().map(Box::new(|hash_task| HashTask::key_unwrap(hash_task))), + // inner: self.set().iter().map(Box::new(|hash_task| HashTask::key_unwrap(hash_task))), + // inner: self.set().keys(), + inner: Pin::new(&self.inner).iter_pin_ref(), } } @@ -326,10 +281,12 @@ impl MappedFutures { /// Returns an iterator that allows inspecting each future in the set. pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, K, Fut> { - let (task, len) = self.atomic_load_head_and_len_all(); - let pending_next_all = self.pending_next_all(); + // let (task, len) = self.atomic_load_head_and_len_all(); + // let pending_next_all = self.pending_next_all(); - IterPinRef { task, len, pending_next_all, _marker: PhantomData } + // IterPinRef { task, len, pending_next_all, _marker: PhantomData } + IterPinRef { inner: unsafe { self.map_unchecked(|thing| &thing.inner) }.iter_pin_ref() } + // IterPinRef { task: (), len: (), pending_next_all: (), _marker: () } } /// Returns an iterator that allows modifying each future in the set. @@ -341,185 +298,15 @@ impl MappedFutures { } /// Returns an iterator that allows modifying each future in the set. - pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut> { + pub fn iter_pin_mut(self: Pin<&mut Self>) -> IterPinMut<'_, K, Fut> { // `head_all` can be accessed directly and we don't need to spin on // `Task::next_all` since we have exclusive access to the set. - let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - - IterPinMut { task, len, _marker: PhantomData } - } - - /// Returns the current head node and number of futures in the list of all - /// futures within a context where access is shared with other threads - /// (mostly for use with the `len` and `iter_pin_ref` methods). - fn atomic_load_head_and_len_all(&self) -> (*const Task, usize) { - let task = self.head_all.load(Acquire); - let len = if task.is_null() { - 0 - } else { - unsafe { - (*task).spin_next_all(self.pending_next_all(), Acquire); - *(*task).len_all.get() - } - }; - - (task, len) - } - /// Releases the task. It destroys the future inside and either drops - /// the `Arc` or transfers ownership to the ready to run queue. - /// The task this method is called on must have been unlinked before. - fn release_task(&mut self, task: Arc>) { - // `release_task` must only be called on unlinked tasks - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); + // IterPinMut { inner: Pin::new(&mut self.inner).iter_pin_mut() } + IterPinMut { + inner: unsafe { self.map_unchecked_mut(|thing| &mut thing.inner) }.iter_pin_mut(), } - - // The future is done, try to reset the queued flag. This will prevent - // `wake` from doing any work in the future - let prev = task.queued.swap(true, SeqCst); - - // Drop the future, even if it hasn't finished yet. This is safe - // because we're dropping the future on the thread that owns - // `MappedFutures`, which correctly tracks `Fut`'s lifetimes and - // such. - unsafe { - // Set to `None` rather than `take()`ing to prevent moving the - // future. 
- *task.future.get() = None; - - let key = &*task.key.get(); - if let Some(key) = key { - self.hash_set.remove(key); - } - } - - // If the queued flag was previously set, then it means that this task - // is still in our internal ready to run queue. We then transfer - // ownership of our reference count to the ready to run queue, and it'll - // come along and free it later, noticing that the future is `None`. - // - // If, however, the queued flag was *not* set then we're safe to - // release our reference count on the task. The queued flag was set - // above so all future `enqueue` operations will not actually - // enqueue the task, so our task will never see the ready to run queue - // again. The task itself will be deallocated once all reference counts - // have been dropped elsewhere by the various wakers that contain it. - if prev { - mem::forget(task); - } - } - - /// Insert a new task into the internal linked list. - fn link(&mut self, task: Arc>) -> *const Task { - // `next_all` should already be reset to the pending state before this - // function is called. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - - let hash_task = HashTask { inner: task.clone() }; - self.hash_set.insert(hash_task); - - let ptr = Arc::into_raw(task); - - // Atomically swap out the old head node to get the node that should be - // assigned to `next_all`. - let next = self.head_all.swap(ptr as *mut _, AcqRel); - - unsafe { - // Store the new list length in the new node. - let new_len = if next.is_null() { - 1 - } else { - // Make sure `next_all` has been written to signal that it is - // safe to read `len_all`. - (*next).spin_next_all(self.pending_next_all(), Acquire); - *(*next).len_all.get() + 1 - }; - *(*ptr).len_all.get() = new_len; - - // Write the old head as the next node pointer, signaling to other - // threads that `len_all` and `next_all` are ready to read. - (*ptr).next_all.store(next, Release); - - // `prev_all` updates don't need to be synchronized, as the field is - // only ever used after exclusive access has been acquired. - if !next.is_null() { - *(*next).prev_all.get() = ptr; - } - } - - ptr - } - - /// Remove the task from the linked list tracking all tasks currently - /// managed by `MappedFutures`. - /// This method is unsafe because it has be guaranteed that `task` is a - /// valid pointer. - unsafe fn unlink(&mut self, task: *const Task) -> Arc> { - // Compute the new list length now in case we're removing the head node - // and won't be able to retrieve the correct length later. - let head = *self.head_all.get_mut(); - debug_assert!(!head.is_null()); - let new_len = *(*head).len_all.get() - 1; - - if let Some(key) = (*task).key() { - self.hash_set.remove(key); - } - - let task = Arc::from_raw(task); - let next = task.next_all.load(Relaxed); - let prev = *task.prev_all.get(); - task.next_all.store(self.pending_next_all(), Relaxed); - *task.prev_all.get() = ptr::null_mut(); - - if !next.is_null() { - *(*next).prev_all.get() = prev; - } - - if !prev.is_null() { - (*prev).next_all.store(next, Relaxed); - } else { - *self.head_all.get_mut() = next; - } - - // Store the new list length in the head node. - let head = *self.head_all.get_mut(); - if !head.is_null() { - *(*head).len_all.get() = new_len; - } - - task - } - - /// Returns the reserved value for `Task::next_all` to indicate a pending - /// assignment from the thread that inserted the task. 
- /// - /// `MappedFutures::link` needs to update `Task` pointers in an order - /// that ensures any iterators created on other threads can correctly - /// traverse the entire `Task` list using the chain of `next_all` pointers. - /// This could be solved with a compare-exchange loop that stores the - /// current `head_all` in `next_all` and swaps out `head_all` with the new - /// `Task` pointer if the head hasn't already changed. Under heavy thread - /// contention, this compare-exchange loop could become costly. - /// - /// An alternative is to initialize `next_all` to a reserved pending state - /// first, perform an atomic swap on `head_all`, and finally update - /// `next_all` with the old head node. Iterators will then either see the - /// pending state value or the correct next node pointer, and can reload - /// `next_all` as needed until the correct value is loaded. The number of - /// retries needed (if any) would be small and will always be finite, so - /// this should generally perform better than the compare-exchange loop. - /// - /// A valid `Task` pointer in the `head_all` list is guaranteed to never be - /// this value, so it is safe to use as a reserved value until the correct - /// value can be written. - fn pending_next_all(&self) -> *mut Task { - // The `ReadyToRunQueue` stub is never inserted into the `head_all` - // list, and its pointer value will remain valid for the lifetime of - // this `MappedFutures`, so we can make use of its value here. - Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ + // IterPinMut { task, len, _marker: PhantomData } } } @@ -527,159 +314,15 @@ impl Stream for MappedFutures { type Item = (K, Fut::Output); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let len = self.len(); - - // Keep track of how many child futures we have polled, - // in case we want to forcibly yield. - let mut polled = 0; - let mut yielded = 0; - - // Ensure `parent` is correctly set. - self.ready_to_run_queue.waker.register(cx.waker()); - - loop { - // Safety: &mut self guarantees the mutual exclusion `dequeue` - // expects - let task = match unsafe { self.ready_to_run_queue.dequeue() } { - Dequeue::Empty => { - if self.is_empty() { - // We can only consider ourselves terminated once we - // have yielded a `None` - *self.is_terminated.get_mut() = true; - return Poll::Ready(None); - } else { - return Poll::Pending; - } - } - Dequeue::Inconsistent => { - // At this point, it may be worth yielding the thread & - // spinning a few times... but for now, just yield using the - // task system. - cx.waker().wake_by_ref(); - return Poll::Pending; - } - Dequeue::Data(task) => task, - }; - - debug_assert!(task != self.ready_to_run_queue.stub()); - - // Safety: - // - `task` is a valid pointer. - // - We are the only thread that accesses the `UnsafeCell` that - // contains the future - let future = match unsafe { &mut *(*task).future.get() } { - Some(future) => future, - - // If the future has already gone away then we're just - // cleaning out this task. See the comment in - // `release_task` for more information, but we're basically - // just taking ownership of our reference count here. - None => { - // This case only happens when `release_task` was called - // for this task before and couldn't drop the task - // because it was already enqueued in the ready to run - // queue. - - // Safety: `task` is a valid pointer - let task = unsafe { Arc::from_raw(task) }; - - // Double check that the call to `release_task` really - // happened. 
Calling it required the task to be unlinked. - debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all()); - unsafe { - debug_assert!((*task.prev_all.get()).is_null()); - } - continue; - } - }; - - // Safety: `task` is a valid pointer - let task = unsafe { self.unlink(task) }; - - // Unset queued flag: This must be done before polling to ensure - // that the future's task gets rescheduled if it sends a wake-up - // notification **during** the call to `poll`. - let prev = task.queued.swap(false, SeqCst); - assert!(prev); - - // We're going to need to be very careful if the `poll` - // method below panics. We need to (a) not leak memory and - // (b) ensure that we still don't have any use-after-frees. To - // manage this we do a few things: - // - // * A "bomb" is created which if dropped abnormally will call - // `release_task`. That way we'll be sure the memory management - // of the `task` is managed correctly. In particular - // `release_task` will drop the future. This ensures that it is - // dropped on this thread and not accidentally on a different - // thread (bad). - // * We unlink the task from our internal queue to preemptively - // assume it'll panic, in which case we'll want to discard it - // regardless. - struct Bomb<'a, K: Hash + Eq, Fut> { - queue: &'a mut MappedFutures, - task: Option>>, - } - - impl Drop for Bomb<'_, K, Fut> { - fn drop(&mut self) { - if let Some(task) = self.task.take() { - self.queue.release_task(task); - } - } - } - - let mut bomb = Bomb { task: Some(task), queue: &mut *self }; - - // Poll the underlying future with the appropriate waker - // implementation. This is where a large bit of the unsafety - // starts to stem from internally. The waker is basically just - // our `Arc>` and can schedule the future for polling by - // enqueuing itself in the ready to run queue. - // - // Critically though `Task` won't actually access `Fut`, the - // future, while it's floating around inside of wakers. - // These structs will basically just use `Fut` to size - // the internal allocation, appropriately accessing fields and - // deallocating the task if need be. - let res = { - let task = bomb.task.as_ref().unwrap(); - // We are only interested in whether the future is awoken before it - // finishes polling, so reset the flag here. - task.woken.store(false, Relaxed); - let waker = Task::waker_ref(task); - let mut cx = Context::from_waker(&waker); - - // Safety: We won't move the future ever again - let future = unsafe { Pin::new_unchecked(future) }; - - future.poll(&mut cx) - }; - polled += 1; - - match res { - Poll::Pending => { - let task = bomb.task.take().unwrap(); - // If the future was awoken during polling, we assume - // the future wanted to explicitly yield. - yielded += task.woken.load(Relaxed) as usize; - bomb.queue.link(task); - - // If a future yields, we respect it and yield here. - // If all futures have been polled, we also yield here to - // avoid starving other tasks waiting on the executor. 
- // (polling the same future twice per iteration may cause - // the problem: https://github.com/rust-lang/futures-rs/pull/2333) - if yielded >= 2 || polled == len { - cx.waker().wake_by_ref(); - return Poll::Pending; - } - continue; - } - Poll::Ready(output) => { - return Poll::Ready(Some((bomb.task.as_ref().unwrap().take_key(), output))); - } + match std::task::ready!(Pin::new(&mut self.inner).poll_next(cx)) { + Some((task, output)) => { + // let key = (*(**task).key.get()).take().unwrap(); + // let key = task.take_key(); + // MappedFutures::set(self.get_mut()).remove(&key); + MappedFutures::set(self.get_mut()).remove(task.key().unwrap()); + return Poll::Ready(Some((task.take_key(), output))); } + None => Poll::Ready(None), } } @@ -698,49 +341,13 @@ impl Debug for MappedFutures { impl MappedFutures { /// Clears the set, removing all futures. pub fn clear(&mut self) { - self.clear_head_all(); - self.hash_set.clear(); - - // we just cleared all the tasks, and we have &mut self, so this is safe. - unsafe { self.ready_to_run_queue.clear() }; - - self.is_terminated.store(false, Relaxed); - } - - fn clear_head_all(&mut self) { - while !self.head_all.get_mut().is_null() { - let head = *self.head_all.get_mut(); - let task = unsafe { self.unlink(head) }; - self.release_task(task); - } - } -} - -impl Drop for MappedFutures { - fn drop(&mut self) { - // When a `MappedFutures` is dropped we want to drop all futures - // associated with it. At the same time though there may be tons of - // wakers flying around which contain `Task` references - // inside them. We'll let those naturally get deallocated. - self.clear_head_all(); - - // Note that at this point we could still have a bunch of tasks in the - // ready to run queue. None of those tasks, however, have futures - // associated with them so they're safe to destroy on any thread. At - // this point the `MappedFutures` struct, the owner of the one strong - // reference to the ready to run queue will drop the strong reference. - // At that point whichever thread releases the strong refcount last (be - // it this thread or some other thread as part of an `upgrade`) will - // clear out the ready to run queue and free all remaining tasks. - // - // While that freeing operation isn't guaranteed to happen here, it's - // guaranteed to happen "promptly" as no more "blocking work" will - // happen while there's a strong refcount held. + self.inner.clear(); + self.set().clear(); } } impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a MappedFutures { - type Item = &'a Fut; + type Item = (&'a K, &'a Fut); type IntoIter = Iter<'a, K, Fut>; fn into_iter(self) -> Self::IntoIter { @@ -749,7 +356,7 @@ impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a MappedFutures { } impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a mut MappedFutures { - type Item = &'a mut Fut; + type Item = (&'a K, &'a mut Fut); type IntoIter = IterMut<'a, K, Fut>; fn into_iter(self) -> Self::IntoIter { @@ -758,16 +365,11 @@ impl<'a, K: Hash + Eq, Fut: Unpin> IntoIterator for &'a mut MappedFutures IntoIterator for MappedFutures { - type Item = Fut; + type Item = (K, Fut); type IntoIter = IntoIter; - fn into_iter(mut self) -> Self::IntoIter { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. 
- let task = *self.head_all.get_mut(); - let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - - IntoIter { len, inner: self } + fn into_iter(self) -> Self::IntoIter { + IntoIter { inner: self.inner.into_iter() } } } @@ -786,7 +388,7 @@ impl FromIterator<(K, Fut)> for MappedFutures { impl FusedStream for MappedFutures { fn is_terminated(&self) -> bool { - self.is_terminated.load(Relaxed) + self.inner.is_terminated() } } @@ -827,15 +429,15 @@ pub mod tests { fn map_futures() { let mut futures: MappedFutures = MappedFutures::new(); insert_millis(&mut futures, 1, 50); - insert_millis(&mut futures, 2, 50); - insert_millis(&mut futures, 3, 150); - insert_millis(&mut futures, 4, 200); - + // insert_millis(&mut futures, 2, 50); + // insert_millis(&mut futures, 3, 150); + // insert_millis(&mut futures, 4, 200); + // assert_eq!(block_on(futures.next()).unwrap().0, 1); - assert_eq!(futures.cancel(&3), true); - assert_eq!(block_on(futures.next()).unwrap().0, 2); - assert_eq!(block_on(futures.next()).unwrap().0, 4); - assert_eq!(block_on(futures.next()), None); + // assert_eq!(futures.cancel(&3), true); + // assert_eq!(block_on(futures.next()).unwrap().0, 2); + // assert_eq!(block_on(futures.next()).unwrap().0, 4); + // assert_eq!(block_on(futures.next()), None); } #[test] diff --git a/futures-util/src/stream/mapped_futures/ready_to_run_queue.rs b/futures-util/src/stream/mapped_futures/ready_to_run_queue.rs deleted file mode 100644 index b607044723..0000000000 --- a/futures-util/src/stream/mapped_futures/ready_to_run_queue.rs +++ /dev/null @@ -1,123 +0,0 @@ -use crate::task::AtomicWaker; -use alloc::sync::Arc; -use core::cell::UnsafeCell; -use core::hash::Hash; -use core::ptr; -use core::sync::atomic::AtomicPtr; -use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release}; - -use super::abort::abort; -use super::task::Task; - -pub(super) enum Dequeue { - Data(*const Task), - Empty, - Inconsistent, -} - -pub(super) struct ReadyToRunQueue { - // The waker of the task using `MappedFutures`. - pub(super) waker: AtomicWaker, - - // Head/tail of the readiness queue - pub(super) head: AtomicPtr>, - pub(super) tail: UnsafeCell<*const Task>, - pub(super) stub: Arc>, -} - -/// An MPSC queue into which the tasks containing the futures are inserted -/// whenever the future inside is scheduled for polling. -impl ReadyToRunQueue { - /// The enqueue function from the 1024cores intrusive MPSC queue algorithm. - pub(super) fn enqueue(&self, task: *const Task) { - unsafe { - debug_assert!((*task).queued.load(Relaxed)); - - // This action does not require any coordination - (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed); - - // Note that these atomic orderings come from 1024cores - let task = task as *mut _; - let prev = self.head.swap(task, AcqRel); - (*prev).next_ready_to_run.store(task, Release); - } - } - - /// The dequeue function from the 1024cores intrusive MPSC queue algorithm - /// - /// Note that this is unsafe as it required mutual exclusion (only one - /// thread can call this) to be guaranteed elsewhere. 
- pub(super) unsafe fn dequeue(&self) -> Dequeue { - let mut tail = *self.tail.get(); - let mut next = (*tail).next_ready_to_run.load(Acquire); - - if tail == self.stub() { - if next.is_null() { - return Dequeue::Empty; - } - - *self.tail.get() = next; - tail = next; - next = (*next).next_ready_to_run.load(Acquire); - } - - if !next.is_null() { - *self.tail.get() = next; - debug_assert!(tail != self.stub()); - return Dequeue::Data(tail); - } - - if self.head.load(Acquire) as *const _ != tail { - return Dequeue::Inconsistent; - } - - self.enqueue(self.stub()); - - next = (*tail).next_ready_to_run.load(Acquire); - - if !next.is_null() { - *self.tail.get() = next; - return Dequeue::Data(tail); - } - - Dequeue::Inconsistent - } - - pub(super) fn stub(&self) -> *const Task { - Arc::as_ptr(&self.stub) - } - - // Clear the queue of tasks. - // - // Note that each task has a strong reference count associated with it - // which is owned by the ready to run queue. This method just pulls out - // tasks and drops their refcounts. - // - // # Safety - // - // - All tasks **must** have had their futures dropped already (by MappedFutures::clear) - // - The caller **must** guarantee unique access to `self` - pub(crate) unsafe fn clear(&self) { - loop { - // SAFETY: We have the guarantee of mutual exclusion required by `dequeue`. - match self.dequeue() { - Dequeue::Empty => break, - Dequeue::Inconsistent => abort("inconsistent in drop"), - Dequeue::Data(ptr) => drop(Arc::from_raw(ptr)), - } - } - } -} - -impl Drop for ReadyToRunQueue { - fn drop(&mut self) { - // Once we're in the destructor for `Inner` we need to clear out - // the ready to run queue of tasks if there's anything left in there. - - // All tasks have had their futures dropped already by the `MappedFutures` - // destructor above, and we have &mut self, so this is safe. - unsafe { - self.clear(); - } - } -} diff --git a/futures-util/src/stream/mapped_futures/task.rs b/futures-util/src/stream/mapped_futures/task.rs deleted file mode 100644 index 0c1387c467..0000000000 --- a/futures-util/src/stream/mapped_futures/task.rs +++ /dev/null @@ -1,180 +0,0 @@ -use alloc::sync::{Arc, Weak}; -use core::cell::UnsafeCell; -use core::sync::atomic::Ordering::{self, Relaxed, SeqCst}; -use core::sync::atomic::{AtomicBool, AtomicPtr}; -use std::borrow::Borrow; -use std::ops::Deref; - -use super::abort::abort; -use super::ReadyToRunQueue; -use crate::task::{waker_ref, ArcWake, WakerRef}; -use core::hash::Hash; - -pub(super) struct Task { - // The future - pub(super) future: UnsafeCell>, - - // Next pointer for linked list tracking all active tasks (use - // `spin_next_all` to read when access is shared across threads) - pub(super) next_all: AtomicPtr>, - - // Previous task in linked list tracking all active tasks - pub(super) prev_all: UnsafeCell<*const Task>, - - // Length of the linked list tracking all active tasks when this node was - // inserted (use `spin_next_all` to synchronize before reading when access - // is shared across threads) - pub(super) len_all: UnsafeCell, - - // Next pointer in ready to run queue - pub(super) next_ready_to_run: AtomicPtr>, - - // Queue that we'll be enqueued to when woken - pub(super) ready_to_run_queue: Weak>, - - // Whether or not this task is currently in the ready to run queue - pub(super) queued: AtomicBool, - - // Whether the future was awoken during polling - // It is possible for this flag to be set to true after the polling, - // but it will be ignored. 
- pub(super) woken: AtomicBool, - pub(super) key: UnsafeCell>, -} - -// Wrapper struct; exists effectively to implement hash on the type Arc -pub(super) struct HashTask { - pub(super) inner: Arc>, -} - -impl HashTask { - fn key(&self) -> Option<&K> { - Task::key(&*self) - } - pub(super) fn key_unwrap(&self) -> &K { - unsafe { (&*self.key.get()).as_ref().unwrap() } - } -} - -impl Deref for HashTask { - type Target = Task; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl Borrow for HashTask { - fn borrow(&self) -> &K { - // Never use Task in a context where this method may be called - // IE. The Stub task never goes into the HashSet - unsafe { (*self.key.get()).as_ref().unwrap() } - } -} - -impl Hash for HashTask { - fn hash(&self, state: &mut H) { - unsafe { (*self.key.get()).as_ref() }.unwrap().hash(state) - } -} - -impl PartialEq for HashTask { - fn eq(&self, other: &Self) -> bool { - self.key() == other.key() - } -} -impl Eq for HashTask {} -unsafe impl Send for HashTask {} -unsafe impl Sync for HashTask {} - -// `Task` can be sent across threads safely because it ensures that -// the underlying `Fut` type isn't touched from any of its methods. -// -// The parent (`super`) module is trusted not to access `future` -// across different threads. -unsafe impl Send for Task {} -unsafe impl Sync for Task {} - -impl ArcWake for Task { - fn wake_by_ref(arc_self: &Arc) { - let inner = match arc_self.ready_to_run_queue.upgrade() { - Some(inner) => inner, - None => return, - }; - - arc_self.woken.store(true, Relaxed); - - // It's our job to enqueue this task it into the ready to run queue. To - // do this we set the `queued` flag, and if successful we then do the - // actual queueing operation, ensuring that we're only queued once. - // - // Once the task is inserted call `wake` to notify the parent task, - // as it'll want to come along and run our task later. - // - // Note that we don't change the reference count of the task here, - // we merely enqueue the raw pointer. The `FuturesUnordered` - // implementation guarantees that if we set the `queued` flag that - // there's a reference count held by the main `FuturesUnordered` queue - // still. - let prev = arc_self.queued.swap(true, SeqCst); - if !prev { - inner.enqueue(Arc::as_ptr(arc_self)); - inner.waker.wake(); - } - } -} - -impl Task { - /// Returns a waker reference for this task without cloning the Arc. - pub(super) fn waker_ref(this: &Arc) -> WakerRef<'_> { - waker_ref(this) - } - - /// Spins until `next_all` is no longer set to `pending_next_all`. - /// - /// The temporary `pending_next_all` value is typically overwritten fairly - /// quickly after a node is inserted into the list of all futures, so this - /// should rarely spin much. - /// - /// When it returns, the correct `next_all` value is returned. - /// - /// `Relaxed` or `Acquire` ordering can be used. `Acquire` ordering must be - /// used before `len_all` can be safely read. 
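-    ///
-    /// For example, a reader that needs `len_all` must spin with `Acquire`
-    /// first (an illustrative sketch of the contract, not code from this
-    /// file):
-    ///
-    /// ```ignore
-    /// let next = task.spin_next_all(pending_next_all, Acquire);
-    /// // The Acquire load above synchronizes with the inserting thread,
-    /// // so this read is now safe:
-    /// let len = unsafe { *task.len_all.get() };
-    /// ```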
-    #[inline]
-    pub(super) fn spin_next_all(
-        &self,
-        pending_next_all: *mut Self,
-        ordering: Ordering,
-    ) -> *const Self {
-        loop {
-            let next = self.next_all.load(ordering);
-            if next != pending_next_all {
-                return next;
-            }
-        }
-    }
-    pub(super) fn key(&self) -> Option<&K> {
-        unsafe { (&*self.key.get()).as_ref() }
-    }
-    pub(super) fn take_key(&self) -> K {
-        unsafe { (*self.key.get()).take().unwrap() }
-    }
-}
-
-impl Drop for Task {
-    fn drop(&mut self) {
-        // Since `Task` is sent across all threads for any lifetime,
-        // regardless of `Fut`, we, to guarantee memory safety, can't actually
-        // touch `Fut` at any time except when we have a reference to the
-        // `FuturesUnordered` itself .
-        //
-        // Consequently it *should* be the case that we always drop futures from
-        // the `FuturesUnordered` instance. This is a bomb, just in case there's
-        // a bug in that logic.
-        unsafe {
-            if (*self.future.get()).is_some() {
-                abort("future still here when dropping");
-            }
-        }
-    }
-}

From 68926530b2e8fe0f0b784a6d40d7ab17f2510f33 Mon Sep 17 00:00:00 2001
From: CreativeStoic
Date: Sat, 30 Sep 2023 14:53:45 -0400
Subject: [PATCH 05/10] clarify comment

---
 futures-util/src/stream/futures_keyed/task.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/futures-util/src/stream/futures_keyed/task.rs b/futures-util/src/stream/futures_keyed/task.rs
index 70ed3d6b19..4538ccbbeb 100644
--- a/futures-util/src/stream/futures_keyed/task.rs
+++ b/futures-util/src/stream/futures_keyed/task.rs
@@ -167,8 +167,8 @@ impl Deref for HashTask {
 impl Borrow for HashTask {
     fn borrow(&self) -> &K {
         // Never use the borrowed form after the key has been removed from the task
-        // Never use Task in a context where this method may be called
         // IE. The Stub task never goes into the HashSet
+        // Likewise, never remove a Task from the HashSet by key after the key has been taken out of the task
         unsafe { (*self.key.get()).as_ref().unwrap() }
     }
 }

From 075f8fb2108f6c9bde898d10bf67dd69d780a6f8 Mon Sep 17 00:00:00 2001
From: CreativeStoic
Date: Sat, 30 Sep 2023 14:58:37 -0400
Subject: [PATCH 06/10] uncomment testing code

---
 futures-util/src/stream/mapped_futures/mod.rs | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs
index 5287c79af8..0eead9cc52 100644
--- a/futures-util/src/stream/mapped_futures/mod.rs
+++ b/futures-util/src/stream/mapped_futures/mod.rs
@@ -429,15 +429,15 @@ pub mod tests {
     fn map_futures() {
         let mut futures: MappedFutures = MappedFutures::new();
         insert_millis(&mut futures, 1, 50);
-        // insert_millis(&mut futures, 2, 50);
-        // insert_millis(&mut futures, 3, 150);
-        // insert_millis(&mut futures, 4, 200);
-        //
+        insert_millis(&mut futures, 2, 50);
+        insert_millis(&mut futures, 3, 150);
+        insert_millis(&mut futures, 4, 200);
+
         assert_eq!(block_on(futures.next()).unwrap().0, 1);
-        // assert_eq!(futures.cancel(&3), true);
-        // assert_eq!(block_on(futures.next()).unwrap().0, 2);
-        // assert_eq!(block_on(futures.next()).unwrap().0, 4);
-        // assert_eq!(block_on(futures.next()), None);
+        assert_eq!(futures.cancel(&3), true);
+        assert_eq!(block_on(futures.next()).unwrap().0, 2);
+        assert_eq!(block_on(futures.next()).unwrap().0, 4);
+        assert_eq!(block_on(futures.next()), None);
     }
 
     #[test]

From 5ea6bf0130fd667ef39b1904c945e3908cf33230 Mon Sep 17 00:00:00 2001
From: CreativeStoic
Date: Fri, 7 Jun 2024 16:50:32 -0400
Subject: [PATCH 07/10] re-add Debug derive, remove unnecessary Sync/Send impl, change inner module name
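The public iterator types get their `#[derive(Debug)]` back, the
hand-written `unsafe impl Send`/`Sync` blocks on those wrappers go away in
favour of the compiler's structural auto-trait impls, and `futures_keyed`
is renamed to `futures_unordered_internal` to reflect its role as a
private backend.

A regression in the structural impls can be caught at compile time with a
check along these lines (an illustrative sketch, not part of this patch):

    fn assert_send<T: Send>(_: &T) {}

    fn _iters_still_send(set: &futures_util::stream::FuturesUnordered<futures_util::future::Ready<()>>) {
        // Fails to compile if `iter()`'s type stops being structurally Send.
        assert_send(&set.iter());
    }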
--- .../src/stream/futures_unordered/iter.rs | 19 +--- .../src/stream/futures_unordered/mod.rs | 8 +- .../abort.rs | 0 .../iter.rs | 24 +++--- .../mod.rs | 86 ++++++++++--------- .../ready_to_run_queue.rs | 6 +- .../task.rs | 8 +- .../src/stream/mapped_futures/iter.rs | 45 +++------- futures-util/src/stream/mapped_futures/mod.rs | 19 ++-- futures-util/src/stream/mod.rs | 2 +- 10 files changed, 95 insertions(+), 122 deletions(-) rename futures-util/src/stream/{futures_keyed => futures_unordered_internal}/abort.rs (100%) rename futures-util/src/stream/{futures_keyed => futures_unordered_internal}/iter.rs (84%) rename futures-util/src/stream/{futures_keyed => futures_unordered_internal}/mod.rs (89%) rename futures-util/src/stream/{futures_keyed => futures_unordered_internal}/ready_to_run_queue.rs (96%) rename futures-util/src/stream/{futures_keyed => futures_unordered_internal}/task.rs (95%) diff --git a/futures-util/src/stream/futures_unordered/iter.rs b/futures-util/src/stream/futures_unordered/iter.rs index 9365074a41..34fc1010dd 100644 --- a/futures-util/src/stream/futures_unordered/iter.rs +++ b/futures-util/src/stream/futures_unordered/iter.rs @@ -1,4 +1,4 @@ -use crate::stream::futures_keyed; +use crate::stream::futures_unordered_internal; use core::pin::Pin; use super::DummyStruct; @@ -6,7 +6,7 @@ use super::DummyStruct; /// Mutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinMut<'a, Fut> { - pub(super) inner: futures_keyed::IterPinMut<'a, (), Fut, DummyStruct>, + pub(super) inner: futures_unordered_internal::IterPinMut<'a, (), Fut, DummyStruct>, } /// Mutable iterator over all futures in the unordered set. @@ -16,7 +16,7 @@ pub struct IterMut<'a, Fut: Unpin>(pub(super) IterPinMut<'a, Fut>); /// Immutable iterator over all futures in the unordered set. #[derive(Debug)] pub struct IterPinRef<'a, Fut> { - pub(super) inner: futures_keyed::IterPinRef<'a, (), Fut, DummyStruct>, + pub(super) inner: futures_unordered_internal::IterPinRef<'a, (), Fut, DummyStruct>, } /// Immutable iterator over all the futures in the unordered set. @@ -26,7 +26,7 @@ pub struct Iter<'a, Fut: Unpin>(pub(super) IterPinRef<'a, Fut>); /// Owned iterator over all futures in the unordered set. #[derive(Debug)] pub struct IntoIter { - pub(super) inner: futures_keyed::IntoIter<(), Fut, DummyStruct>, + pub(super) inner: futures_unordered_internal::IntoIter<(), Fut, DummyStruct>, } impl Iterator for IntoIter { @@ -98,14 +98,3 @@ impl<'a, Fut: Unpin> Iterator for Iter<'a, Fut> { } impl ExactSizeIterator for Iter<'_, Fut> {} - -// SAFETY: we do nothing thread-local and there is no interior mutability, -// so the usual structural `Send`/`Sync` apply. 
-unsafe impl Send for IterPinRef<'_, Fut> {} -unsafe impl Sync for IterPinRef<'_, Fut> {} - -unsafe impl Send for IterPinMut<'_, Fut> {} -unsafe impl Sync for IterPinMut<'_, Fut> {} - -unsafe impl Send for IntoIter {} -unsafe impl Sync for IntoIter {} diff --git a/futures-util/src/stream/futures_unordered/mod.rs b/futures-util/src/stream/futures_unordered/mod.rs index a733be4ecd..035ed0ee8a 100644 --- a/futures-util/src/stream/futures_unordered/mod.rs +++ b/futures-util/src/stream/futures_unordered/mod.rs @@ -16,9 +16,9 @@ mod iter; #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; -use crate::stream::futures_keyed::FuturesKeyed; +use crate::stream::futures_unordered_internal::FuturesUnorderedInternal; -use super::futures_keyed::ReleasesTask; +use super::futures_unordered_internal::ReleasesTask; /// A set of futures which may complete in any order. /// @@ -45,7 +45,7 @@ use super::futures_keyed::ReleasesTask; /// library is activated, and it is activated by default. #[must_use = "streams do nothing unless polled"] pub struct FuturesUnordered { - inner: FuturesKeyed<(), Fut, DummyStruct>, + inner: FuturesUnorderedInternal<(), Fut, DummyStruct>, } struct DummyStruct {} @@ -120,7 +120,7 @@ impl FuturesUnordered { /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will /// return [`Poll::Ready(None)`](Poll::Ready). pub fn new() -> Self { - Self { inner: FuturesKeyed::new() } + Self { inner: FuturesUnorderedInternal::new() } } /// Returns the number of futures contained in the set. diff --git a/futures-util/src/stream/futures_keyed/abort.rs b/futures-util/src/stream/futures_unordered_internal/abort.rs similarity index 100% rename from futures-util/src/stream/futures_keyed/abort.rs rename to futures-util/src/stream/futures_unordered_internal/abort.rs diff --git a/futures-util/src/stream/futures_keyed/iter.rs b/futures-util/src/stream/futures_unordered_internal/iter.rs similarity index 84% rename from futures-util/src/stream/futures_keyed/iter.rs rename to futures-util/src/stream/futures_unordered_internal/iter.rs index 5caff4b37e..30e31d76cf 100644 --- a/futures-util/src/stream/futures_keyed/iter.rs +++ b/futures-util/src/stream/futures_unordered_internal/iter.rs @@ -1,5 +1,5 @@ use super::task::Task; -use super::{FuturesKeyed, ReleasesTask}; +use super::{FuturesUnorderedInternal, ReleasesTask}; use core::marker::PhantomData; use core::pin::Pin; use core::ptr; @@ -10,7 +10,7 @@ use core::sync::atomic::Ordering::Relaxed; pub(crate) struct IterPinMut<'a, K, Fut, S: ReleasesTask> { pub(super) task: *const Task, pub(crate) len: usize, - pub(super) _marker: PhantomData<&'a mut FuturesKeyed>, + pub(super) _marker: PhantomData<&'a mut FuturesUnorderedInternal>, } /// Mutable iterator over all futures in the unordered set. @@ -25,7 +25,7 @@ pub(crate) struct IterPinRef<'a, K, Fut, S: ReleasesTask> { pub(super) task: *const Task, pub(crate) len: usize, pub(super) pending_next_all: *mut Task, - pub(super) _marker: PhantomData<&'a FuturesKeyed>, + pub(super) _marker: PhantomData<&'a FuturesUnorderedInternal>, } /// Immutable iterator over all the futures in the unordered set. 
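An aside on the `_marker` fields kept above: `PhantomData<&'a mut
FuturesUnorderedInternal<..>>` is what makes these raw-pointer iterators
borrow-check like ordinary ones, keeping the set (mutably) borrowed for the
iterator's lifetime. The same pattern in miniature, with illustrative names:

    use core::marker::PhantomData;

    struct RawIterMut<'a, T> {
        cur: *mut T,      // current node, advanced by `next`
        remaining: usize, // for `size_hint`/`ExactSizeIterator`
        // Pretend to hold `&'a mut T` so the owning collection stays
        // mutably borrowed while this iterator is alive.
        _marker: PhantomData<&'a mut T>,
    }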
@@ -36,7 +36,7 @@ pub(crate) struct Iter<'a, K, Fut: Unpin, S: ReleasesTask>(pub(super) IterPin #[derive(Debug)] pub(crate) struct IntoIter> { pub(crate) len: usize, - pub(super) inner: FuturesKeyed, + pub(super) inner: FuturesUnorderedInternal, } impl> Iterator for IntoIter { @@ -56,7 +56,7 @@ impl> Iterator for IntoIter { let future = (*(**task).future.get()).take().unwrap(); let key = (**task).take_key(); - // Mutable access to a previously shared `FuturesKeyed` implies + // Mutable access to a previously shared `FuturesUnorderedInternal` implies // that the other threads already released the object before the // current thread acquired it, so relaxed ordering can be used and // valid `next_all` checks can be skipped. @@ -89,7 +89,7 @@ impl<'a, K, Fut, S: ReleasesTask> Iterator for IterPinMut<'a, K, Fut, S> { let future = (*(*self.task).future.get()).as_mut().unwrap(); let key = (*(*self.task).key.get()).as_ref().unwrap(); - // Mutable access to a previously shared `FuturesKeyed` implies + // Mutable access to a previously shared `FuturesUnorderedInternal` implies // that the other threads already released the object before the // current thread acquired it, so relaxed ordering can be used and // valid `next_all` checks can be skipped. @@ -168,11 +168,11 @@ impl> ExactSizeIterator for Iter<'_, K, Fut, S // SAFETY: we do nothing thread-local and there is no interior mutability, // so the usual structural `Send`/`Sync` apply. -unsafe impl> Send for IterPinRef<'_, K, Fut, S> {} -unsafe impl> Sync for IterPinRef<'_, K, Fut, S> {} +unsafe impl> Send for IterPinRef<'_, K, Fut, S> {} +unsafe impl> Sync for IterPinRef<'_, K, Fut, S> {} -unsafe impl> Send for IterPinMut<'_, K, Fut, S> {} -unsafe impl> Sync for IterPinMut<'_, K, Fut, S> {} +unsafe impl> Send for IterPinMut<'_, K, Fut, S> {} +unsafe impl> Sync for IterPinMut<'_, K, Fut, S> {} -unsafe impl> Send for IntoIter {} -unsafe impl> Sync for IntoIter {} +unsafe impl> Send for IntoIter {} +unsafe impl> Sync for IntoIter {} diff --git a/futures-util/src/stream/futures_keyed/mod.rs b/futures-util/src/stream/futures_unordered_internal/mod.rs similarity index 89% rename from futures-util/src/stream/futures_keyed/mod.rs rename to futures-util/src/stream/futures_unordered_internal/mod.rs index c7f9432185..5fa13907bb 100644 --- a/futures-util/src/stream/futures_keyed/mod.rs +++ b/futures-util/src/stream/futures_unordered_internal/mod.rs @@ -44,25 +44,25 @@ use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue}; /// type that preserves a FIFO order. /// /// This structure is optimized to manage a large number of futures. -/// Futures managed by [`FuturesKeyed`] will only be polled when they +/// Futures managed by [`FuturesUnorderedInternal`] will only be polled when they /// generate wake-up notifications. This reduces the required amount of work /// needed to poll large numbers of futures. /// -/// [`FuturesKeyed`] can be filled by [`collect`](Iterator::collect)ing an -/// iterator of futures into a [`FuturesKeyed`], or by -/// [`push`](FuturesKeyed::push)ing futures onto an existing -/// [`FuturesKeyed`]. When new futures are added, +/// [`FuturesUnorderedInternal`] can be filled by [`collect`](Iterator::collect)ing an +/// iterator of futures into a [`FuturesUnorderedInternal`], or by +/// [`push`](FuturesUnorderedInternal::push)ing futures onto an existing +/// [`FuturesUnorderedInternal`]. When new futures are added, /// [`poll_next`](Stream::poll_next) must be called in order to begin receiving /// wake-ups for new futures. 
/// -/// Note that you can create a ready-made [`FuturesKeyed`] via the +/// Note that you can create a ready-made [`FuturesUnorderedInternal`] via the /// [`collect`](Iterator::collect) method, or you can start with an empty set -/// with the [`FuturesKeyed::new`] constructor. +/// with the [`FuturesUnorderedInternal::new`] constructor. /// /// This type is only available when the `std` or `alloc` feature of this /// library is activated, and it is activated by default. #[must_use = "streams do nothing unless polled"] -pub(super) struct FuturesKeyed> { +pub(super) struct FuturesUnorderedInternal> { ready_to_run_queue: Arc>, head_all: AtomicPtr>, is_terminated: AtomicBool, @@ -74,18 +74,18 @@ pub(crate) trait ReleasesTask { fn new() -> Self; } -unsafe impl> Send for FuturesKeyed {} -unsafe impl> Sync for FuturesKeyed {} -impl> Unpin for FuturesKeyed {} +unsafe impl> Send for FuturesUnorderedInternal {} +unsafe impl> Sync for FuturesUnorderedInternal {} +impl> Unpin for FuturesUnorderedInternal {} -// impl Spawn for FuturesKeyed> { +// impl Spawn for FuturesUnorderedInternal> { // fn spawn_obj(&self, key: K, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> { // self.push(key, future_obj); // Ok(()) // } // } // -// impl LocalSpawn for FuturesKeyed> { +// impl LocalSpawn for FuturesUnorderedInternal> { // fn spawn_local_obj( // &self, // key: K, @@ -96,12 +96,12 @@ impl> Unpin for FuturesKeyed {} // } // } -// FuturesKeyed is implemented using two linked lists. One which links all -// futures managed by a `FuturesKeyed` and one that tracks futures that have +// FuturesUnorderedInternal is implemented using two linked lists. One which links all +// futures managed by a `FuturesUnorderedInternal` and one that tracks futures that have // been scheduled for polling. The first linked list allows for thread safe // insertion of nodes at the head as well as forward iteration, but is otherwise // not thread safe and is only accessed by the thread that owns the -// `FuturesKeyed` value for any other operations. The second linked list is +// `FuturesUnorderedInternal` value for any other operations. The second linked list is // an implementation of the intrusive MPSC queue algorithm described by // 1024cores.net. // @@ -112,7 +112,7 @@ impl> Unpin for FuturesKeyed {} // Before a managed future is polled, the current context's waker is replaced // with one that is aware of the specific future being run. This ensures that // wake-up notifications generated by that specific future are visible to -// `FuturesKeyed`. When a wake-up notification is received, the task is +// `FuturesUnorderedInternal`. When a wake-up notification is received, the task is // inserted into the ready to run queue, so that its future can be polled later. // // Each task is wrapped in an `Arc` and thereby atomically reference counted. @@ -121,17 +121,17 @@ impl> Unpin for FuturesKeyed {} // notification is received, the task will only be inserted into the ready to // run queue if it isn't inserted already. -impl> Default for FuturesKeyed { +impl> Default for FuturesUnorderedInternal { fn default() -> Self { Self::new() } } -impl> FuturesKeyed { - /// Constructs a new, empty [`FuturesKeyed`]. +impl> FuturesUnorderedInternal { + /// Constructs a new, empty [`FuturesUnorderedInternal`]. /// - /// The returned [`FuturesKeyed`] does not contain any futures. - /// In this state, [`FuturesKeyed::poll_next`](Stream::poll_next) will + /// The returned [`FuturesUnorderedInternal`] does not contain any futures. 
+ /// In this state, [`FuturesUnorderedInternal::poll_next`](Stream::poll_next) will /// return [`Poll::Ready(None)`](Poll::Ready). /// /// Note that the key type over which this struct is generic does not have trait bounds @@ -186,7 +186,7 @@ impl> FuturesKeyed { /// /// This method adds the given future to the set. This method will not /// call [`poll`](core::future::Future::poll) on the submitted future. The caller must - /// ensure that [`FuturesKeyed::poll_next`](Stream::poll_next) is called + /// ensure that [`FuturesUnorderedInternal::poll_next`](Stream::poll_next) is called /// in order to receive wake-up notifications for the given future. pub(super) fn push(&self, key: K, future: Fut) -> Arc> { let task = Arc::new(Task { @@ -288,7 +288,7 @@ impl> FuturesKeyed { // Drop the future, even if it hasn't finished yet. This is safe // because we're dropping the future on the thread that owns - // `FuturesKeyed`, which correctly tracks `Fut`'s lifetimes and + // `FuturesUnorderedInternal`, which correctly tracks `Fut`'s lifetimes and // such. unsafe { // Set to `None` rather than `take()`ing to prevent moving the @@ -350,7 +350,7 @@ impl> FuturesKeyed { } /// Remove the task from the linked list tracking all tasks currently - /// managed by `FuturesKeyed`. + /// managed by `FuturesUnorderedInternal`. /// This method is unsafe because it has be guaranteed that `task` is a /// valid pointer. pub(crate) unsafe fn unlink(&mut self, task: *const Task) -> Arc> { @@ -388,7 +388,7 @@ impl> FuturesKeyed { /// Returns the reserved value for `Task::next_all` to indicate a pending /// assignment from the thread that inserted the task. /// - /// `FuturesKeyed::link` needs to update `Task` pointers in an order + /// `FuturesUnorderedInternal::link` needs to update `Task` pointers in an order /// that ensures any iterators created on other threads can correctly /// traverse the entire `Task` list using the chain of `next_all` pointers. /// This could be solved with a compare-exchange loop that stores the @@ -410,12 +410,12 @@ impl> FuturesKeyed { fn pending_next_all(&self) -> *mut Task { // The `ReadyToRunQueue` stub is never inserted into the `head_all` // list, and its pointer value will remain valid for the lifetime of - // this `FuturesKeyed`, so we can make use of its value here. + // this `FuturesUnorderedInternal`, so we can make use of its value here. Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _ } } -impl> Stream for FuturesKeyed { +impl> Stream for FuturesUnorderedInternal { type Item = (Arc>, Fut::Output); fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -509,7 +509,7 @@ impl> Stream for FuturesKeyed { // assume it'll panic, in which case we'll want to discard it // regardless. struct Bomb<'a, K, Fut, S: ReleasesTask> { - queue: &'a mut FuturesKeyed, + queue: &'a mut FuturesUnorderedInternal, task: Option>>, } @@ -583,13 +583,13 @@ impl> Stream for FuturesKeyed { } } -impl> Debug for FuturesKeyed { +impl> Debug for FuturesUnorderedInternal { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "FuturesKeyed {{ ... }}") + write!(f, "FuturesUnorderedInternal {{ ... }}") } } -impl> FuturesKeyed { +impl> FuturesUnorderedInternal { /// Clears the set, removing all futures. 
pub(super) fn clear(&mut self) { self.clear_head_all(); @@ -609,9 +609,9 @@ impl> FuturesKeyed { } } -impl> Drop for FuturesKeyed { +impl> Drop for FuturesUnorderedInternal { fn drop(&mut self) { - // When a `FuturesKeyed` is dropped we want to drop all futures + // When a `FuturesUnorderedInternal` is dropped we want to drop all futures // associated with it. At the same time though there may be tons of // wakers flying around which contain `Task` references // inside them. We'll let those naturally get deallocated. @@ -620,7 +620,7 @@ impl> Drop for FuturesKeyed { // Note that at this point we could still have a bunch of tasks in the // ready to run queue. None of those tasks, however, have futures // associated with them so they're safe to destroy on any thread. At - // this point the `FuturesKeyed` struct, the owner of the one strong + // this point the `FuturesUnorderedInternal` struct, the owner of the one strong // reference to the ready to run queue will drop the strong reference. // At that point whichever thread releases the strong refcount last (be // it this thread or some other thread as part of an `upgrade`) will @@ -632,7 +632,9 @@ impl> Drop for FuturesKeyed { } } -impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator for &'a FuturesKeyed { +impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator + for &'a FuturesUnorderedInternal +{ type Item = (&'a K, &'a Fut); type IntoIter = Iter<'a, K, Fut, S>; @@ -641,7 +643,9 @@ impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator for &'a FuturesKeyed> IntoIterator for &'a mut FuturesKeyed { +impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator + for &'a mut FuturesUnorderedInternal +{ type Item = (&'a K, &'a mut Fut); type IntoIter = IterMut<'a, K, Fut, S>; @@ -650,7 +654,7 @@ impl<'a, K, Fut: Unpin, S: ReleasesTask> IntoIterator for &'a mut FuturesKeye } } -impl> IntoIterator for FuturesKeyed { +impl> IntoIterator for FuturesUnorderedInternal { type Item = (K, Fut); type IntoIter = IntoIter; @@ -664,7 +668,7 @@ impl> IntoIterator for FuturesKeyed } } -impl> FromIterator<(K, Fut)> for FuturesKeyed { +impl> FromIterator<(K, Fut)> for FuturesUnorderedInternal { fn from_iter(iter: I) -> Self where I: IntoIterator, @@ -677,13 +681,13 @@ impl> FromIterator<(K, Fut)> for FuturesKeyed> FusedStream for FuturesKeyed { +impl> FusedStream for FuturesUnorderedInternal { fn is_terminated(&self) -> bool { self.is_terminated.load(Relaxed) } } -impl> Extend<(K, Fut)> for FuturesKeyed { +impl> Extend<(K, Fut)> for FuturesUnorderedInternal { fn extend(&mut self, iter: I) where I: IntoIterator, diff --git a/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs b/futures-util/src/stream/futures_unordered_internal/ready_to_run_queue.rs similarity index 96% rename from futures-util/src/stream/futures_keyed/ready_to_run_queue.rs rename to futures-util/src/stream/futures_unordered_internal/ready_to_run_queue.rs index 574ac50b31..08b7c79ce2 100644 --- a/futures-util/src/stream/futures_keyed/ready_to_run_queue.rs +++ b/futures-util/src/stream/futures_unordered_internal/ready_to_run_queue.rs @@ -15,7 +15,7 @@ pub(super) enum Dequeue { } pub(super) struct ReadyToRunQueue { - // The waker of the task using `FuturesKeyed`. + // The waker of the task using `FuturesUnorderedInternal`. 
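+    // (The consumer side registers this in `poll_next`; the producer side,
+    // `Task::wake_by_ref`, calls `waker.wake()` after a successful enqueue.)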
pub(super) waker: AtomicWaker, // Head/tail of the readiness queue @@ -94,7 +94,7 @@ impl ReadyToRunQueue { // // # Safety // - // - All tasks **must** have had their futures dropped already (by FuturesKeyed::clear) + // - All tasks **must** have had their futures dropped already (by FuturesUnorderedInternal::clear) // - The caller **must** guarantee unique access to `self` pub(crate) unsafe fn clear(&self) { loop { @@ -113,7 +113,7 @@ impl Drop for ReadyToRunQueue { // Once we're in the destructor for `Inner` we need to clear out // the ready to run queue of tasks if there's anything left in there. - // All tasks have had their futures dropped already by the `FuturesKeyed` + // All tasks have had their futures dropped already by the `FuturesUnorderedInternal` // destructor above, and we have &mut self, so this is safe. unsafe { self.clear(); diff --git a/futures-util/src/stream/futures_keyed/task.rs b/futures-util/src/stream/futures_unordered_internal/task.rs similarity index 95% rename from futures-util/src/stream/futures_keyed/task.rs rename to futures-util/src/stream/futures_unordered_internal/task.rs index 4538ccbbeb..a982bca236 100644 --- a/futures-util/src/stream/futures_keyed/task.rs +++ b/futures-util/src/stream/futures_unordered_internal/task.rs @@ -69,9 +69,9 @@ impl ArcWake for Task { // as it'll want to come along and run our task later. // // Note that we don't change the reference count of the task here, - // we merely enqueue the raw pointer. The `FuturesKeyed` + // we merely enqueue the raw pointer. The `FuturesUnorderedInternal` // implementation guarantees that if we set the `queued` flag that - // there's a reference count held by the main `FuturesKeyed` queue + // there's a reference count held by the main `FuturesUnorderedInternal` queue // still. let prev = arc_self.queued.swap(true, SeqCst); if !prev { @@ -123,10 +123,10 @@ impl Drop for Task { // Since `Task` is sent across all threads for any lifetime, // regardless of `Fut`, we, to guarantee memory safety, can't actually // touch `Fut` at any time except when we have a reference to the - // `FuturesKeyed` itself . + // `FuturesUnorderedInternal` itself . // // Consequently it *should* be the case that we always drop futures from - // the `FuturesKeyed` instance. This is a bomb, just in case there's + // the `FuturesUnorderedInternal` instance. This is a bomb, just in case there's // a bug in that logic. unsafe { if (*self.future.get()).is_some() { diff --git a/futures-util/src/stream/mapped_futures/iter.rs b/futures-util/src/stream/mapped_futures/iter.rs index d8bbc8ef0c..101c16656a 100644 --- a/futures-util/src/stream/mapped_futures/iter.rs +++ b/futures-util/src/stream/mapped_futures/iter.rs @@ -1,47 +1,38 @@ use super::TaskSet; -use crate::stream::futures_keyed; +use crate::stream::futures_unordered_internal; use core::hash::Hash; use core::pin::Pin; /// Mutable iterator over all futures in the unordered set. -// #[derive(Debug)] +#[derive(Debug)] pub struct IterPinMut<'a, K: Hash + Eq, Fut> { - pub(super) inner: futures_keyed::IterPinMut<'a, K, Fut, TaskSet>, - // pub(super) task: *const Task, - // pub(super) len: usize, - // pub(super) _marker: PhantomData<&'a mut MappedFutures>, + pub(super) inner: futures_unordered_internal::IterPinMut<'a, K, Fut, TaskSet>, } /// Mutable iterator over all futures in the unordered set. -// #[derive(Debug)] +#[derive(Debug)] pub struct IterMut<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinMut<'a, K, Fut>); /// Immutable iterator over all futures in the unordered set. 
-// #[derive(Debug)] +#[derive(Debug)] pub struct IterPinRef<'a, K: Hash + Eq, Fut> { - // pub(super) task: *const Task, - // pub(super) len: usize, - // pub(super) pending_next_all: *mut Task, - // pub(super) _marker: PhantomData<&'a MappedFutures>, - pub(super) inner: futures_keyed::IterPinRef<'a, K, Fut, TaskSet>, + pub(super) inner: futures_unordered_internal::IterPinRef<'a, K, Fut, TaskSet>, } /// Immutable iterator over all the futures in the unordered set. -// #[derive(Debug)] +#[derive(Debug)] pub struct Iter<'a, K: Hash + Eq, Fut: Unpin>(pub(super) IterPinRef<'a, K, Fut>); /// Owned iterator over all futures in the unordered set. -// #[derive(Debug)] +#[derive(Debug)] pub struct IntoIter { - pub(super) inner: futures_keyed::IntoIter>, + pub(super) inner: futures_unordered_internal::IntoIter>, } /// Immutable iterator over all keys in the mapping. +#[derive(Debug)] pub struct Keys<'a, K: Hash + Eq, Fut> { - pub(super) inner: futures_keyed::IterPinRef<'a, K, Fut, TaskSet>, // pub(super) inner: std::iter::Map< - // std::collections::hash_set::Iter<'a, HashTask>, - // Box) -> &'a K>, - // >, + pub(super) inner: futures_unordered_internal::IterPinRef<'a, K, Fut, TaskSet>, } impl Iterator for IntoIter { @@ -124,17 +115,3 @@ impl<'a, K: Hash + Eq, Fut> Iterator for Keys<'a, K, Fut> { self.inner.next().map(|opt| opt.0) } } - -// SAFETY: we do nothing thread-local and there is no interior mutability, -// so the usual structural `Send`/`Sync` apply. -unsafe impl Send for IterPinRef<'_, K, Fut> {} -unsafe impl Sync for IterPinRef<'_, K, Fut> {} - -unsafe impl Send for IterPinMut<'_, K, Fut> {} -unsafe impl Sync for IterPinMut<'_, K, Fut> {} - -unsafe impl Send for IntoIter {} -unsafe impl Sync for IntoIter {} - -unsafe impl Send for Keys<'_, K, Fut> {} -unsafe impl Sync for Keys<'_, K, Fut> {} diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs index 0eead9cc52..7bb2d783f8 100644 --- a/futures-util/src/stream/mapped_futures/mod.rs +++ b/futures-util/src/stream/mapped_futures/mod.rs @@ -14,9 +14,9 @@ mod iter; use self::iter::Keys; #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352 pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef}; -use crate::stream::futures_keyed::task::HashTask; +use crate::stream::futures_unordered_internal::task::HashTask; -use super::futures_keyed::{FuturesKeyed, ReleasesTask}; +use super::futures_unordered_internal::{FuturesUnorderedInternal, ReleasesTask}; /// A map of futures which may complete in any order. /// @@ -37,13 +37,19 @@ use super::futures_keyed::{FuturesKeyed, ReleasesTask}; /// with the [`MappedFutures::new`] constructor. #[must_use = "streams do nothing unless polled"] pub struct MappedFutures { - inner: FuturesKeyed>, + inner: FuturesUnorderedInternal>, } struct TaskSet { inner: HashSet>, } +impl Debug for TaskSet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "TaskSet {{ ... }}") + } +} + // impl Deref for TaskSet { // type Target = HashSet>; // @@ -109,7 +115,7 @@ impl MappedFutures { /// In this state, [`MappedFutures::poll_next`](Stream::poll_next) will /// return [`Poll::Ready(None)`](Poll::Ready). pub fn new() -> Self { - Self { inner: FuturesKeyed::new() } + Self { inner: FuturesUnorderedInternal::new() } } /// Returns the number of futures contained in the set. @@ -233,10 +239,7 @@ impl MappedFutures { } /// Get a shared reference to the mapped future. 
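+    /// A shared reference cannot move the future out of its pinned slot,
+    /// so no `Fut: Unpin` bound is needed here.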
- pub fn get(&mut self, key: &K) -> Option<&Fut> - where - Fut: Unpin, - { + pub fn get(&mut self, key: &K) -> Option<&Fut> { // if let Some(task_ref) = self.hash_set.get(key) { if let Some(task_ref) = self.set().get(key) { unsafe { diff --git a/futures-util/src/stream/mod.rs b/futures-util/src/stream/mod.rs index 08ae0362f2..1a7d507ef3 100644 --- a/futures-util/src/stream/mod.rs +++ b/futures-util/src/stream/mod.rs @@ -115,10 +115,10 @@ mod futures_ordered; #[cfg(feature = "alloc")] pub use self::futures_ordered::FuturesOrdered; -mod futures_keyed; #[cfg(not(futures_no_atomic_cas))] #[cfg(feature = "alloc")] pub mod futures_unordered; +mod futures_unordered_internal; #[cfg(not(futures_no_atomic_cas))] #[cfg(feature = "alloc")] #[doc(inline)] From e8d2071efc67efd68dad60b46f67e604ede7ecbb Mon Sep 17 00:00:00 2001 From: CreativeStoic Date: Sat, 8 Jun 2024 08:36:53 -0400 Subject: [PATCH 08/10] fix mapped_futures test compilation --- futures-util/src/stream/mapped_futures/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs index 7bb2d783f8..d63c32e259 100644 --- a/futures-util/src/stream/mapped_futures/mod.rs +++ b/futures-util/src/stream/mapped_futures/mod.rs @@ -414,6 +414,7 @@ pub mod tests { use futures_timer::Delay; // use futures_util::StreamExt; // use crate::StreamExt; + use std::boxed::Box; use std::time::Duration; fn insert_millis(futs: &mut MappedFutures, key: u32, millis: u64) { From 29efed66123cd67f8ece47d8f851d560c461f18b Mon Sep 17 00:00:00 2001 From: CreativeStoic Date: Sat, 8 Jun 2024 08:37:11 -0400 Subject: [PATCH 09/10] complete master merge --- .../stream/futures_unordered_internal/mod.rs | 30 +++++++------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/futures-util/src/stream/futures_unordered_internal/mod.rs b/futures-util/src/stream/futures_unordered_internal/mod.rs index 5fa13907bb..c0f1ad2eb9 100644 --- a/futures-util/src/stream/futures_unordered_internal/mod.rs +++ b/futures-util/src/stream/futures_unordered_internal/mod.rs @@ -303,7 +303,7 @@ impl> FuturesUnorderedInternal { // // If, however, the queued flag was *not* set then we're safe to // release our reference count on the task. The queued flag was set - // above so all future `enqueue` operations will not actually + // above so all future `enqueue` operations Taskwill not actually // enqueue the task, so our task will never see the ready to run queue // again. The task itself will be deallocated once all reference counts // have been dropped elsewhere by the various wakers that contain it. @@ -526,10 +526,10 @@ impl> Stream for FuturesUnorderedInternal>` and can schedule the future for polling by + // our `Arc>` and can schedule the future for polling by // enqueuing itself in the ready to run queue. // - // Critically though `Task` won't actually access `Fut`, the + // Critically though `Task` won't actually access `Fut`, the // future, while it's floating around inside of wakers. // These structs will basically just use `Fut` to size // the internal allocation, appropriately accessing fields and @@ -539,7 +539,8 @@ impl> Stream for FuturesUnorderedInternal> Debug for FuturesUnorderedInternal { impl> FuturesUnorderedInternal { /// Clears the set, removing all futures. pub(super) fn clear(&mut self) { - self.clear_head_all(); - - // we just cleared all the tasks, and we have &mut self, so this is safe. 
- unsafe { self.ready_to_run_queue.clear() }; - - self.is_terminated.store(false, Relaxed); - } - - fn clear_head_all(&mut self) { - while !self.head_all.get_mut().is_null() { - let head = *self.head_all.get_mut(); - let task = unsafe { self.unlink(head) }; - self.release_task(task); - } + *self = Self::new(); } } @@ -615,7 +603,11 @@ impl> Drop for FuturesUnorderedInternal { // associated with it. At the same time though there may be tons of // wakers flying around which contain `Task` references // inside them. We'll let those naturally get deallocated. - self.clear_head_all(); + while !self.head_all.get_mut().is_null() { + let head = *self.head_all.get_mut(); + let task = unsafe { self.unlink(head) }; + self.release_task(task); + } // Note that at this point we could still have a bunch of tasks in the // ready to run queue. None of those tasks, however, have futures From 08acad38497d4728f79623acea63e4c386f55048 Mon Sep 17 00:00:00 2001 From: CreativeStoic Date: Sat, 8 Jun 2024 08:44:37 -0400 Subject: [PATCH 10/10] clean code --- .../src/stream/futures_unordered/mod.rs | 11 -------- .../stream/futures_unordered_internal/mod.rs | 25 +++---------------- .../stream/futures_unordered_internal/task.rs | 5 +--- futures-util/src/stream/mapped_futures/mod.rs | 13 ---------- 4 files changed, 4 insertions(+), 50 deletions(-) diff --git a/futures-util/src/stream/futures_unordered/mod.rs b/futures-util/src/stream/futures_unordered/mod.rs index 777ae2a876..91231344bc 100644 --- a/futures-util/src/stream/futures_unordered/mod.rs +++ b/futures-util/src/stream/futures_unordered/mod.rs @@ -208,12 +208,6 @@ impl FuturesUnordered { } } -// impl Drop for FuturesUnordered { -// fn drop(&mut self) { -// drop(self.inner) -// } -// } - impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered { type Item = &'a Fut; type IntoIter = Iter<'a, Fut>; @@ -237,11 +231,6 @@ impl IntoIterator for FuturesUnordered { type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { - // `head_all` can be accessed directly and we don't need to spin on - // `Task::next_all` since we have exclusive access to the set. - // let task = *self.head_all.get_mut(); - // let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } }; - IntoIter { inner: self.inner.into_iter() } } } diff --git a/futures-util/src/stream/futures_unordered_internal/mod.rs b/futures-util/src/stream/futures_unordered_internal/mod.rs index c0f1ad2eb9..4a11c4f196 100644 --- a/futures-util/src/stream/futures_unordered_internal/mod.rs +++ b/futures-util/src/stream/futures_unordered_internal/mod.rs @@ -24,7 +24,6 @@ use core::sync::atomic::{AtomicBool, AtomicPtr}; use futures_core::future::Future; use futures_core::stream::{FusedStream, Stream}; use futures_core::task::{Context, Poll}; -// use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError}; mod abort; @@ -78,24 +77,6 @@ unsafe impl> Send for FuturesUnorderedInternal< unsafe impl> Sync for FuturesUnorderedInternal {} impl> Unpin for FuturesUnorderedInternal {} -// impl Spawn for FuturesUnorderedInternal> { -// fn spawn_obj(&self, key: K, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> { -// self.push(key, future_obj); -// Ok(()) -// } -// } -// -// impl LocalSpawn for FuturesUnorderedInternal> { -// fn spawn_local_obj( -// &self, -// key: K, -// future_obj: LocalFutureObj<'static, ()>, -// ) -> Result<(), SpawnError> { -// self.push(key, future_obj); -// Ok(()) -// } -// } - // FuturesUnorderedInternal is implemented using two linked lists. 
One which links all // futures managed by a `FuturesUnorderedInternal` and one that tracks futures that have // been scheduled for polling. The first linked list allows for thread safe @@ -303,7 +284,7 @@ impl> FuturesUnorderedInternal { // // If, however, the queued flag was *not* set then we're safe to // release our reference count on the task. The queued flag was set - // above so all future `enqueue` operations Taskwill not actually + // above so all future `enqueue` operations will not actually // enqueue the task, so our task will never see the ready to run queue // again. The task itself will be deallocated once all reference counts // have been dropped elsewhere by the various wakers that contain it. @@ -526,10 +507,10 @@ impl> Stream for FuturesUnorderedInternal>` and can schedule the future for polling by + // our `Arc>` and can schedule the future for polling by // enqueuing itself in the ready to run queue. // - // Critically though `Task` won't actually access `Fut`, the + // Critically though `Task` won't actually access `Fut`, the // future, while it's floating around inside of wakers. // These structs will basically just use `Fut` to size // the internal allocation, appropriately accessing fields and diff --git a/futures-util/src/stream/futures_unordered_internal/task.rs b/futures-util/src/stream/futures_unordered_internal/task.rs index 815a9b9e6f..bfb436a722 100644 --- a/futures-util/src/stream/futures_unordered_internal/task.rs +++ b/futures-util/src/stream/futures_unordered_internal/task.rs @@ -7,7 +7,7 @@ use core::sync::atomic::{AtomicBool, AtomicPtr}; use super::abort::abort; use super::ReadyToRunQueue; -use crate::task::{waker_ref, ArcWake, WakerRef}; +use crate::task::ArcWake; use core::hash::Hash; pub(crate) struct Task { @@ -151,9 +151,6 @@ impl HashTask { fn key(&self) -> Option<&K> { Task::key(&*self) } - // pub(crate) fn key_unwrap(&self) -> &K { - // unsafe { (&*self.key.get()).as_ref().unwrap() } - // } } impl Deref for HashTask { diff --git a/futures-util/src/stream/mapped_futures/mod.rs b/futures-util/src/stream/mapped_futures/mod.rs index d63c32e259..dfb37a756b 100644 --- a/futures-util/src/stream/mapped_futures/mod.rs +++ b/futures-util/src/stream/mapped_futures/mod.rs @@ -50,19 +50,6 @@ impl Debug for TaskSet { } } -// impl Deref for TaskSet { -// type Target = HashSet>; -// -// fn deref(&self) -> &Self::Target { -// &self.inner -// } -// } -// impl DerefMut for TaskSet { -// fn deref_mut(&mut self) -> &Self::Target { -// &mut self.inner -// } -// } - impl ReleasesTask for TaskSet { fn release_task(&mut self, key: &K) { self.inner.remove(key);
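With the series applied, the flow exercised by the `map_futures` test above
looks roughly like this as free-standing code (a sketch: the `MappedFutures`
import path and the `insert` method are assumed from the first patch and the
test's `insert_millis` helper):

    use futures::executor::block_on;
    use futures::stream::StreamExt;
    use futures_timer::Delay;
    use futures_util::stream::MappedFutures;
    use std::time::Duration;

    fn main() {
        let mut futures: MappedFutures<u32, Delay> = MappedFutures::new();
        futures.insert(1, Delay::new(Duration::from_millis(50)));
        futures.insert(3, Delay::new(Duration::from_millis(150)));
        futures.insert(4, Delay::new(Duration::from_millis(200)));

        // Completed futures come back as (key, output) pairs, in
        // completion order.
        assert_eq!(block_on(futures.next()).unwrap().0, 1);

        // Cancelling by key drops the future without polling it again.
        assert!(futures.cancel(&3));
        assert_eq!(block_on(futures.next()).unwrap().0, 4);
        assert!(block_on(futures.next()).is_none());
    }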