Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore: test more features with Miri #4397

Merged
merged 14 commits into from Feb 9, 2022
16 changes: 8 additions & 8 deletions .github/workflows/ci.yml
Expand Up @@ -167,18 +167,18 @@ jobs:
- uses: actions-rs/toolchain@v1
with:
toolchain: ${{ env.nightly }}
components: miri
override: true
- uses: Swatinem/rust-cache@v1
- name: Install Miri
run: |
set -e
rustup component add miri
cargo miri setup
rm -rf tokio/tests

- name: miri
run: cargo miri test --features rt,rt-multi-thread,sync task
# Many of the tests in tokio/tests, as well as the doctests, use #[tokio::test] or
# #[tokio::main], which call epoll_create1 — a syscall Miri does not support.
run: cargo miri test --features fs,io-util,io-std,net,parking_lot,rt,rt-multi-thread,sync --lib --no-fail-fast
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is opting out of some things that use tokio::sync::Notified. Getting intrusive linked lists on the stack to pass with miri will probably be difficult.

Copy link
Contributor

@Darksonn Darksonn Jan 28, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Or maybe not .. it seems like the problem was elsewhere entirely.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure how to fix the timer, but Notified works now.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Haha, I figured it out: The issue for timers was futures::executor::block_on.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The issue for timers was futures::executor::block_on.

Ah, futures has a few unreleased fixes for stacked-borrows violations under -Zmiri-tag-raw-pointers: rust-lang/futures-rs#2550

working-directory: tokio
env:
MIRIFLAGS: -Zmiri-disable-isolation -Zmiri-tag-raw-pointers
PROPTEST_CASES: 10

san:
name: san
runs-on: ubuntu-latest
Expand Down
2 changes: 2 additions & 0 deletions tokio/src/fs/file/tests.rs
Expand Up @@ -228,6 +228,7 @@ fn flush_while_idle() {
}

#[test]
#[cfg_attr(miri, ignore)] // takes a really long time with miri
fn read_with_buffer_larger_than_max() {
// Chunks
let chunk_a = 16 * 1024;
Expand Down Expand Up @@ -299,6 +300,7 @@ fn read_with_buffer_larger_than_max() {
}

#[test]
#[cfg_attr(miri, ignore)] // takes a really long time with miri
fn write_with_buffer_larger_than_max() {
// Chunks
let chunk_a = 16 * 1024;
Expand Down
1 change: 1 addition & 0 deletions tokio/src/process/unix/orphan.rs
Expand Up @@ -280,6 +280,7 @@ pub(crate) mod test {
drop(signal_guard);
}

#[cfg_attr(miri, ignore)] // Miri does not support epoll.
#[test]
fn does_not_register_signal_if_queue_empty() {
let signal_driver = IoDriver::new().and_then(SignalDriver::new).unwrap();
Expand Down
7 changes: 6 additions & 1 deletion tokio/src/runtime/task/harness.rs
Expand Up @@ -26,6 +26,10 @@ where
}
}

/// Returns a raw `NonNull` pointer to the task's `Header`.
///
/// NOTE(review): the bare `cast` assumes the `Header` sits at offset 0 of
/// the heap-allocated `Cell` — confirm against the `Cell` layout in
/// `raw.rs`. Using a pointer cast (instead of going through a `&Header`
/// reference) keeps the pointer's provenance over the whole allocation,
/// which matters under Miri's stacked-borrows checking.
fn header_ptr(&self) -> NonNull<Header> {
self.cell.cast()
}

/// Returns a shared reference to the task's `Header`.
///
/// NOTE(review): assumes `self.cell` points at a live task cell for at
/// least the lifetime of `&self` — this invariant is upheld by the
/// harness's construction, not visible in this block.
fn header(&self) -> &Header {
unsafe { &self.cell.as_ref().header }
}
Expand Down Expand Up @@ -93,7 +97,8 @@ where

match self.header().state.transition_to_running() {
TransitionToRunning::Success => {
let waker_ref = waker_ref::<T, S>(self.header());
let header_ptr = self.header_ptr();
let waker_ref = waker_ref::<T, S>(&header_ptr);
let cx = Context::from_waker(&*waker_ref);
let res = poll_future(&self.core().stage, cx);

Expand Down
4 changes: 2 additions & 2 deletions tokio/src/runtime/task/mod.rs
Expand Up @@ -313,7 +313,7 @@ cfg_rt_multi_thread! {

impl<S: 'static> Task<S> {
fn into_raw(self) -> NonNull<Header> {
let ret = self.header().into();
let ret = self.raw.header_ptr();
mem::forget(self);
ret
}
Expand Down Expand Up @@ -427,7 +427,7 @@ unsafe impl<S> linked_list::Link for Task<S> {
type Target = Header;

fn as_raw(handle: &Task<S>) -> NonNull<Header> {
handle.header().into()
handle.raw.header_ptr()
}

unsafe fn from_raw(ptr: NonNull<Header>) -> Task<S> {
Expand Down
4 changes: 4 additions & 0 deletions tokio/src/runtime/task/raw.rs
Expand Up @@ -57,6 +57,10 @@ impl RawTask {
RawTask { ptr }
}

/// Returns the raw task pointer, typed as a pointer to the task `Header`.
///
/// Handing out the stored `NonNull` directly (rather than reconstructing
/// it from a `&Header`) preserves the original pointer's provenance.
pub(super) fn header_ptr(&self) -> NonNull<Header> {
self.ptr
}

/// Returns a reference to the task's meta structure.
///
/// Safe as `Header` is `Sync`.
Expand Down
10 changes: 5 additions & 5 deletions tokio/src/runtime/task/waker.rs
Expand Up @@ -15,7 +15,7 @@ pub(super) struct WakerRef<'a, S: 'static> {

/// Returns a `WakerRef` which avoids having to pre-emptively increase the
/// refcount if there is no need to do so.
pub(super) fn waker_ref<T, S>(header: &Header) -> WakerRef<'_, S>
pub(super) fn waker_ref<T, S>(header: &NonNull<Header>) -> WakerRef<'_, S>
hawkw marked this conversation as resolved.
Show resolved Hide resolved
where
T: Future,
S: Schedule,
Expand All @@ -28,7 +28,7 @@ where
// point and not an *owned* waker, we must ensure that `drop` is never
// called on this waker instance. This is done by wrapping it with
// `ManuallyDrop` and then never calling drop.
let waker = unsafe { ManuallyDrop::new(Waker::from_raw(raw_waker::<T, S>(header))) };
let waker = unsafe { ManuallyDrop::new(Waker::from_raw(raw_waker::<T, S>(*header))) };

WakerRef {
waker,
Expand Down Expand Up @@ -77,7 +77,7 @@ where
let harness = Harness::<T, S>::from_raw(ptr);
trace!(harness, "waker.clone");
(*header).state.ref_inc();
raw_waker::<T, S>(header)
raw_waker::<T, S>(ptr)
}

unsafe fn drop_waker<T, S>(ptr: *const ())
Expand Down Expand Up @@ -114,12 +114,12 @@ where
harness.wake_by_ref();
}

fn raw_waker<T, S>(header: *const Header) -> RawWaker
fn raw_waker<T, S>(header: NonNull<Header>) -> RawWaker
where
T: Future,
S: Schedule,
{
let ptr = header as *const ();
let ptr = header.as_ptr() as *const ();
let vtable = &RawWakerVTable::new(
clone_waker::<T, S>,
wake_by_val::<T, S>,
Expand Down
20 changes: 14 additions & 6 deletions tokio/src/runtime/tests/queue.rs
Expand Up @@ -101,13 +101,21 @@ fn steal_batch() {
assert!(local1.pop().is_none());
}

/// Selects a test-size constant: the reduced `miri` value when the suite
/// is compiled under Miri (where execution is orders of magnitude slower),
/// and the full `normal` value otherwise.
const fn normal_or_miri(normal: usize, miri: usize) -> usize {
    match cfg!(miri) {
        true => miri,
        false => normal,
    }
}
hawkw marked this conversation as resolved.
Show resolved Hide resolved

#[test]
fn stress1() {
const NUM_ITER: usize = 1;
const NUM_STEAL: usize = 1_000;
const NUM_LOCAL: usize = 1_000;
const NUM_PUSH: usize = 500;
const NUM_POP: usize = 250;
const NUM_STEAL: usize = normal_or_miri(1_000, 10);
const NUM_LOCAL: usize = normal_or_miri(1_000, 10);
const NUM_PUSH: usize = normal_or_miri(500, 10);
const NUM_POP: usize = normal_or_miri(250, 10);

let mut metrics = MetricsBatch::new();

Expand Down Expand Up @@ -169,8 +177,8 @@ fn stress1() {
#[test]
fn stress2() {
const NUM_ITER: usize = 1;
const NUM_TASKS: usize = 1_000_000;
const NUM_STEAL: usize = 1_000;
const NUM_TASKS: usize = normal_or_miri(1_000_000, 50);
const NUM_STEAL: usize = normal_or_miri(1_000, 10);

let mut metrics = MetricsBatch::new();

Expand Down
5 changes: 3 additions & 2 deletions tokio/src/sync/notify.rs
Expand Up @@ -130,6 +130,7 @@ enum NotificationType {
}

#[derive(Debug)]
#[repr(C)] // required by `linked_list::Link` impl
Darksonn marked this conversation as resolved.
Show resolved Hide resolved
struct Waiter {
/// Intrusive linked-list pointers.
pointers: linked_list::Pointers<Waiter>,
Expand Down Expand Up @@ -731,8 +732,8 @@ unsafe impl linked_list::Link for Waiter {
ptr
}

unsafe fn pointers(mut target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> {
NonNull::from(&mut target.as_mut().pointers)
unsafe fn pointers(target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> {
target.cast()
}
}

Expand Down
1 change: 1 addition & 0 deletions tokio/src/time/driver/mod.rs
Expand Up @@ -525,4 +525,5 @@ impl fmt::Debug for Inner {
}

#[cfg(test)]
#[cfg(not(miri))]
mod tests;
5 changes: 3 additions & 2 deletions tokio/src/util/linked_list.rs
Expand Up @@ -352,6 +352,7 @@ mod tests {
use std::pin::Pin;

#[derive(Debug)]
#[repr(C)]
struct Entry {
pointers: Pointers<Entry>,
val: i32,
Expand All @@ -369,8 +370,8 @@ mod tests {
Pin::new_unchecked(&*ptr.as_ptr())
}

unsafe fn pointers(mut target: NonNull<Entry>) -> NonNull<Pointers<Entry>> {
NonNull::from(&mut target.as_mut().pointers)
unsafe fn pointers(target: NonNull<Entry>) -> NonNull<Pointers<Entry>> {
target.cast()
}
}

Expand Down
55 changes: 42 additions & 13 deletions tokio/src/util/slab.rs
Expand Up @@ -157,6 +157,10 @@ struct Slot<T> {

/// Next entry in the free list.
next: u32,

/// Makes miri happy. Could probably also be fixed by replacing `slots` with a raw-pointer
Darksonn marked this conversation as resolved.
Show resolved Hide resolved
/// based equivalent.
_pin: std::marker::PhantomPinned,
}

/// Value paired with a reference to the page.
Expand Down Expand Up @@ -409,7 +413,7 @@ impl<T: Entry> Page<T> {
slot.value.with(|ptr| unsafe { (*ptr).value.reset() });

// Return a reference to the slot
Some((me.addr(idx), slot.gen_ref(me)))
Some((me.addr(idx), locked.gen_ref(idx, me)))
} else if me.len == locked.slots.len() {
// The page is full
None
Expand All @@ -428,9 +432,10 @@ impl<T: Entry> Page<T> {
locked.slots.push(Slot {
value: UnsafeCell::new(Value {
value: Default::default(),
page: &**me as *const _,
page: Arc::as_ptr(me),
}),
next: 0,
_pin: std::marker::PhantomPinned,
});

// Increment the head to indicate the free stack is empty
Expand All @@ -443,7 +448,7 @@ impl<T: Entry> Page<T> {

debug_assert_eq!(locked.slots.len(), locked.head);

Some((me.addr(idx), locked.slots[idx].gen_ref(me)))
Some((me.addr(idx), locked.gen_ref(idx, me)))
}
}
}
Expand Down Expand Up @@ -558,8 +563,21 @@ impl<T> Slots<T> {

idx
}

/// Generates a `Ref` for the slot at the given index. This involves bumping the page's ref count.
fn gen_ref(&self, idx: usize, page: &Arc<Page<T>>) -> Ref<T> {
// The returned `Ref` logically owns one strong count on the page: the
// cloned `Arc` is forgotten here so the count stays elevated until the
// `Ref` is released.
assert!(idx < self.slots.len());
mem::forget(page.clone());

// Derive the slot pointer from the Vec's base pointer with `add`,
// presumably so the pointer's provenance spans the whole `slots`
// allocation (rather than a single element borrow), which Miri's
// stacked-borrows checking requires — confirm against the PR discussion.
let vec_ptr = self.slots.as_ptr();
let slot: *const Slot<T> = unsafe { vec_ptr.add(idx) };
// NOTE(review): this cast assumes the `value: UnsafeCell<Value<T>>`
// field is at offset 0 of `Slot<T>` — verify `Slot`'s field order/layout.
let value: *const Value<T> = slot as *const Value<T>;

Ref { value }
}
}

/*
impl<T: Entry> Slot<T> {
/// Generates a `Ref` for the slot. This involves bumping the page's ref count.
fn gen_ref(&self, page: &Arc<Page<T>>) -> Ref<T> {
Expand All @@ -574,6 +592,7 @@ impl<T: Entry> Slot<T> {
Ref { value }
}
}
*/

impl<T> Value<T> {
/// Releases the slot, returning the `Arc<Page<T>>` logically owned by the ref.
Expand Down Expand Up @@ -691,11 +710,13 @@ mod test {

#[test]
fn insert_many() {
const MANY: usize = normal_or_miri(10_000, 50);

let mut slab = Slab::<Foo>::new();
let alloc = slab.allocator();
let mut entries = vec![];

for i in 0..10_000 {
for i in 0..MANY {
let (addr, val) = alloc.allocate().unwrap();
val.id.store(i, SeqCst);
entries.push((addr, val));
Expand All @@ -708,15 +729,15 @@ mod test {

entries.clear();

for i in 0..10_000 {
for i in 0..MANY {
let (addr, val) = alloc.allocate().unwrap();
val.id.store(10_000 - i, SeqCst);
val.id.store(MANY - i, SeqCst);
entries.push((addr, val));
}

for (i, (addr, v)) in entries.iter().enumerate() {
assert_eq!(10_000 - i, v.id.load(SeqCst));
assert_eq!(10_000 - i, slab.get(*addr).unwrap().id.load(SeqCst));
assert_eq!(MANY - i, v.id.load(SeqCst));
assert_eq!(MANY - i, slab.get(*addr).unwrap().id.load(SeqCst));
}
}

Expand All @@ -726,15 +747,15 @@ mod test {
let alloc = slab.allocator();
let mut entries = vec![];

for i in 0..10_000 {
for i in 0..normal_or_miri(10_000, 100) {
let (addr, val) = alloc.allocate().unwrap();
val.id.store(i, SeqCst);
entries.push((addr, val));
}

for _ in 0..10 {
// Drop 1000 in reverse
for _ in 0..1_000 {
for _ in 0..normal_or_miri(1_000, 10) {
entries.pop();
}

Expand All @@ -753,7 +774,7 @@ mod test {
let mut entries1 = vec![];
let mut entries2 = vec![];

for i in 0..10_000 {
for i in 0..normal_or_miri(10_000, 100) {
let (addr, val) = alloc.allocate().unwrap();
val.id.store(i, SeqCst);

Expand All @@ -771,6 +792,14 @@ mod test {
}
}

/// Chooses how much work a stress test does: the small `miri` count when
/// built under Miri (interpreted execution is very slow), otherwise the
/// full `normal` count.
const fn normal_or_miri(normal: usize, miri: usize) -> usize {
    match cfg!(miri) {
        true => miri,
        false => normal,
    }
}

#[test]
fn compact_all() {
let mut slab = Slab::<Foo>::new();
Expand All @@ -780,7 +809,7 @@ mod test {
for _ in 0..2 {
entries.clear();

for i in 0..10_000 {
for i in 0..normal_or_miri(10_000, 100) {
let (addr, val) = alloc.allocate().unwrap();
val.id.store(i, SeqCst);

Expand Down Expand Up @@ -808,7 +837,7 @@ mod test {
let alloc = slab.allocator();
let mut entries = vec![];

for _ in 0..5 {
for _ in 0..normal_or_miri(5, 2) {
entries.clear();

// Allocate a few pages + 1
Expand Down