From 619f7db2dfae7bd683d6ebb7e9aeb062409c54ed Mon Sep 17 00:00:00 2001
From: Vincent Esche
Date: Wed, 7 Oct 2020 12:33:06 +0200
Subject: [PATCH] Fixed a few typos

---
 crossbeam-epoch/src/atomic.rs       | 8 ++++----
 crossbeam-epoch/src/internal.rs     | 4 ++--
 crossbeam-skiplist/src/base.rs      | 8 ++++----
 crossbeam-utils/src/cache_padded.rs | 2 +-
 crossbeam-utils/src/thread.rs       | 2 +-
 5 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/crossbeam-epoch/src/atomic.rs b/crossbeam-epoch/src/atomic.rs
index e637071ef..517718745 100644
--- a/crossbeam-epoch/src/atomic.rs
+++ b/crossbeam-epoch/src/atomic.rs
@@ -749,7 +749,7 @@ pub trait Pointer<T: ?Sized + Pointable> {
     /// # Safety
     ///
     /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
-    /// not be converted back by `Pointer::from_usize()` mutliple times.
+    /// not be converted back by `Pointer::from_usize()` multiple times.
     unsafe fn from_usize(data: usize) -> Self;
 }
 
@@ -801,7 +801,7 @@ impl<T> Owned<T> {
     /// # Safety
     ///
     /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
-    /// back by `Owned::from_raw()` mutliple times.
+    /// back by `Owned::from_raw()` multiple times.
     ///
     /// # Examples
     ///
@@ -1108,7 +1108,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
     ///
     /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
     ///
-    /// Another concern is the possiblity of data races due to lack of proper synchronization.
+    /// Another concern is the possibility of data races due to lack of proper synchronization.
     /// For example, consider the following scenario:
     ///
     /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
@@ -1188,7 +1188,7 @@ impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
     ///
     /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
     ///
-    /// Another concern is the possiblity of data races due to lack of proper synchronization.
+    /// Another concern is the possibility of data races due to lack of proper synchronization.
     /// For example, consider the following scenario:
     ///
     /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
diff --git a/crossbeam-epoch/src/internal.rs b/crossbeam-epoch/src/internal.rs
index 6c1e98f4c..9976ffe86 100644
--- a/crossbeam-epoch/src/internal.rs
+++ b/crossbeam-epoch/src/internal.rs
@@ -29,7 +29,7 @@
 //! Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and
 //! destroyed along the way. This design reduces contention on data structures. The global queue
 //! cannot be explicitly accessed: the only way to interact with it is by calling functions
-//! `defer()` that adds an object tothe thread-local bag, or `collect()` that manually triggers
+//! `defer()` that adds an object to the thread-local bag, or `collect()` that manually triggers
 //! garbage collection.
 //!
 //! Ideally each instance of concurrent data structure may have its own queue that gets fully
@@ -368,7 +368,7 @@ pub struct Local {
 
     /// Total number of pinnings performed.
     ///
-    /// This is just an auxilliary counter that sometimes kicks off collection.
+    /// This is just an auxiliary counter that sometimes kicks off collection.
     pin_count: Cell<Wrapping<usize>>,
 }
 
diff --git a/crossbeam-skiplist/src/base.rs b/crossbeam-skiplist/src/base.rs
index 92eb2aaa4..cc617f8fa 100644
--- a/crossbeam-skiplist/src/base.rs
+++ b/crossbeam-skiplist/src/base.rs
@@ -226,7 +226,7 @@ impl<K, V> Node<K, V> {
         }
     }
 
-    /// Decrements the reference count of a node, pinning the thread and destoying the node
+    /// Decrements the reference count of a node, pinning the thread and destroying the node
     /// if the count become zero.
     #[inline]
     unsafe fn decrement_with_pin<F>(&self, parent: &SkipList<K, V>, pin: F)
@@ -1157,7 +1157,7 @@ where
 
         loop {
             {
-                // Search for the first entry in order to unlink all the preceeding entries
+                // Search for the first entry in order to unlink all the preceding entries
                 // we have removed.
                 //
                 // By unlinking nodes in batches we make sure that the final search doesn't
@@ -1933,7 +1933,7 @@ where
 pub struct IntoIter<K, V> {
     /// The current node.
     ///
-    /// All preceeding nods have already been destroyed.
+    /// All preceding nods have already been destroyed.
     node: *mut Node<K, V>,
 }
 
@@ -1946,7 +1946,7 @@ impl<K, V> Drop for IntoIter<K, V> {
                 // the skip list.
                 let next = (*self.node).tower[0].load(Ordering::Relaxed, epoch::unprotected());
 
-                // We can safely do this without defering because references to
+                // We can safely do this without deferring because references to
                 // keys & values that we give out never outlive the SkipList.
                 Node::finalize(self.node);
 
diff --git a/crossbeam-utils/src/cache_padded.rs b/crossbeam-utils/src/cache_padded.rs
index 8998f97cb..62c686b7e 100644
--- a/crossbeam-utils/src/cache_padded.rs
+++ b/crossbeam-utils/src/cache_padded.rs
@@ -4,7 +4,7 @@ use core::ops::{Deref, DerefMut};
 /// Pads and aligns a value to the length of a cache line.
 ///
 /// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of
-/// data are not placed into the same cache line. Updating an atomic value invalides the whole
+/// data are not placed into the same cache line. Updating an atomic value invalidates the whole
 /// cache line it belongs to, which makes the next access to the same cache line slower for other
 /// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other
 /// cached data.
diff --git a/crossbeam-utils/src/thread.rs b/crossbeam-utils/src/thread.rs
index 393d0d607..ab91be72d 100644
--- a/crossbeam-utils/src/thread.rs
+++ b/crossbeam-utils/src/thread.rs
@@ -442,7 +442,7 @@ impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> {
             *result.lock().unwrap() = Some(res);
         };
 
-        // Allocate `clsoure` on the heap and erase the `'env` bound.
+        // Allocate `closure` on the heap and erase the `'env` bound.
        let closure: Box<dyn FnOnce() + Send + 'env> = Box::new(closure);
        let closure: Box<dyn FnOnce() + Send + 'static> = unsafe { mem::transmute(closure) };