From 86ce2eaf28757150fc7407a4ea8a59a640dbbc48 Mon Sep 17 00:00:00 2001
From: Jerome Froelich
Date: Mon, 22 Jan 2018 20:49:24 -0500
Subject: [PATCH] Implement Send and Sync for LruCache

---
 Cargo.toml |  3 +++
 src/lib.rs | 66 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 64 insertions(+), 5 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 7e44d84..76e100c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,3 +14,6 @@ keywords = ["LRU", "cache"]
 nightly = []
 
 [dependencies]
+
+[dev-dependencies]
+scoped_threadpool = "0.1.*"
diff --git a/src/lib.rs b/src/lib.rs
index d2beccd..894f14d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -55,6 +55,9 @@
 //! }
 //! ```
 
+#[cfg(test)]
+extern crate scoped_threadpool;
+
 use std::mem;
 use std::ptr;
 use std::hash::{Hash, Hasher};
@@ -119,7 +122,9 @@ impl<K: Hash + Eq, V> LruCache<K, V> {
     /// let mut cache: LruCache<isize, &str> = LruCache::new(10);
     /// ```
     pub fn new(cap: usize) -> LruCache<K, V> {
-        let mut cache = LruCache {
+        // NB: The compiler warns that cache does not need to be marked as mutable if we
+        // declare it as such since we only mutate it inside the unsafe block.
+        let cache = LruCache {
             map: HashMap::with_capacity(cap),
             cap: cap,
             head: unsafe { Box::into_raw(Box::new(mem::uninitialized::<LruEntry<K, V>>())) },
@@ -164,7 +169,9 @@ impl<K: Hash + Eq, V> LruCache<K, V> {
             None => {
                 let mut node = if self.len() == self.cap() {
                     // if the cache is full, remove the last entry so we can use it for the new key
-                    let old_key = KeyRef { k: unsafe { &(*(*self.tail).prev).key } };
+                    let old_key = KeyRef {
+                        k: unsafe { &(*(*self.tail).prev).key },
+                    };
                     let mut old_node = self.map.remove(&old_key).unwrap();
 
                     old_node.key = k;
@@ -456,7 +463,9 @@ impl<K: Hash + Eq, V> LruCache<K, V> {
         let prev;
         unsafe { prev = (*self.tail).prev }
         if prev != self.head {
-            let old_key = KeyRef { k: unsafe { &(*(*self.tail).prev).key } };
+            let old_key = KeyRef {
+                k: unsafe { &(*(*self.tail).prev).key },
+            };
             let mut old_node = self.map.remove(&old_key).unwrap();
             let node_ptr: *mut LruEntry<K, V> = &mut *old_node;
             self.detach(node_ptr);
@@ -491,8 +500,18 @@ impl<K, V> Drop for LruCache<K, V> {
             let head = *Box::from_raw(self.head);
             let tail = *Box::from_raw(self.tail);
 
-            let LruEntry { next: _, prev: _, key: head_key, val: head_val } = head;
-            let LruEntry { next: _, prev: _, key: tail_key, val: tail_val } = tail;
+            let LruEntry {
+                next: _,
+                prev: _,
+                key: head_key,
+                val: head_val,
+            } = head;
+            let LruEntry {
+                next: _,
+                prev: _,
+                key: tail_key,
+                val: tail_val,
+            } = tail;
 
             mem::forget(head_key);
             mem::forget(head_val);
@@ -502,10 +521,17 @@
     }
 }
 
+// The compiler does not automatically derive Send and Sync for LruCache because it contains
+// raw pointers. The raw pointers are safely encapsulated by LruCache though so we can
+// implement Send and Sync for it below.
+unsafe impl<K: Send, V: Send> Send for LruCache<K, V> {}
+unsafe impl<K: Sync, V: Sync> Sync for LruCache<K, V> {}
+
 #[cfg(test)]
 mod tests {
     use std::fmt::Debug;
     use super::LruCache;
+    use scoped_threadpool::Pool;
 
     fn assert_opt_eq<V: PartialEq + Debug>(opt: Option<&V>, v: V) {
         assert!(opt.is_some());
@@ -690,4 +716,34 @@
         assert_eq!(cache.get(&3), Some(&"c"));
         assert_eq!(cache.get(&4), Some(&"d"));
     }
+
+    #[test]
+    fn test_send() {
+        use std::thread;
+
+        let mut cache = LruCache::new(4);
+        cache.put(1, "a");
+
+        let handle = thread::spawn(move || {
+            assert_eq!(cache.get(&1), Some(&"a"));
+        });
+
+        assert!(handle.join().is_ok());
+    }
+
+    #[test]
+    fn test_sync() {
+        let mut pool = Pool::new(1);
+        let mut cache = LruCache::new(4);
+        cache.put(1, "a");
+
+        let cache_ref = &cache;
+        pool.scoped(|scoped| {
+            scoped.execute(move || {
+                assert_eq!(cache_ref.peek(&1), Some(&"a"));
+            });
+        });
+
+        assert_eq!((cache_ref).peek(&1), Some(&"a"));
+    }
 }
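
Usage note (not part of the patch): a minimal sketch of how downstream code might take advantage of these impls to share one cache across threads. It assumes the crate is pulled in as `lru`, and it wraps the cache in `Arc<Mutex<...>>` because `put` and `get` take `&mut self`; the `main` function below is purely illustrative.

extern crate lru;

use std::sync::{Arc, Mutex};
use std::thread;

use lru::LruCache;

fn main() {
    // The new impl makes LruCache<K, V> Send when K and V are Send, so the cache
    // can live inside an Arc<Mutex<_>> that is cloned into worker threads.
    let cache = Arc::new(Mutex::new(LruCache::new(4)));
    cache.lock().unwrap().put(1, "a");

    let handles: Vec<_> = (0..2)
        .map(|_| {
            let cache = Arc::clone(&cache);
            thread::spawn(move || {
                // Each thread takes the lock before touching the shared cache.
                assert_eq!(cache.lock().unwrap().get(&1), Some(&"a"));
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}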