diff --git a/src/map.rs b/src/map.rs index 47067cfbfc..a5d3ccb97e 100644 --- a/src/map.rs +++ b/src/map.rs @@ -8361,4 +8361,48 @@ mod test_map { let ys = map.get_many_key_value_mut(["baz", "baz"]); assert_eq!(ys, None); } + + #[test] + #[should_panic = "panic in drop"] + fn test_clone_from_double_drop() { + #[derive(Clone)] + struct CheckedDrop { + panic_in_drop: bool, + dropped: bool, + } + impl Drop for CheckedDrop { + fn drop(&mut self) { + if self.panic_in_drop { + self.dropped = true; + panic!("panic in drop"); + } + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + const DISARMED: CheckedDrop = CheckedDrop { + panic_in_drop: false, + dropped: false, + }; + const ARMED: CheckedDrop = CheckedDrop { + panic_in_drop: true, + dropped: false, + }; + + let mut map1 = HashMap::new(); + map1.insert(1, DISARMED); + map1.insert(2, DISARMED); + map1.insert(3, DISARMED); + map1.insert(4, DISARMED); + + let mut map2 = HashMap::new(); + map2.insert(1, DISARMED); + map2.insert(2, ARMED); + map2.insert(3, DISARMED); + map2.insert(4, DISARMED); + + map2.clone_from(&map1); + } } diff --git a/src/raw/mod.rs b/src/raw/mod.rs index 8c902b6746..211b818a5f 100644 --- a/src/raw/mod.rs +++ b/src/raw/mod.rs @@ -1,5 +1,5 @@ use crate::alloc::alloc::{handle_alloc_error, Layout}; -use crate::scopeguard::guard; +use crate::scopeguard::{guard, ScopeGuard}; use crate::TryReserveError; use core::iter::FusedIterator; use core::marker::PhantomData; @@ -1602,25 +1602,27 @@ impl Clone for RawTable { Self::new_in(self.table.alloc.clone()) } else { unsafe { - let mut new_table = ManuallyDrop::new( - // Avoid `Result::ok_or_else` because it bloats LLVM IR. - match Self::new_uninitialized( - self.table.alloc.clone(), - self.table.buckets(), - Fallibility::Infallible, - ) { - Ok(table) => table, - Err(_) => hint::unreachable_unchecked(), - }, - ); - - new_table.clone_from_spec(self, |new_table| { - // We need to free the memory allocated for the new table. 
+ // Avoid `Result::ok_or_else` because it bloats LLVM IR. + let new_table = match Self::new_uninitialized( + self.table.alloc.clone(), + self.table.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }; + + // If cloning fails then we need to free the allocation for the + // new table. However we don't run its drop since its control + // bytes are not initialized yet. + let mut guard = guard(ManuallyDrop::new(new_table), |new_table| { new_table.free_buckets(); }); - // Return the newly created table. - ManuallyDrop::into_inner(new_table) + guard.clone_from_spec(self); + + // Disarm the scope guard and return the newly created table. + ManuallyDrop::into_inner(ScopeGuard::into_inner(guard)) } } } @@ -1630,19 +1632,30 @@ impl Clone for RawTable { *self = Self::new_in(self.table.alloc.clone()); } else { unsafe { - // First, drop all our elements without clearing the control bytes. - self.drop_elements(); + // Make sure that if any panic occurs, we clear the table and + // leave it in an empty state. + let mut self_ = guard(self, |self_| { + self_.clear_no_drop(); + }); + + // First, drop all our elements without clearing the control + // bytes. If this panics then the scope guard will clear the + // table, leaking any elements that were not dropped yet. + // + // This leak is unavoidable: we can't try dropping more elements + // since this could lead to another panic and abort the process. + self_.drop_elements(); // If necessary, resize our table to match the source. - if self.buckets() != source.buckets() { + if self_.buckets() != source.buckets() { // Skip our drop by using ptr::write. - if !self.table.is_empty_singleton() { - self.free_buckets(); + if !self_.table.is_empty_singleton() { + self_.free_buckets(); } - (self as *mut Self).write( + (&mut **self_ as *mut Self).write( // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
match Self::new_uninitialized( - self.table.alloc.clone(), + self_.table.alloc.clone(), source.buckets(), Fallibility::Infallible, ) { @@ -1652,10 +1665,10 @@ impl Clone for RawTable { ); } - self.clone_from_spec(source, |self_| { - // We need to leave the table in an empty state. - self_.clear_no_drop(); - }); + self_.clone_from_spec(source); + + // Disarm the scope guard if cloning was successful. + ScopeGuard::into_inner(self_); } } } @@ -1663,20 +1676,20 @@ impl Clone for RawTable { /// Specialization of `clone_from` for `Copy` types trait RawTableClone { - unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)); + unsafe fn clone_from_spec(&mut self, source: &Self); } impl RawTableClone for RawTable { default_fn! { #[cfg_attr(feature = "inline-more", inline)] - unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) { - self.clone_from_impl(source, on_panic); + unsafe fn clone_from_spec(&mut self, source: &Self) { + self.clone_from_impl(source); } } } #[cfg(feature = "nightly")] impl RawTableClone for RawTable { #[cfg_attr(feature = "inline-more", inline)] - unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) { + unsafe fn clone_from_spec(&mut self, source: &Self) { source .table .ctrl(0) @@ -1691,9 +1704,12 @@ impl RawTableClone for RawTable { } impl RawTable { - /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`. + /// Common code for clone and clone_from. Assumes: + /// - `self.buckets() == source.buckets()`. + /// - Any existing elements have been dropped. + /// - The control bytes are not initialized yet. #[cfg_attr(feature = "inline-more", inline)] - unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) { + unsafe fn clone_from_impl(&mut self, source: &Self) { // Copy the control bytes unchanged. 
We do this in a single pass source .table @@ -1711,11 +1727,6 @@ impl RawTable { } } } - - // Depending on whether we were called from clone or clone_from, we - // either need to free the memory for the destination table or just - // clear the control bytes. - on_panic(self_); }); for from in source.iter() { diff --git a/src/scopeguard.rs b/src/scopeguard.rs index ccdc0c511e..f85e6ab0ed 100644 --- a/src/scopeguard.rs +++ b/src/scopeguard.rs @@ -1,5 +1,9 @@ // Extracted from the scopeguard crate -use core::ops::{Deref, DerefMut}; +use core::{ + mem, + ops::{Deref, DerefMut}, + ptr, +}; pub struct ScopeGuard where @@ -17,6 +21,27 @@ where ScopeGuard { dropfn, value } } +impl ScopeGuard +where + F: FnMut(&mut T), +{ + #[inline] + pub fn into_inner(guard: Self) -> T { + // Cannot move out of Drop-implementing types, so + // ptr::read the value and forget the guard. + unsafe { + let value = ptr::read(&guard.value); + // read the closure so that it is dropped, and assign it to a local + // variable to ensure that it is only dropped after the guard has + // been forgotten. (In case the Drop impl of the closure, or that + // of any consumed captured variable, panics). + let _dropfn = ptr::read(&guard.dropfn); + mem::forget(guard); + value + } + } +} + impl Deref for ScopeGuard where F: FnMut(&mut T),