From 02a2103dc62a330ac4bd8d1d7dc52062c9a1a9ee Mon Sep 17 00:00:00 2001 From: Joshua Liebow-Feeser Date: Wed, 18 Aug 2021 20:32:17 +0000 Subject: [PATCH] Fix unsoundness in <BlockRng64 as RngCore>::next_u32 Previously, certain implementations of `BlockRngCore` could cause `BlockRng64`'s implementation of `RngCore::next_u32` to exhibit undefined behavior (UB). This removes that UB by adding a bounds check, and adds documentation to `BlockRngCore::Results` clarifying requirements for that type. While we're here, run `rustfmt` on existing code. Fixes #1158 --- rand_core/src/block.rs | 51 ++++++++++++++++++++++++++++++------------ rand_core/src/error.rs | 9 ++++---- rand_core/src/impls.rs | 8 +++---- rand_core/src/lib.rs | 2 +- 4 files changed, 46 insertions(+), 24 deletions(-) diff --git a/rand_core/src/block.rs b/rand_core/src/block.rs index 5f48bfaf546..43106439b56 100644 --- a/rand_core/src/block.rs +++ b/rand_core/src/block.rs @@ -57,8 +57,7 @@ use crate::impls::{fill_via_u32_chunks, fill_via_u64_chunks}; use crate::{CryptoRng, Error, RngCore, SeedableRng}; use core::convert::AsRef; use core::fmt; -#[cfg(feature = "serde1")] -use serde::{Deserialize, Serialize}; +#[cfg(feature = "serde1")] use serde::{Deserialize, Serialize}; /// A trait for RNGs which do not generate random numbers individually, but in /// blocks (typically `[u32; N]`). This technique is commonly used by @@ -71,6 +70,12 @@ pub trait BlockRngCore { /// Results type. This is the 'block' an RNG implementing `BlockRngCore` /// generates, which will usually be an array like `[u32; 16]`. + /// + /// # Panics + /// + /// Code which uses this type is allowed to assume that `Results` is a type + /// with at least one element. It may panic or exhibit incorrect behavior + /// if this is not the case. It may *not* exhibit undefined behavior. type Results: AsRef<[Self::Item]> + AsMut<[Self::Item]> + Default; /// Generate a new block of results. 
@@ -179,8 +184,7 @@ impl<R: BlockRngCore> BlockRng<R> { } impl<R: BlockRngCore<Item = u32>> RngCore for BlockRng<R> -where - <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>, +where <R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]> { #[inline] fn next_u32(&mut self) -> u32 { @@ -347,12 +351,11 @@ impl<R: BlockRngCore> BlockRng64<R> { } impl<R: BlockRngCore<Item = u64>> RngCore for BlockRng64<R> -where - <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]>, +where <R as BlockRngCore>::Results: AsRef<[u64]> + AsMut<[u64]> { #[inline] fn next_u32(&mut self) -> u32 { - let mut index = self.index * 2 - self.half_used as usize; + let mut index = self.index * 2 - usize::from(self.half_used); if index >= self.results.as_ref().len() * 2 { self.core.generate(&mut self.results); self.index = 0; @@ -364,14 +367,34 @@ where self.half_used = !self.half_used; self.index += self.half_used as usize; - // Index as if this is a u32 slice. + let index = if cfg!(target_endian = "little") { + index + } else { + index ^ 1 + }; + + let results = self.results.as_ref(); + // Convert the `&[u64]` into an equivalent `&[u32]`, which has twice as + // many elements. The conversion is sound because `u64` never has a + // smaller alignment requirement than `u32`. + let ptr = results.as_ptr() as *const u32; + // We are guaranteed that this multiplication can't overflow. If it did + // overflow, that would mean that there were at least 2^(word size) + // 32-bit values in the slice, and thus 4 * 2^(word size) bytes. There + // can only ever be at most 2^(word size) - 1 addressable bytes in + // memory at a time, so we could never be in this situation. + let len = results.len() * 2; + unsafe { - let results = &*(self.results.as_ref() as *const [u64] as *const [u32]); - if cfg!(target_endian = "little") { - *results.get_unchecked(index) - } else { - *results.get_unchecked(index ^ 1) - } + let results = core::slice::from_raw_parts(ptr, len); + // Note that this bounds check is required for soundness. 
There's no + // guarantee that `<<R as BlockRngCore>::Results as AsRef<[u64]>>::as_ref`, + // which is supplied by code outside of this crate, returns a slice + // of any particular length, or even that two subsequent calls to it + // return slices of the same length. Thus, it would be unsound to + // assume that this index is in-bounds and to elide the bounds + // check. results[index] } } diff --git a/rand_core/src/error.rs b/rand_core/src/error.rs index a64c430da8b..b3cc4dfa547 100644 --- a/rand_core/src/error.rs +++ b/rand_core/src/error.rs @@ -50,9 +50,7 @@ impl Error { #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] #[inline] pub fn new<E>(err: E) -> Self - where - E: Into<Box<dyn std::error::Error + Send + Sync + 'static>>, - { + where E: Into<Box<dyn std::error::Error + Send + Sync + 'static>> { Error { inner: err.into() } } @@ -223,6 +221,9 @@ mod test { fn test_error_codes() { // Make sure the values are the same as in `getrandom`. assert_eq!(super::Error::CUSTOM_START, getrandom::Error::CUSTOM_START); - assert_eq!(super::Error::INTERNAL_START, getrandom::Error::INTERNAL_START); + assert_eq!( + super::Error::INTERNAL_START, + getrandom::Error::INTERNAL_START + ); } } diff --git a/rand_core/src/impls.rs b/rand_core/src/impls.rs index 2588a72ea3f..498b89c769d 100644 --- a/rand_core/src/impls.rs +++ b/rand_core/src/impls.rs @@ -65,17 +65,15 @@ macro_rules! fill_via_chunks {
core::ptr::copy_nonoverlapping( $src.as_ptr() as *const u8, $dst.as_mut_ptr(), - chunk_size_u8); + chunk_size_u8, + ); } } else { for (&n, chunk) in $src.iter().zip($dst.chunks_mut(SIZE)) { let tmp = n.to_le(); let src_ptr = &tmp as *const $ty as *const u8; unsafe { - core::ptr::copy_nonoverlapping( - src_ptr, - chunk.as_mut_ptr(), - chunk.len()); + core::ptr::copy_nonoverlapping(src_ptr, chunk.as_mut_ptr(), chunk.len()); } } } diff --git a/rand_core/src/lib.rs b/rand_core/src/lib.rs index bc24270771b..74478ac1602 100644 --- a/rand_core/src/lib.rs +++ b/rand_core/src/lib.rs @@ -41,8 +41,8 @@ use core::convert::AsMut; use core::default::Default; -#[cfg(feature = "std")] extern crate std; #[cfg(feature = "alloc")] extern crate alloc; +#[cfg(feature = "std")] extern crate std; #[cfg(feature = "alloc")] use alloc::boxed::Box; pub use error::Error;