From 1d0f40fcd5a5004f56dd7c16dedba064cf3087f1 Mon Sep 17 00:00:00 2001
From: ImmConCon <43708554+ImmemorConsultrixContrarie@users.noreply.github.com>
Date: Thu, 23 Jan 2020 16:06:27 +0200
Subject: [PATCH 1/3] Rewrite boilerplate impls into macro

Two problems: 1) I accidentally ran rustfmt over the whole file;
2) the `usize` impl now has TYPENAME "usize" instead of "u32" or "u64".
The old names could be brought back by implementing the trait for
`usize` as a standalone impl with some `cfg!()`s.

---
 src/store.rs | 313 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 186 insertions(+), 127 deletions(-)

diff --git a/src/store.rs b/src/store.rs
index c73b75be..02ebe2b7 100644
--- a/src/store.rs
+++ b/src/store.rs
@@ -9,40 +9,20 @@ concrete bits in fundamental elements. They are implementation details, and
 are not exported in the prelude.
 !*/
 
-use crate::{
-	access::BitAccess,
-	indices::BitIdx,
-	order::BitOrder,
-};
+use crate::{access::BitAccess, indices::BitIdx, order::BitOrder};
 
 use core::{
-	convert::TryInto,
-	fmt::{
-		Binary,
-		Debug,
-		Display,
-		LowerHex,
-		UpperHex,
-	},
-	mem::size_of,
-	ops::{
-		BitAnd,
-		BitAndAssign,
-		BitOr,
-		BitOrAssign,
-		Not,
-		Shl,
-		ShlAssign,
-		Shr,
-		ShrAssign,
-	},
-	slice,
+    convert::TryInto,
+    fmt::{Binary, Debug, Display, LowerHex, UpperHex},
+    mem::size_of,
+    ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not, Shl, ShlAssign, Shr, ShrAssign},
+    slice,
 };
 
 use radium::marker::BitOps;
 
 #[cfg(feature = "atomic")]
-use core::sync::atomic;
+use core::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize};
 
 #[cfg(not(feature = "atomic"))]
 use core::cell::Cell;
@@ -293,149 +273,205 @@ be used in array types `[T; elts(len)]`.
 **/
 #[doc(hidden)]
 pub const fn elts<T>(bits: usize) -> usize {
-	let width: usize = size_of::<T>() * 8;
-	bits / width + (bits % width != 0) as usize
+    let width: usize = size_of::<T>() * 8;
+    bits / width + (bits % width != 0) as usize
+}
+
+macro_rules! bitstore {
+    ($($T:ty => $Size:literal ; $Atom:ty)*) => {
+        $(
+            impl BitStore for $T {
+                const TYPENAME: &'static str = core::stringify!($T);
+
+                const FALSE: Self = 0;
+                const TRUE: Self = !0;
+
+                #[cfg(feature = "atomic")]
+                type Access = $Atom;
+
+                #[cfg(not(feature = "atomic"))]
+                type Access = Cell<Self>;
+
+                #[inline]
+                fn as_bytes(&self) -> &[u8] {
+                    unsafe { slice::from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
+                }
+
+                #[inline]
+                fn from_bytes(bytes: &[u8]) -> Self {
+                    bytes
+                        .try_into()
+                        .map(Self::from_ne_bytes)
+                        .expect(concat!("<", core::stringify!($T), " as BitStore>::from_bytes requires a slice of length ", $Size))
+                }
+
+                #[inline(always)]
+                fn count_ones(self) -> usize {
+                    Self::count_ones(self) as usize
+                }
+            }
+        )*
+    };
+
+    (#![$M:meta] $($T:ty => $Size:literal ; $Atom:ty)+) => {
+        $(
+            #[$M]
+            bitstore!($T => $Size ; $Atom);
+        )+
+    };
+}
+
+bitstore! {
+    u8 => 1 ; AtomicU8
+    u16 => 2 ; AtomicU16
+    u32 => 4 ; AtomicU32
+}
+bitstore! {
+    #![cfg(target_pointer_width = "32")]
+    usize => 4 ; AtomicUsize
+}
+bitstore!
{ + #![cfg(target_pointer_width = "64")] + u64 => 8 ; AtomicU64 + usize => 8 ; AtomicUsize +} + +/* impl BitStore for u8 { - const TYPENAME: &'static str = "u8"; + const TYPENAME: &'static str = "u8"; - const FALSE: Self = 0; - const TRUE: Self = !0; + const FALSE: Self = 0; + const TRUE: Self = !0; - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU8; + #[cfg(feature = "atomic")] + type Access = atomic::AtomicU8; - #[cfg(not(feature = "atomic"))] - type Access = Cell; + #[cfg(not(feature = "atomic"))] + type Access = Cell; - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, 1) } - } + #[inline] + fn as_bytes(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self as *const Self as *const u8, 1) } + } - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of length 1") - } + #[inline] + fn from_bytes(bytes: &[u8]) -> Self { + bytes + .try_into() + .map(Self::from_ne_bytes) + .expect("::from_bytes requires a slice of length 1") + } } impl BitStore for u16 { - const TYPENAME: &'static str = "u16"; + const TYPENAME: &'static str = "u16"; - const FALSE: Self = 0; - const TRUE: Self = !0; + const FALSE: Self = 0; + const TRUE: Self = !0; - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU16; + #[cfg(feature = "atomic")] + type Access = atomic::AtomicU16; - #[cfg(not(feature = "atomic"))] - type Access = Cell; + #[cfg(not(feature = "atomic"))] + type Access = Cell; - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, 2) } - } + #[inline] + fn as_bytes(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self as *const Self as *const u8, 2) } + } - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of length 2") - } + #[inline] + fn from_bytes(bytes: &[u8]) -> Self { + bytes + .try_into() + .map(Self::from_ne_bytes) + .expect("::from_bytes requires a slice of length 2") + } } impl BitStore for u32 { - const TYPENAME: &'static str = "u32"; + const TYPENAME: &'static str = "u32"; - const FALSE: Self = 0; - const TRUE: Self = !0; + const FALSE: Self = 0; + const TRUE: Self = !0; - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU32; + #[cfg(feature = "atomic")] + type Access = atomic::AtomicU32; - #[cfg(not(feature = "atomic"))] - type Access = Cell; + #[cfg(not(feature = "atomic"))] + type Access = Cell; - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, 4) } - } + #[inline] + fn as_bytes(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self as *const Self as *const u8, 4) } + } - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of length 4") - } + #[inline] + fn from_bytes(bytes: &[u8]) -> Self { + bytes + .try_into() + .map(Self::from_ne_bytes) + .expect("::from_bytes requires a slice of length 4") + } } #[cfg(target_pointer_width = "64")] impl BitStore for u64 { - const TYPENAME: &'static str = "u64"; + const TYPENAME: &'static str = "u64"; - const FALSE: Self = 0; - const TRUE: Self = !0; + const FALSE: Self = 0; + const TRUE: Self = !0; - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU64; + #[cfg(feature = "atomic")] + type Access = atomic::AtomicU64; - #[cfg(not(feature = "atomic"))] - type Access = Cell; + 
#[cfg(not(feature = "atomic"))]
+    type Access = Cell<Self>;
 
-	#[inline]
-	fn as_bytes(&self) -> &[u8] {
-		unsafe { slice::from_raw_parts(self as *const Self as *const u8, 8) }
-	}
+    #[inline]
+    fn as_bytes(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self as *const Self as *const u8, 8) }
+    }
 
-	#[inline]
-	fn from_bytes(bytes: &[u8]) -> Self {
-		bytes
-			.try_into()
-			.map(Self::from_ne_bytes)
-			.expect("<u64 as BitStore>::from_bytes requires a slice of length 8")
-	}
+    #[inline]
+    fn from_bytes(bytes: &[u8]) -> Self {
+        bytes
+            .try_into()
+            .map(Self::from_ne_bytes)
+            .expect("<u64 as BitStore>::from_bytes requires a slice of length 8")
+    }
 }
 
 impl BitStore for usize {
-	#[cfg(target_pointer_width = "32")]
-	const TYPENAME: &'static str = "u32";
+    #[cfg(target_pointer_width = "32")]
+    const TYPENAME: &'static str = "u32";
 
-	#[cfg(target_pointer_width = "64")]
-	const TYPENAME: &'static str = "u64";
+    #[cfg(target_pointer_width = "64")]
+    const TYPENAME: &'static str = "u64";
 
-	const FALSE: Self = 0;
-	const TRUE: Self = !0;
+    const FALSE: Self = 0;
+    const TRUE: Self = !0;
 
-	#[cfg(feature = "atomic")]
-	type Access = atomic::AtomicUsize;
+    #[cfg(feature = "atomic")]
+    type Access = atomic::AtomicUsize;
 
-	#[cfg(not(feature = "atomic"))]
-	type Access = Cell<Self>;
+    #[cfg(not(feature = "atomic"))]
+    type Access = Cell<Self>;
 
-	#[inline]
-	fn as_bytes(&self) -> &[u8] {
-		unsafe {
-			slice::from_raw_parts(
-				self as *const Self as *const u8,
-				size_of::<Self>(),
-			)
-		}
-	}
+    #[inline]
+    fn as_bytes(&self) -> &[u8] {
+        unsafe { slice::from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
+    }
 
-	#[inline]
-	fn from_bytes(bytes: &[u8]) -> Self {
-		bytes
-			.try_into()
-			.map(Self::from_ne_bytes)
-			.expect("<usize as BitStore>::from_bytes requires a slice of its exact width in bytes")
-	}
+    #[inline]
+    fn from_bytes(bytes: &[u8]) -> Self {
+        bytes
+            .try_into()
+            .map(Self::from_ne_bytes)
+            .expect("<usize as BitStore>::from_bytes requires a slice of its exact width in bytes")
+    }
 }
+*/
 
 #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
 compile_fail!("This architecture is currently not supported. File an issue at https://github.com/myrrlyn/bitvec");
@@ -450,6 +486,28 @@ private, this trait effectively forbids downstream implementation of the
 #[doc(hidden)]
 pub trait Sealed {}
 
+macro_rules! seal {
+    ($($T:ty)*) => {
+        $(
+            impl Sealed for $T {}
+        )*
+    };
+
+    (#![$M:meta] $($T:ty)+) => {
+        $(
+            #[$M]
+            seal!($T);
+        )+
+    };
+}
+
+seal! {u8 u16 u32 usize}
+seal! {
+    #![cfg(target_pointer_width = "64")]
+    u64
+}
+
+/*
 impl Sealed for u8 {}
 impl Sealed for u16 {}
 impl Sealed for u32 {}
@@ -458,3 +516,4 @@ impl Sealed for u32 {}
 impl Sealed for u64 {}
 
 impl Sealed for usize {}
+*/

From f2a3d8abb235a241d929ad32fde971a85c28db0a Mon Sep 17 00:00:00 2001
From: ImmConCon <43708554+ImmemorConsultrixContrarie@users.noreply.github.com>
Date: Thu, 23 Jan 2020 17:57:29 +0200
Subject: [PATCH 2/3] Changed resize insides into pure ptr operations

Rustfmt'ed the file again, whatever. Tested: all tests pass, except the
ones generated by the `fmt!` macro in `../traits.rs`, which simply did
not compile (I am not sure whether those are meant to be tests at all).
The unsafe block is now pretty straightforward and clean, and should
optimize down to a single copy.
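
For reviewers, below is a minimal standalone sketch of the byte-copy
technique this patch uses. The name `resize_sketch` is illustrative only
and not part of the patch, and it assumes plain unsigned integer types,
for which an all-zero bit pattern is valid; the patch itself routes this
through `BitStore`. The key invariants are that the copy length is the
smaller of the two type widths, and that on big-endian targets both copy
windows are aligned to the high-address (least significant) end:

    use core::{mem, ptr};

    /// Zero-extends or truncates `value` into a `U` by copying raw bytes.
    fn resize_sketch<T: Copy, U: Copy>(value: T) -> U {
        let from = mem::size_of::<T>();
        let into = mem::size_of::<U>();
        // Never copy more bytes than the destination can hold, nor more
        // than the source actually has.
        let count = from.min(into);
        unsafe {
            // Unfilled destination bytes must stay zero, so that widening
            // zero-extends.
            let mut out = mem::MaybeUninit::<U>::zeroed();
            let src = &value as *const T as *const u8;
            let dst = out.as_mut_ptr() as *mut u8;
            if cfg!(target_endian = "big") {
                // Big-endian: the least significant bytes live at the end,
                // so align both copy windows to the tail.
                ptr::copy_nonoverlapping(
                    src.add(from - count),
                    dst.add(into - count),
                    count,
                );
            } else {
                // Little-endian: the least significant bytes live at the
                // start, so copy from the front.
                ptr::copy_nonoverlapping(src, dst, count);
            }
            out.assume_init()
        }
    }

    // On either endianness: resize_sketch::<u32, u16>(0x1234_5678) == 0x5678,
    // and resize_sketch::<u16, u64>(0x00FF) == 0x00FF.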
--- src/fields.rs | 1959 +++++++++++++++++++++++++------------------------ src/store.rs | 201 +---- 2 files changed, 1030 insertions(+), 1130 deletions(-) diff --git a/src/fields.rs b/src/fields.rs index 34f3ae9d..52ab1790 100644 --- a/src/fields.rs +++ b/src/fields.rs @@ -39,13 +39,10 @@ elements differently, for instance by calling `.to_be_bytes` before store and !*/ use crate::{ - access::BitAccess, - order::{ - Lsb0, - Msb0, - }, - slice::BitSlice, - store::BitStore, + access::BitAccess, + order::{Lsb0, Msb0}, + slice::BitSlice, + store::BitStore, }; use core::mem; @@ -53,11 +50,7 @@ use core::mem; use either::Either; #[cfg(feature = "alloc")] -use crate::{ - boxed::BitBox, - order::BitOrder, - vec::BitVec, -}; +use crate::{boxed::BitBox, order::BitOrder, vec::BitVec}; /** Permit a specific `BitSlice` to be used for C-style bitfield access. @@ -78,772 +71,824 @@ Methods should be called as `bits[start .. end].load_or_store()`, where the range subslice selects up to but no more than the `U::BITS` element width. **/ pub trait BitField { - /// Load the sequence of bits from `self` into the least-significant bits of - /// an element. - /// - /// This can load any fundamental type which implements `BitStore`. Other - /// Rust fundamental types which do not implement it must be recast - /// appropriately by the user. - /// - /// The default implementation of this function calls [`load_le`] on - /// little-endian byte-ordered CPUs, and [`load_be`] on big-endian - /// byte-ordered CPUs. - /// - /// # Parameters - /// - /// - `&self`: A read reference to some bits in memory. This slice must be - /// trimmed to have a width no more than the `U::BITS` width of the type - /// being loaded. This can be accomplished with range indexing on a larger - /// slice. - /// - /// # Returns - /// - /// A `U` value whose least `self.len()` significant bits are filled with - /// the bits of `self`. - /// - /// # Panics - /// - /// If `self` is empty, or wider than a single `U` element, this panics. - /// - /// [`load_be`]: #tymethod.load_be - /// [`load_le`]: #tymethod.load_le - fn load(&self) -> U - where U: BitStore { - #[cfg(target_endian = "little")] - return self.load_le(); - - #[cfg(target_endian = "big")] - return self.load_be(); - } - - /// Load from `self`, using little-endian element ordering. - /// - /// This function interprets a multi-element slice as having its least - /// significant chunk in the low memory address, and its most significant - /// chunk in the high memory address. Each element `T` is still interpreted - /// from individual bytes according to the local CPU ordering. - /// - /// # Parameters - /// - /// - `&self`: A read reference to some bits in memory. This slice must be - /// trimmed to have a width no more than the `U::BITS` width of the type - /// being loaded. This can be accomplished with range indexing on a larger - /// slice. - /// - /// # Returns - /// - /// A `U` value whose least `self.len()` significant bits are filled with - /// the bits of `self`. If `self` spans multiple `T` elements, then the - /// lowest-address `T` is interpreted as containing the least significant - /// bits of the `U` return value, and the highest-address `T` is interpreted - /// as containing its most significant bits. - /// - /// # Panics - /// - /// If `self` is empty, or wider than a single `U` element, this panics. - fn load_le(&self) -> U - where U: BitStore; - - /// Load from `self`, using big-endian element ordering. 
- /// - /// This function interprets a multi-element slice as having its most - /// significant chunk in the low memory address, and its least significant - /// chunk in the high memory address. Each element `T` is still interpreted - /// from individual bytes according to the local CPU ordering. - /// - /// # Parameters - /// - /// - `&self`: A read reference to some bits in memory. This slice must be - /// trimmed to have a width no more than the `U::BITS` width of the type - /// being loaded. This can be accomplished with range indexing on a larger - /// slice. - /// - /// # Returns - /// - /// A `U` value whose least `self.len()` significant bits are filled with - /// the bits of `self`. If `self` spans multiple `T` elements, then the - /// lowest-address `T` is interpreted as containing the most significant - /// bits of the `U` return value, and the highest-address `T` is interpreted - /// as containing its least significant bits. - fn load_be(&self) -> U - where U: BitStore; - - /// Stores a sequence of bits from the user into the domain of `self`. - /// - /// This can store any fundamental type which implements `BitStore`. Other - /// Rust fundamental types which do not implement it must be recast - /// appropriately by the user. - /// - /// The default implementation of this function calls [`store_le`] on - /// little-endian byte-ordered CPUs, and [`store_be`] on big-endian - /// byte-ordered CPUs. - /// - /// # Parameters - /// - /// - `&mut self`: A write reference to some bits in memory. This slice must - /// be trimmed to have a width no more than the `U::BITS` width of the - /// type being stored. This can be accomplished with range indexing on a - /// larger slice. - /// - `value`: A value, whose `self.len()` least significant bits will be - /// stored into `self`. - /// - /// # Behavior - /// - /// The `self.len()` least significant bits of `value` are written into the - /// domain of `self`. - /// - /// # Panics - /// - /// If `self` is empty, or wider than a single `U` element, this panics. - /// - /// [`store_be`]: #tymethod.store_be - /// [`store_le`]: #tymethod.store_le - fn store(&mut self, value: U) - where U: BitStore { - #[cfg(target_endian = "little")] - self.store_le(value); - - #[cfg(target_endian = "big")] - self.store_be(value); - } - - /// Store into `self`, using little-endian element ordering. - /// - /// This function interprets a multi-element slice as having its least - /// significant chunk in the low memory address, and its most significant - /// chunk in the high memory address. Each element `T` is still interpreted - /// from individual bytes according to the local CPU ordering. - /// - /// # Parameters - /// - /// - `&mut self`: A write reference to some bits in memory. This slice must - /// be trimmed to have a width no more than the `U::BITS` width of the - /// type being stored. This can be accomplished with range indexing on a - /// larger slice. - /// - `value`: A value, whose `self.len()` least significant bits will be - /// stored into `self`. - /// - /// # Behavior - /// - /// The `self.len()` least significant bits of `value` are written into the - /// domain of `self`. If `self` spans multiple `T` elements, then the - /// lowest-address `T` is interpreted as containing the least significant - /// bits of the `U` return value, and the highest-address `T` is interpreted - /// as containing its most significant bits. - /// - /// # Panics - /// - /// If `self` is empty, or wider than a single `U` element, this panics. 
- fn store_le(&mut self, value: U) - where U: BitStore; - - /// Store into `self`, using big-endian element ordering. - /// - /// This function interprets a multi-element slice as having its most - /// significant chunk in the low memory address, and its least significant - /// chunk in the high memory address. Each element `T` is still interpreted - /// from individual bytes according to the local CPU ordering. - /// - /// # Parameters - /// - /// - `&mut self`: A write reference to some bits in memory. This slice must - /// be trimmed to have a width no more than the `U::BITS` width of the - /// type being stored. This can be accomplished with range indexing on a - /// larger slice. - /// - `value`: A value, whose `self.len()` least significant bits will be - /// stored into `self`. - /// - /// # Behavior - /// - /// The `self.len()` least significant bits of `value` are written into the - /// domain of `self`. If `self` spans multiple `T` elements, then the - /// lowest-address `T` is interpreted as containing the most significant - /// bits of the `U` return value, and the highest-address `T` is interpreted - /// as containing its least significant bits. - /// - /// # Panics - /// - /// If `self` is empty, or wider than a single `U` element, this panics. - fn store_be(&mut self, value: U) - where U: BitStore; + /// Load the sequence of bits from `self` into the least-significant bits of + /// an element. + /// + /// This can load any fundamental type which implements `BitStore`. Other + /// Rust fundamental types which do not implement it must be recast + /// appropriately by the user. + /// + /// The default implementation of this function calls [`load_le`] on + /// little-endian byte-ordered CPUs, and [`load_be`] on big-endian + /// byte-ordered CPUs. + /// + /// # Parameters + /// + /// - `&self`: A read reference to some bits in memory. This slice must be + /// trimmed to have a width no more than the `U::BITS` width of the type + /// being loaded. This can be accomplished with range indexing on a larger + /// slice. + /// + /// # Returns + /// + /// A `U` value whose least `self.len()` significant bits are filled with + /// the bits of `self`. + /// + /// # Panics + /// + /// If `self` is empty, or wider than a single `U` element, this panics. + /// + /// [`load_be`]: #tymethod.load_be + /// [`load_le`]: #tymethod.load_le + fn load(&self) -> U + where + U: BitStore, + { + #[cfg(target_endian = "little")] + return self.load_le(); + + #[cfg(target_endian = "big")] + return self.load_be(); + } + + /// Load from `self`, using little-endian element ordering. + /// + /// This function interprets a multi-element slice as having its least + /// significant chunk in the low memory address, and its most significant + /// chunk in the high memory address. Each element `T` is still interpreted + /// from individual bytes according to the local CPU ordering. + /// + /// # Parameters + /// + /// - `&self`: A read reference to some bits in memory. This slice must be + /// trimmed to have a width no more than the `U::BITS` width of the type + /// being loaded. This can be accomplished with range indexing on a larger + /// slice. + /// + /// # Returns + /// + /// A `U` value whose least `self.len()` significant bits are filled with + /// the bits of `self`. 
If `self` spans multiple `T` elements, then the + /// lowest-address `T` is interpreted as containing the least significant + /// bits of the `U` return value, and the highest-address `T` is interpreted + /// as containing its most significant bits. + /// + /// # Panics + /// + /// If `self` is empty, or wider than a single `U` element, this panics. + fn load_le(&self) -> U + where + U: BitStore; + + /// Load from `self`, using big-endian element ordering. + /// + /// This function interprets a multi-element slice as having its most + /// significant chunk in the low memory address, and its least significant + /// chunk in the high memory address. Each element `T` is still interpreted + /// from individual bytes according to the local CPU ordering. + /// + /// # Parameters + /// + /// - `&self`: A read reference to some bits in memory. This slice must be + /// trimmed to have a width no more than the `U::BITS` width of the type + /// being loaded. This can be accomplished with range indexing on a larger + /// slice. + /// + /// # Returns + /// + /// A `U` value whose least `self.len()` significant bits are filled with + /// the bits of `self`. If `self` spans multiple `T` elements, then the + /// lowest-address `T` is interpreted as containing the most significant + /// bits of the `U` return value, and the highest-address `T` is interpreted + /// as containing its least significant bits. + fn load_be(&self) -> U + where + U: BitStore; + + /// Stores a sequence of bits from the user into the domain of `self`. + /// + /// This can store any fundamental type which implements `BitStore`. Other + /// Rust fundamental types which do not implement it must be recast + /// appropriately by the user. + /// + /// The default implementation of this function calls [`store_le`] on + /// little-endian byte-ordered CPUs, and [`store_be`] on big-endian + /// byte-ordered CPUs. + /// + /// # Parameters + /// + /// - `&mut self`: A write reference to some bits in memory. This slice must + /// be trimmed to have a width no more than the `U::BITS` width of the + /// type being stored. This can be accomplished with range indexing on a + /// larger slice. + /// - `value`: A value, whose `self.len()` least significant bits will be + /// stored into `self`. + /// + /// # Behavior + /// + /// The `self.len()` least significant bits of `value` are written into the + /// domain of `self`. + /// + /// # Panics + /// + /// If `self` is empty, or wider than a single `U` element, this panics. + /// + /// [`store_be`]: #tymethod.store_be + /// [`store_le`]: #tymethod.store_le + fn store(&mut self, value: U) + where + U: BitStore, + { + #[cfg(target_endian = "little")] + self.store_le(value); + + #[cfg(target_endian = "big")] + self.store_be(value); + } + + /// Store into `self`, using little-endian element ordering. + /// + /// This function interprets a multi-element slice as having its least + /// significant chunk in the low memory address, and its most significant + /// chunk in the high memory address. Each element `T` is still interpreted + /// from individual bytes according to the local CPU ordering. + /// + /// # Parameters + /// + /// - `&mut self`: A write reference to some bits in memory. This slice must + /// be trimmed to have a width no more than the `U::BITS` width of the + /// type being stored. This can be accomplished with range indexing on a + /// larger slice. + /// - `value`: A value, whose `self.len()` least significant bits will be + /// stored into `self`. 
+ /// + /// # Behavior + /// + /// The `self.len()` least significant bits of `value` are written into the + /// domain of `self`. If `self` spans multiple `T` elements, then the + /// lowest-address `T` is interpreted as containing the least significant + /// bits of the `U` return value, and the highest-address `T` is interpreted + /// as containing its most significant bits. + /// + /// # Panics + /// + /// If `self` is empty, or wider than a single `U` element, this panics. + fn store_le(&mut self, value: U) + where + U: BitStore; + + /// Store into `self`, using big-endian element ordering. + /// + /// This function interprets a multi-element slice as having its most + /// significant chunk in the low memory address, and its least significant + /// chunk in the high memory address. Each element `T` is still interpreted + /// from individual bytes according to the local CPU ordering. + /// + /// # Parameters + /// + /// - `&mut self`: A write reference to some bits in memory. This slice must + /// be trimmed to have a width no more than the `U::BITS` width of the + /// type being stored. This can be accomplished with range indexing on a + /// larger slice. + /// - `value`: A value, whose `self.len()` least significant bits will be + /// stored into `self`. + /// + /// # Behavior + /// + /// The `self.len()` least significant bits of `value` are written into the + /// domain of `self`. If `self` spans multiple `T` elements, then the + /// lowest-address `T` is interpreted as containing the most significant + /// bits of the `U` return value, and the highest-address `T` is interpreted + /// as containing its least significant bits. + /// + /// # Panics + /// + /// If `self` is empty, or wider than a single `U` element, this panics. + fn store_be(&mut self, value: U) + where + U: BitStore; } impl BitField for BitSlice -where T: BitStore { - fn load_le(&self) -> U - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); - } - - match self.bitptr().domain().splat() { - /* The live bits are in the interior of a single element. - - This path only needs to load the element, shift it right by the - distance from LSedge to the live region, and mask it for the length - of `self`. - */ - Either::Right((head, elt, _)) => - resize((elt.load() >> *head) & mask_for::(len)), - /* The live region touches at least one element edge. - - This block reads chunks from the slice memory into an accumulator, - from the most-significant chunk to the least-significant. Each read - must collect the live section of the chunk into a temporary, then - shift the accumulator left by the chunk’s bit width, then write the - temporary into the newly-vacated least significant bits of the - accumulator. - */ - Either::Left((head, body, tail)) => { - let mut accum = 0usize; - - // If the tail exists, it contains the most significant chunk - // of the value, on the LSedge side. - if let Some((tail, t)) = tail { - // Load, mask, resize, and store. No other data is present. - accum = resize(tail.load() & mask_for::(*t as usize)); - } - // Read the body elements, from high address to low, into the - // accumulator. - if let Some(elts) = body { - for elt in elts.iter().rev() { - let val: usize = resize(elt.load()); - accum <<= T::BITS; - accum |= val; - } - } - // If the head exists, it contains the least significant chunk - // of the value, on the MSedge side. 
- if let Some((h, head)) = head { - // Get the live region’s distance from the LSedge. - let lsedge = *h; - // Find the region width (MSedge to head). - let width = T::BITS - lsedge; - // Load the element, shift down to LSedge, and resize. - let val: usize = resize(head.load() >> lsedge); - accum <<= width; - accum |= val; - } - - resize(accum) - }, - } - } - - fn load_be(&self) -> U - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); - } - - match self.bitptr().domain().splat() { - /* The live bits are in the interior of a single element. - - This path only needs to load the element, shift it right by the - distance from LSedge to the live region, and mask it for the length - of `self`. - */ - Either::Right((head, elt, _)) => - resize((elt.load() >> *head) & mask_for::(len)), - /* The live region touches at least one element edge. - - This block reads chunks from the slice memory into an accumulator, - from the most-significant chunk to the least-significant. Each read - must collect the live section of the chunk into a temporary, then - shift the accumulator left by the chunk’s width, then write the - temporary into the newly-vacated least significant bits of the - accumulator. - */ - Either::Left((head, body, tail)) => { - let mut accum = 0usize; - - // If the head exists, it contains the most significant chunk - // of the value, on the MSedge side. - if let Some((h, head)) = head { - // Load, move, resize, and store. No other data is present. - accum = resize(head.load() >> *h); - } - // Read the body elements, from low address to high, into the - // accumulator. - if let Some(elts) = body { - for elt in elts.iter() { - let val: usize = resize(elt.load()); - accum <<= T::BITS; - accum |= val; - } - } - // If the tail exists, it contains the least significant chunk - // of the value, on the LSedge side. - if let Some((tail, t)) = tail { - // Get the live region’s width. - let width = *t as usize; - // Load, mask, and resize. - let val: usize = resize(tail.load() & mask_for::(width)); - // Shift the accumulator by the live width, and store. - accum <<= width; - accum |= val; - } - - resize(accum) - }, - } - } - - fn store_le(&mut self, value: U) - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); - } - - let value = value & mask_for(len); - match self.bitptr().domain().splat() { - /* The live region is in the interior of a single element. - - The `value` is shifted left by the region’s distance from the - LSedge, then written directly into place. - */ - Either::Right((head, elt, _)) => { - // Get the region’s distance from the LSedge. - let lsedge = *head; - // Erase the live region. - elt.clear_bits(!(mask_for::(len) << lsedge)); - // Shift the value to fit the region, and write. - elt.set_bits(resize::(value) << lsedge); - }, - /* The live region touches at least one element edge. - - This block writes chunks from the value into slice memory, from the - least-significant chunk to the most-significant. Each write moves - a slice chunk’s width of bits from the LSedge of the value into - memory, then shifts the value right by that width. - */ - Either::Left((head, body, tail)) => { - let mut value: usize = resize(value); - - // If the head exists, it contains the least significant chunk - // of the value, on the MSedge side. 
- if let Some((h, head)) = head { - // Get the region distance from the LSedge. - let lsedge = *h; - // Find the region width (MSedge to head). - let width = T::BITS - lsedge; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - head.clear_bits(T::TRUE >> width); - // Shift the snippet to fit the region, and write. - head.set_bits(resize::(val) << lsedge); - // Discard the now-written bits from the value. - value >>= width; - } - // Write into the body elements, from low address to high, from - // the value. - if let Some(elts) = body { - for elt in elts.iter() { - elt.store(resize(value)); - value >>= T::BITS; - } - } - // If the tail exists, it contains the most significant chunk - // of the value, on the LSedge side. - if let Some((tail, t)) = tail { - // Get the region width. - let width = *t; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - tail.clear_bits(T::TRUE << width); - // Write the snippet into the region. - tail.set_bits(resize(val)); - } - }, - } - } - - fn store_be(&mut self, value: U) - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); - } - - let value = value & mask_for(len); - match self.bitptr().domain().splat() { - /* The live region is in the interior of a single element. - - The `value` is shifted left by the region’s distance from the - LSedge, then written directly into place. - */ - Either::Right((head, elt, _)) => { - // Get the region’s distance from the LSedge. - let lsedge = *head; - // Erase the live region. - elt.clear_bits(!(mask_for::(len) << lsedge)); - // Shift the value to fit the region, and write. - elt.set_bits(resize::(value) << lsedge); - }, - Either::Left((head, body, tail)) => { - let mut value: usize = resize(value); - - // If the tail exists, it contains the least significant chunk - // of the value, on the LSedge side. - if let Some((tail, t)) = tail { - // Get the region width. - let width = *t; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - tail.clear_bits(T::TRUE << width); - // Write the snippet into the region. - tail.set_bits(resize(val)); - // Discard the now-written bits from the value. - value >>= width; - } - // Write into the body elements, from high address to low, from - // the value. - if let Some(elts) = body { - for elt in elts.iter().rev() { - elt.store(resize(value)); - value >>= T::BITS; - } - } - // If the head exists, it contains the most significant chunk - // of the value, on the MSedge side. - if let Some((h, head)) = head { - // Get the region distance from the LSedge. - let lsedge = *h; - // Find the region width (MSedge to head). - let width = T::BITS - lsedge; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - head.clear_bits(T::TRUE >> width); - // Shift the snippet to fit the region, and write. - head.set_bits(resize::(val) << lsedge); - } - }, - } - } +where + T: BitStore, +{ + fn load_le(&self) -> U + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); + } + + match self.bitptr().domain().splat() { + /* The live bits are in the interior of a single element. 
+ + This path only needs to load the element, shift it right by the + distance from LSedge to the live region, and mask it for the length + of `self`. + */ + Either::Right((head, elt, _)) => resize((elt.load() >> *head) & mask_for::(len)), + /* The live region touches at least one element edge. + + This block reads chunks from the slice memory into an accumulator, + from the most-significant chunk to the least-significant. Each read + must collect the live section of the chunk into a temporary, then + shift the accumulator left by the chunk’s bit width, then write the + temporary into the newly-vacated least significant bits of the + accumulator. + */ + Either::Left((head, body, tail)) => { + let mut accum = 0usize; + + // If the tail exists, it contains the most significant chunk + // of the value, on the LSedge side. + if let Some((tail, t)) = tail { + // Load, mask, resize, and store. No other data is present. + accum = resize(tail.load() & mask_for::(*t as usize)); + } + // Read the body elements, from high address to low, into the + // accumulator. + if let Some(elts) = body { + for elt in elts.iter().rev() { + let val: usize = resize(elt.load()); + accum <<= T::BITS; + accum |= val; + } + } + // If the head exists, it contains the least significant chunk + // of the value, on the MSedge side. + if let Some((h, head)) = head { + // Get the live region’s distance from the LSedge. + let lsedge = *h; + // Find the region width (MSedge to head). + let width = T::BITS - lsedge; + // Load the element, shift down to LSedge, and resize. + let val: usize = resize(head.load() >> lsedge); + accum <<= width; + accum |= val; + } + + resize(accum) + } + } + } + + fn load_be(&self) -> U + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); + } + + match self.bitptr().domain().splat() { + /* The live bits are in the interior of a single element. + + This path only needs to load the element, shift it right by the + distance from LSedge to the live region, and mask it for the length + of `self`. + */ + Either::Right((head, elt, _)) => resize((elt.load() >> *head) & mask_for::(len)), + /* The live region touches at least one element edge. + + This block reads chunks from the slice memory into an accumulator, + from the most-significant chunk to the least-significant. Each read + must collect the live section of the chunk into a temporary, then + shift the accumulator left by the chunk’s width, then write the + temporary into the newly-vacated least significant bits of the + accumulator. + */ + Either::Left((head, body, tail)) => { + let mut accum = 0usize; + + // If the head exists, it contains the most significant chunk + // of the value, on the MSedge side. + if let Some((h, head)) = head { + // Load, move, resize, and store. No other data is present. + accum = resize(head.load() >> *h); + } + // Read the body elements, from low address to high, into the + // accumulator. + if let Some(elts) = body { + for elt in elts.iter() { + let val: usize = resize(elt.load()); + accum <<= T::BITS; + accum |= val; + } + } + // If the tail exists, it contains the least significant chunk + // of the value, on the LSedge side. + if let Some((tail, t)) = tail { + // Get the live region’s width. + let width = *t as usize; + // Load, mask, and resize. + let val: usize = resize(tail.load() & mask_for::(width)); + // Shift the accumulator by the live width, and store. 
+ accum <<= width; + accum |= val; + } + + resize(accum) + } + } + } + + fn store_le(&mut self, value: U) + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); + } + + let value = value & mask_for(len); + match self.bitptr().domain().splat() { + /* The live region is in the interior of a single element. + + The `value` is shifted left by the region’s distance from the + LSedge, then written directly into place. + */ + Either::Right((head, elt, _)) => { + // Get the region’s distance from the LSedge. + let lsedge = *head; + // Erase the live region. + elt.clear_bits(!(mask_for::(len) << lsedge)); + // Shift the value to fit the region, and write. + elt.set_bits(resize::(value) << lsedge); + } + /* The live region touches at least one element edge. + + This block writes chunks from the value into slice memory, from the + least-significant chunk to the most-significant. Each write moves + a slice chunk’s width of bits from the LSedge of the value into + memory, then shifts the value right by that width. + */ + Either::Left((head, body, tail)) => { + let mut value: usize = resize(value); + + // If the head exists, it contains the least significant chunk + // of the value, on the MSedge side. + if let Some((h, head)) = head { + // Get the region distance from the LSedge. + let lsedge = *h; + // Find the region width (MSedge to head). + let width = T::BITS - lsedge; + // Take the region-width LSedge bits of the value. + let val = value & mask_for::(width as usize); + // Erase the region. + head.clear_bits(T::TRUE >> width); + // Shift the snippet to fit the region, and write. + head.set_bits(resize::(val) << lsedge); + // Discard the now-written bits from the value. + value >>= width; + } + // Write into the body elements, from low address to high, from + // the value. + if let Some(elts) = body { + for elt in elts.iter() { + elt.store(resize(value)); + value >>= T::BITS; + } + } + // If the tail exists, it contains the most significant chunk + // of the value, on the LSedge side. + if let Some((tail, t)) = tail { + // Get the region width. + let width = *t; + // Take the region-width LSedge bits of the value. + let val = value & mask_for::(width as usize); + // Erase the region. + tail.clear_bits(T::TRUE << width); + // Write the snippet into the region. + tail.set_bits(resize(val)); + } + } + } + } + + fn store_be(&mut self, value: U) + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); + } + + let value = value & mask_for(len); + match self.bitptr().domain().splat() { + /* The live region is in the interior of a single element. + + The `value` is shifted left by the region’s distance from the + LSedge, then written directly into place. + */ + Either::Right((head, elt, _)) => { + // Get the region’s distance from the LSedge. + let lsedge = *head; + // Erase the live region. + elt.clear_bits(!(mask_for::(len) << lsedge)); + // Shift the value to fit the region, and write. + elt.set_bits(resize::(value) << lsedge); + } + Either::Left((head, body, tail)) => { + let mut value: usize = resize(value); + + // If the tail exists, it contains the least significant chunk + // of the value, on the LSedge side. + if let Some((tail, t)) = tail { + // Get the region width. + let width = *t; + // Take the region-width LSedge bits of the value. 
+ let val = value & mask_for::(width as usize); + // Erase the region. + tail.clear_bits(T::TRUE << width); + // Write the snippet into the region. + tail.set_bits(resize(val)); + // Discard the now-written bits from the value. + value >>= width; + } + // Write into the body elements, from high address to low, from + // the value. + if let Some(elts) = body { + for elt in elts.iter().rev() { + elt.store(resize(value)); + value >>= T::BITS; + } + } + // If the head exists, it contains the most significant chunk + // of the value, on the MSedge side. + if let Some((h, head)) = head { + // Get the region distance from the LSedge. + let lsedge = *h; + // Find the region width (MSedge to head). + let width = T::BITS - lsedge; + // Take the region-width LSedge bits of the value. + let val = value & mask_for::(width as usize); + // Erase the region. + head.clear_bits(T::TRUE >> width); + // Shift the snippet to fit the region, and write. + head.set_bits(resize::(val) << lsedge); + } + } + } + } } impl BitField for BitSlice -where T: BitStore { - fn load_le(&self) -> U - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); - } - - match self.bitptr().domain().splat() { - /* The live bits are in the interior of a single element. - - This path only needs to load the element, shift it right by the - distance from LSedge to the live region, and mask it for the length - of `self`. - */ - Either::Right((_, elt, tail)) => - resize((elt.load() >> (T::BITS - *tail)) & mask_for::(len)), - /* The live region touches at least one element edge. - - This block reads chunks from the slice memory into an accumulator, - from the most-significant chunk to the least-significant. Each read - must collect the live section of the chunk into a temporary, then - shift the accumulator left by the chunk’s bit width, then write the - temporary into the newly-vacated least significant bits of the - accumulator. - */ - Either::Left((head, body, tail)) => { - let mut accum = 0usize; - - // If the tail exists, it contains the most significant chunk - // of the value, on the MSedge side. - if let Some((tail, t)) = tail { - // Find the live region’s distance from the LSedge. - let lsedge = T::BITS - *t; - // Load, move, resize, and store. No other data is present. - accum = resize(tail.load() >> lsedge); - } - // Read the body elements, from high address to low, into the - // accumulator. - if let Some(elts) = body { - for elt in elts.iter().rev() { - let val: usize = resize(elt.load()); - accum <<= T::BITS; - accum |= val; - } - } - // If the head exists, it contains the least significant chunk - // of the value, on the LSedge side. - if let Some((h, head)) = head { - // Find the region width (head to LSedge). - let width = (T::BITS - *h) as usize; - // Load the element, mask, and resize. - let val: usize = resize(head.load() & mask_for::(width)); - accum <<= width; - accum |= val; - } - - resize(accum) - }, - } - } - - fn load_be(&self) -> U - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); - } - - match self.bitptr().domain().splat() { - /* The live bits are in the interior of a single element. - - This path only needs to load the element, shift it right by the - distance from LSedge to the live region, and mask it for the length - of `self`. 
- */ - Either::Right((_, elt, tail)) => - resize((elt.load() >> (T::BITS - *tail)) & mask_for::(len)), - /* The live region touches at least one element edge. - - This block reads chunks from the slice memory into an accumulator, - from the most-significant chunk to the least-significant. Each read - must collect the live section of the chunk into a temporary, then - shift the accumulator left by the chunk’s bit width, then write the - temporary into the newly-vacated least significant bits of the - accumulator. - */ - Either::Left((head, body, tail)) => { - let mut accum = 0usize; - - // If the head exists, it contains the most significant chunk - // of the value, on the LSedge side. - if let Some((h, head)) = head { - // Find the region width (head to LSedge). - let width = T::BITS - *h; - // Load, mask, resize, and store. No other data is present. - accum = resize(head.load() & mask_for::(width as usize)); - } - // Read the body elements, from low address to high, into the - // accumulator. - if let Some(elts) = body { - for elt in elts.iter() { - let val: usize = resize(elt.load()); - accum <<= T::BITS; - accum |= val; - } - } - // If the tail exists, it contains the least significant chunk - // of the value, on the MSedge side. - if let Some((tail, t)) = tail { - // Find the live region’s distance from LSedge. - let lsedge = T::BITS - *t; - // Load the element, shift down to LSedge, and resize. - let val: usize = resize(tail.load() >> lsedge); - accum <<= *t; - accum |= val; - } - - resize(accum) - }, - } - } - - fn store_le(&mut self, value: U) - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); - } - - let value = value & mask_for(len); - match self.bitptr().domain().splat() { - /* The live region is in the interior of a single element. - - The `value` is shifted left by the region’s distance from the - LSedge, then written directly into place. - */ - Either::Right((_, elt, tail)) => { - // Get the region’s distance from the LSedge. - let lsedge = T::BITS - *tail; - // Erase the live region. - elt.clear_bits(!(mask_for::(len) << lsedge)); - // Shift the value to fit the region, and write. - elt.set_bits(resize::(value) << lsedge); - }, - /* The live region touches at least one element edge. - - This block writes chunks from the value into slice memory, from the - least-significant chunk to the most-significant. Each write moves a - slice chunk’s width of bits from the LSedge of the value into - memory, then shifts the value right by that width. - */ - Either::Left((head, body, tail)) => { - let mut value: usize = resize(value); - - // If the head exists, it contains the least significant chunk - // of the value, on the LSedge side. - if let Some((h, head)) = head { - // Get the region width (head to LSedge). - let width = T::BITS - *h; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - head.clear_bits(T::TRUE << width); - // Write the snippet into the region. - head.set_bits(resize(val)); - // Discard the now-written bits from the value. - value >>= width; - } - // Write into the body elements, from low address to high, from - // the value. - if let Some(elts) = body { - for elt in elts.iter() { - elt.store(resize(value)); - value >>= T::BITS; - } - } - // If the tail exists, it contains the most significant chunk - // of the value, on the MSedge side. 
- if let Some((tail, t)) = tail { - // Get the region width. - let width = *t; - // Find the region distance from the LSedge. - let lsedge = T::BITS - width; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - tail.clear_bits(T::TRUE >> width); - // Shift the snippet to fit the region, and write. - tail.set_bits(resize::(val) << lsedge); - } - }, - } - } - - fn store_be(&mut self, value: U) - where U: BitStore { - let len = self.len(); - if !(1 ..= U::BITS as usize).contains(&len) { - panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); - } - - let value = value & mask_for(len); - match self.bitptr().domain().splat() { - /* The live region is in the interior of a single element. - - The `value` is shifted left by the region’s distance from the - LSedge, then written directly into place. - */ - Either::Right((_, elt, tail)) => { - // Get the region’s distance from the LSedge. - let lsedge = T::BITS - *tail; - // Erase the live region. - elt.clear_bits(!(mask_for::(len) << lsedge)); - // Shift the value to fit the region, and write. - elt.set_bits(resize::(value) << lsedge); - }, - /* The live region touches at least one element edge. - - This block writes chunks from the value into slice memory, from the - least-significant chunk to the most-significant. Each write moves a - slice chunk’s width of bits from the LSedge of the value into - memory, then shifts the value right by that width. - */ - Either::Left((head, body, tail)) => { - let mut value: usize = resize(value); - - // If the tail exists, it contains the least significant chunk - // of the value, on the MSedge side. - if let Some((tail, t)) = tail { - // Get the region width (MSedge to tail). - let width = *t; - // Find the region distance from the LSedge. - let lsedge = T::BITS - width; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - tail.clear_bits(T::TRUE >> width); - // Shift the snippet to fit the region, and write. - tail.set_bits(resize::(val) << lsedge); - // Discard the now-written bits from the value. - value >>= width; - } - // Write into the body elements, from high address to low, from - // the value. - if let Some(elts) = body { - for elt in elts.iter().rev() { - elt.store(resize(value)); - value >>= T::BITS; - } - } - // If the head exists, it contains the most significant chunk - // of the value, on the LSedge side. - if let Some((h, head)) = head { - // Find the region width. - let width = T::BITS - *h; - // Take the region-width LSedge bits of the value. - let val = value & mask_for::(width as usize); - // Erase the region. - head.clear_bits(T::TRUE << width); - // Write the snippet into the region. - head.set_bits(resize(val)); - } - }, - } - } +where + T: BitStore, +{ + fn load_le(&self) -> U + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); + } + + match self.bitptr().domain().splat() { + /* The live bits are in the interior of a single element. + + This path only needs to load the element, shift it right by the + distance from LSedge to the live region, and mask it for the length + of `self`. + */ + Either::Right((_, elt, tail)) => { + resize((elt.load() >> (T::BITS - *tail)) & mask_for::(len)) + } + /* The live region touches at least one element edge. 
+ + This block reads chunks from the slice memory into an accumulator, + from the most-significant chunk to the least-significant. Each read + must collect the live section of the chunk into a temporary, then + shift the accumulator left by the chunk’s bit width, then write the + temporary into the newly-vacated least significant bits of the + accumulator. + */ + Either::Left((head, body, tail)) => { + let mut accum = 0usize; + + // If the tail exists, it contains the most significant chunk + // of the value, on the MSedge side. + if let Some((tail, t)) = tail { + // Find the live region’s distance from the LSedge. + let lsedge = T::BITS - *t; + // Load, move, resize, and store. No other data is present. + accum = resize(tail.load() >> lsedge); + } + // Read the body elements, from high address to low, into the + // accumulator. + if let Some(elts) = body { + for elt in elts.iter().rev() { + let val: usize = resize(elt.load()); + accum <<= T::BITS; + accum |= val; + } + } + // If the head exists, it contains the least significant chunk + // of the value, on the LSedge side. + if let Some((h, head)) = head { + // Find the region width (head to LSedge). + let width = (T::BITS - *h) as usize; + // Load the element, mask, and resize. + let val: usize = resize(head.load() & mask_for::(width)); + accum <<= width; + accum |= val; + } + + resize(accum) + } + } + } + + fn load_be(&self) -> U + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot load {} bits from a {}-bit region", U::BITS, len); + } + + match self.bitptr().domain().splat() { + /* The live bits are in the interior of a single element. + + This path only needs to load the element, shift it right by the + distance from LSedge to the live region, and mask it for the length + of `self`. + */ + Either::Right((_, elt, tail)) => { + resize((elt.load() >> (T::BITS - *tail)) & mask_for::(len)) + } + /* The live region touches at least one element edge. + + This block reads chunks from the slice memory into an accumulator, + from the most-significant chunk to the least-significant. Each read + must collect the live section of the chunk into a temporary, then + shift the accumulator left by the chunk’s bit width, then write the + temporary into the newly-vacated least significant bits of the + accumulator. + */ + Either::Left((head, body, tail)) => { + let mut accum = 0usize; + + // If the head exists, it contains the most significant chunk + // of the value, on the LSedge side. + if let Some((h, head)) = head { + // Find the region width (head to LSedge). + let width = T::BITS - *h; + // Load, mask, resize, and store. No other data is present. + accum = resize(head.load() & mask_for::(width as usize)); + } + // Read the body elements, from low address to high, into the + // accumulator. + if let Some(elts) = body { + for elt in elts.iter() { + let val: usize = resize(elt.load()); + accum <<= T::BITS; + accum |= val; + } + } + // If the tail exists, it contains the least significant chunk + // of the value, on the MSedge side. + if let Some((tail, t)) = tail { + // Find the live region’s distance from LSedge. + let lsedge = T::BITS - *t; + // Load the element, shift down to LSedge, and resize. 
+ let val: usize = resize(tail.load() >> lsedge); + accum <<= *t; + accum |= val; + } + + resize(accum) + } + } + } + + fn store_le(&mut self, value: U) + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); + } + + let value = value & mask_for(len); + match self.bitptr().domain().splat() { + /* The live region is in the interior of a single element. + + The `value` is shifted left by the region’s distance from the + LSedge, then written directly into place. + */ + Either::Right((_, elt, tail)) => { + // Get the region’s distance from the LSedge. + let lsedge = T::BITS - *tail; + // Erase the live region. + elt.clear_bits(!(mask_for::(len) << lsedge)); + // Shift the value to fit the region, and write. + elt.set_bits(resize::(value) << lsedge); + } + /* The live region touches at least one element edge. + + This block writes chunks from the value into slice memory, from the + least-significant chunk to the most-significant. Each write moves a + slice chunk’s width of bits from the LSedge of the value into + memory, then shifts the value right by that width. + */ + Either::Left((head, body, tail)) => { + let mut value: usize = resize(value); + + // If the head exists, it contains the least significant chunk + // of the value, on the LSedge side. + if let Some((h, head)) = head { + // Get the region width (head to LSedge). + let width = T::BITS - *h; + // Take the region-width LSedge bits of the value. + let val = value & mask_for::(width as usize); + // Erase the region. + head.clear_bits(T::TRUE << width); + // Write the snippet into the region. + head.set_bits(resize(val)); + // Discard the now-written bits from the value. + value >>= width; + } + // Write into the body elements, from low address to high, from + // the value. + if let Some(elts) = body { + for elt in elts.iter() { + elt.store(resize(value)); + value >>= T::BITS; + } + } + // If the tail exists, it contains the most significant chunk + // of the value, on the MSedge side. + if let Some((tail, t)) = tail { + // Get the region width. + let width = *t; + // Find the region distance from the LSedge. + let lsedge = T::BITS - width; + // Take the region-width LSedge bits of the value. + let val = value & mask_for::(width as usize); + // Erase the region. + tail.clear_bits(T::TRUE >> width); + // Shift the snippet to fit the region, and write. + tail.set_bits(resize::(val) << lsedge); + } + } + } + } + + fn store_be(&mut self, value: U) + where + U: BitStore, + { + let len = self.len(); + if !(1..=U::BITS as usize).contains(&len) { + panic!("Cannot store {} bits in a {}-bit region", U::BITS, len); + } + + let value = value & mask_for(len); + match self.bitptr().domain().splat() { + /* The live region is in the interior of a single element. + + The `value` is shifted left by the region’s distance from the + LSedge, then written directly into place. + */ + Either::Right((_, elt, tail)) => { + // Get the region’s distance from the LSedge. + let lsedge = T::BITS - *tail; + // Erase the live region. + elt.clear_bits(!(mask_for::(len) << lsedge)); + // Shift the value to fit the region, and write. + elt.set_bits(resize::(value) << lsedge); + } + /* The live region touches at least one element edge. + + This block writes chunks from the value into slice memory, from the + least-significant chunk to the most-significant. 
+            Either::Left((head, body, tail)) => {
+                let mut value: usize = resize(value);
+
+                // If the tail exists, it contains the least significant chunk
+                // of the value, on the MSedge side.
+                if let Some((tail, t)) = tail {
+                    // Get the region width (MSedge to tail).
+                    let width = *t;
+                    // Find the region distance from the LSedge.
+                    let lsedge = T::BITS - width;
+                    // Take the region-width LSedge bits of the value.
+                    let val = value & mask_for::<usize>(width as usize);
+                    // Erase the region.
+                    tail.clear_bits(T::TRUE >> width);
+                    // Shift the snippet to fit the region, and write.
+                    tail.set_bits(resize::<usize, T>(val) << lsedge);
+                    // Discard the now-written bits from the value.
+                    value >>= width;
+                }
+                // Write into the body elements, from high address to low, from
+                // the value.
+                if let Some(elts) = body {
+                    for elt in elts.iter().rev() {
+                        elt.store(resize(value));
+                        value >>= T::BITS;
+                    }
+                }
+                // If the head exists, it contains the most significant chunk
+                // of the value, on the LSedge side.
+                if let Some((h, head)) = head {
+                    // Find the region width.
+                    let width = T::BITS - *h;
+                    // Take the region-width LSedge bits of the value.
+                    let val = value & mask_for::<usize>(width as usize);
+                    // Erase the region.
+                    head.clear_bits(T::TRUE << width);
+                    // Write the snippet into the region.
+                    head.set_bits(resize(val));
+                }
+            }
+        }
+    }
 }
 
 #[cfg(feature = "alloc")]
 impl<O, T> BitField for BitBox<O, T>
-where O: BitOrder, T: BitStore, BitSlice<O, T>: BitField {
-    fn load_le<U>(&self) -> U
-    where U: BitStore {
-        self.as_bitslice().load_le()
-    }
-
-    fn load_be<U>(&self) -> U
-    where U: BitStore {
-        self.as_bitslice().load_be()
-    }
-
-    fn store_le<U>(&mut self, value: U)
-    where U: BitStore {
-        self.as_mut_bitslice().store_le(value)
-    }
-
-    fn store_be<U>(&mut self, value: U)
-    where U: BitStore {
-        self.as_mut_bitslice().store_be(value)
-    }
+where
+    O: BitOrder,
+    T: BitStore,
+    BitSlice<O, T>: BitField,
+{
+    fn load_le<U>(&self) -> U
+    where
+        U: BitStore,
+    {
+        self.as_bitslice().load_le()
+    }
+
+    fn load_be<U>(&self) -> U
+    where
+        U: BitStore,
+    {
+        self.as_bitslice().load_be()
+    }
+
+    fn store_le<U>(&mut self, value: U)
+    where
+        U: BitStore,
+    {
+        self.as_mut_bitslice().store_le(value)
+    }
+
+    fn store_be<U>(&mut self, value: U)
+    where
+        U: BitStore,
+    {
+        self.as_mut_bitslice().store_be(value)
+    }
 }
 
 #[cfg(feature = "alloc")]
 impl<O, T> BitField for BitVec<O, T>
-where O: BitOrder, T: BitStore, BitSlice<O, T>: BitField {
-    fn load_le<U>(&self) -> U
-    where U: BitStore {
-        self.as_bitslice().load_le()
-    }
-
-    fn load_be<U>(&self) -> U
-    where U: BitStore {
-        self.as_bitslice().load_be()
-    }
-
-    fn store_le<U>(&mut self, value: U)
-    where U: BitStore {
-        self.as_mut_bitslice().store_le(value)
-    }
-
-    fn store_be<U>(&mut self, value: U)
-    where U: BitStore {
-        self.as_mut_bitslice().store_be(value)
-    }
+where
+    O: BitOrder,
+    T: BitStore,
+    BitSlice<O, T>: BitField,
+{
+    fn load_le<U>(&self) -> U
+    where
+        U: BitStore,
+    {
+        self.as_bitslice().load_le()
+    }
+
+    fn load_be<U>(&self) -> U
+    where
+        U: BitStore,
+    {
+        self.as_bitslice().load_be()
+    }
+
+    fn store_le<U>(&mut self, value: U)
+    where
+        U: BitStore,
+    {
+        self.as_mut_bitslice().store_le(value)
+    }
+
+    fn store_be<U>(&mut self, value: U)
+    where
+        U: BitStore,
+    {
+        self.as_mut_bitslice().store_be(value)
+    }
 }
 
 /** Safely computes an LS-edge bitmask for a value of some length.
@@ -867,14 +912,15 @@ zero.
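 
 An editorial example: `mask_for::<u8>(3)` yields `0b0000_0111`, and any `len`
 of eight or more saturates to `0xFF`.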
 **/
 #[inline]
 fn mask_for<T>(len: usize) -> T
-where T: BitStore {
-    let len = len as u8;
-    if len >= T::BITS {
-        T::TRUE
-    }
-    else {
-        !(T::TRUE << len)
-    }
+where
+    T: BitStore,
+{
+    let len = len as u8;
+    if len >= T::BITS {
+        T::TRUE
+    } else {
+        !(T::TRUE << len)
+    }
 }
 
 /** Resizes a value from one fundamental type to another.
 
@@ -899,180 +945,211 @@
 The result of transforming `value as U`. Where `U` is wider than `T`, this
 zero-extends; where `U` is narrower, it truncates.
 **/
 fn resize<T, U>(value: T) -> U
-where T: BitStore, U: BitStore {
-    let zero = 0usize;
-    let mut slab = zero.to_ne_bytes();
-    let start = 0;
-
-    /* Copy the source value into the correct region of the intermediate slab.
-
-    The `BitStore::as_bytes` method returns the value as native-endian-order
-    bytes. These bytes are then written into the correct location of the slab
-    (low addresses on little-endian, high addresses on big-endian) to be
-    interpreted as `usize`.
-    */
-    match mem::size_of::<T>() {
-        n @ 1 | n @ 2 | n @ 4 | n @ 8 => {
-            #[cfg(target_endian = "big")]
-            let start = mem::size_of::<usize>() - n;
-
-            slab[start ..][.. n].copy_from_slice(value.as_bytes());
-        },
-        _ => unreachable!("BitStore is not implemented on types of this size"),
-    }
-    let mid = usize::from_ne_bytes(slab);
-    // Truncate to the correct size, then wrap in `U` through the trait method.
-    match mem::size_of::<U>() {
-        1 => U::from_bytes(&(mid as u8).to_ne_bytes()[..]),
-        2 => U::from_bytes(&(mid as u16).to_ne_bytes()[..]),
-        4 => U::from_bytes(&(mid as u32).to_ne_bytes()[..]),
-        #[cfg(target_pointer_width = "64")]
-        8 => U::from_bytes(&mid.to_ne_bytes()[..]),
-        _ => unreachable!("BitStore is not implemented on types of this size"),
-    }
+where
+    T: BitStore,
+    U: BitStore,
+{
+    use core::ptr::copy_nonoverlapping;
+
+    let mut buf = U::all_bits_zeroes();
+    let size_of_from = mem::size_of::<T>();
+    let size_of_into = mem::size_of::<U>();
+    unsafe {
+        if cfg!(target_endian = "big") {
+            // Big endian fills from the end.
+            if size_of_from > size_of_into {
+                // T has more bytes, move its pointer to match the length of U.
+                copy_nonoverlapping(
+                    (&value as *const T as *const u8).add(size_of_from - size_of_into),
+                    &mut buf as *mut U as *mut u8,
+                    size_of_from,
+                );
+            } else {
+                // U has more bytes, move its pointer to match the length of T.
+                // The pointer does not move when the sizes are equal.
+                copy_nonoverlapping(
+                    &value as *const T as *const u8,
+                    (&mut buf as *mut U as *mut u8).add(size_of_into - size_of_from),
+                    size_of_from,
+                );
+            }
+        } else {
+            // Little endian fills from the start: the copy begins at the
+            // first byte of both values, and every unfilled byte keeps the
+            // zero that `U::all_bits_zeroes()` put there.
+            if size_of_from > size_of_into {
+                copy_nonoverlapping(
+                    &value as *const T as *const u8,
+                    &mut buf as *mut U as *mut u8,
+                    size_of_into,
+                );
+            } else {
+                copy_nonoverlapping(
+                    &value as *const T as *const u8,
+                    &mut buf as *mut U as *mut u8,
+                    size_of_from,
+                );
+            }
+        }
+    }
+
+    buf
 }
 
 #[allow(clippy::inconsistent_digit_grouping)]
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use crate::prelude::*;
-
-    #[test]
-    fn lsb0() {
-        let mut bytes = [0u8; 16];
-        let bytes = bytes.bits_mut::<Lsb0>();
-
-        bytes[1 ..][.. 4].store_le(0x0Au8);
-        assert_eq!(bytes[1 ..][.. 4].load_le::<u8>(), 0x0Au8);
-        assert_eq!(bytes.as_slice()[0], 0b000_1010_0u8);
-
-        bytes[1 ..][.. 4].store_be(0x05u8);
-        assert_eq!(bytes[1 ..][.. 4].load_be::<u8>(), 0x05u8);
-        assert_eq!(bytes.as_slice()[0], 0b000_0101_0u8);
-
-        bytes[1 ..][.. 
4].store_le(0u8); - - // expected byte pattern: 0x34 0x12 - // bits: 0011_0100 __01_0010 - // idx: 7654 3210 fedc ba98 - let u16b = u16::from_ne_bytes(0x1234u16.to_le_bytes()); - bytes[5 ..][.. 14].store_le(u16b); - assert_eq!(bytes[5 ..][.. 14].load_le::(), 0x1234u16); - assert_eq!( - &bytes.as_slice()[.. 3], - &[0b100_00000, 0b010_0011_0, 0b00000_01_0], - // 210 a98 7654 3 dc b - ); - // the load/store orderings only affect the order of elements, not of - // bits within the element. - bytes[5 ..][.. 14].store_be(u16b); - assert_eq!(bytes[5 ..][.. 14].load_be::(), 0x1234u16); - assert_eq!( - &bytes.as_slice()[.. 3], - &[0b01_0_00000, 0b010_0011_0, 0b00000_100], - // dc b a98 7654 3 210 - ); - - let mut shorts = [0u16; 8]; - let shorts = shorts.bits_mut::(); - - shorts[3 ..][.. 12].store_le(0x0123u16); - assert_eq!(shorts[3 ..][.. 12].load_le::(), 0x0123u16); - assert_eq!(shorts.as_slice()[0], 0b0_0001_0010_0011_000u16); - - shorts[3 ..][.. 12].store_be(0x0123u16); - assert_eq!(shorts[3 ..][.. 12].load_be::(), 0x0123u16); - assert_eq!(shorts.as_slice()[0], 0b0_0001_0010_0011_000u16); - - let mut ints = [0u32; 4]; - let ints = ints.bits_mut::(); - - ints[1 ..][.. 28].store_le(0x0123_4567u32); - assert_eq!(ints[1 ..][.. 28].load_le::(), 0x0123_4567u32); - assert_eq!(ints.as_slice()[0], 0b000_0001_0010_0011_0100_0101_0110_0111_0u32); - - ints[1 ..][.. 28].store_be(0x0123_4567u32); - assert_eq!(ints[1 ..][.. 28].load_be::(), 0x0123_4567u32); - assert_eq!(ints.as_slice()[0], 0b000_0001_0010_0011_0100_0101_0110_0111_0u32); - - /* - #[cfg(target_pointer_width = "64")] { - - let mut longs = [0u64; 2]; - let longs = longs.bits_mut::(); - - } - */ - } - - #[test] - fn msb0() { - let mut bytes = [0u8; 16]; - let bytes = bytes.bits_mut::(); - - bytes[1 ..][.. 4].store_le(0x0Au8); - assert_eq!(bytes[1 ..][.. 4].load_le::(), 0x0Au8); - assert_eq!(bytes.as_slice()[0], 0b0_1010_000u8); - - bytes[1 ..][.. 4].store_be(0x05u8); - assert_eq!(bytes[1 ..][.. 4].load_be::(), 0x05u8); - assert_eq!(bytes.as_slice()[0], 0b0_0101_000u8); - - bytes[1 ..][.. 4].store_le(0u8); - - // expected byte pattern: 0x34 0x12 - // bits: 0011_0100 __01_0010 - // idx: 7654 3210 fedc ba98 - let u16b = u16::from_ne_bytes(0x1234u16.to_le_bytes()); - bytes[5 ..][.. 14].store_le(u16b); - assert_eq!(bytes[5 ..][.. 14].load_le::(), 0x1234u16); - assert_eq!( - &bytes.as_slice()[.. 3], - &[0b00000_100, 0b010_0011_0, 0b01_0_00000], - // 210 a98 7654 3 dc b - ); - // the load/store orderings only affect the order of elements, not of - // bits within the element. - bytes[5 ..][.. 14].store_be(u16b); - assert_eq!(bytes[5 ..][.. 14].load_be::(), 0x1234u16); - assert_eq!( - &bytes.as_slice()[.. 3], - &[0b00000_01_0, 0b010_0011_0, 0b100_00000], - // dc b a98 7654 3 210 - ); - - let mut shorts = [0u16; 8]; - let shorts = shorts.bits_mut::(); - - shorts[3 ..][.. 12].store_le(0x0123u16); - assert_eq!(shorts[3 ..][.. 12].load_le::(), 0x0123u16); - assert_eq!(shorts.as_slice()[0], 0b000_0001_0010_0011_0u16); - - shorts[3 ..][.. 12].store_be(0x0123u16); - assert_eq!(shorts[3 ..][.. 12].load_be::(), 0x0123u16); - assert_eq!(shorts.as_slice()[0], 0b000_0001_0010_0011_0u16); - - let mut ints = [0u32; 4]; - let ints = ints.bits_mut::(); - - ints[1 ..][.. 28].store_le(0x0123_4567u32); - assert_eq!(ints[1 ..][.. 28].load_le::(), 0x0123_4567u32); - assert_eq!(ints.as_slice()[0], 0b0_0001_0010_0011_0100_0101_0110_0111_000u32); - - ints[1 ..][.. 28].store_be(0x0123_4567u32); - assert_eq!(ints[1 ..][.. 
28].load_be::(), 0x0123_4567u32); - assert_eq!(ints.as_slice()[0], 0b0_0001_0010_0011_0100_0101_0110_0111_000u32); - - /* - #[cfg(target_pointer_width = "64")] { - - let mut longs = [0u64; 2]; - let longs = longs.bits_mut::(); - - } - */ - } + use super::*; + use crate::prelude::*; + + #[test] + fn lsb0() { + let mut bytes = [0u8; 16]; + let bytes = bytes.bits_mut::(); + + bytes[1..][..4].store_le(0x0Au8); + assert_eq!(bytes[1..][..4].load_le::(), 0x0Au8); + assert_eq!(bytes.as_slice()[0], 0b000_1010_0u8); + + bytes[1..][..4].store_be(0x05u8); + assert_eq!(bytes[1..][..4].load_be::(), 0x05u8); + assert_eq!(bytes.as_slice()[0], 0b000_0101_0u8); + + bytes[1..][..4].store_le(0u8); + + // expected byte pattern: 0x34 0x12 + // bits: 0011_0100 __01_0010 + // idx: 7654 3210 fedc ba98 + let u16b = u16::from_ne_bytes(0x1234u16.to_le_bytes()); + bytes[5..][..14].store_le(u16b); + assert_eq!(bytes[5..][..14].load_le::(), 0x1234u16); + assert_eq!( + &bytes.as_slice()[..3], + &[0b100_00000, 0b010_0011_0, 0b00000_01_0], + // 210 a98 7654 3 dc b + ); + // the load/store orderings only affect the order of elements, not of + // bits within the element. + bytes[5..][..14].store_be(u16b); + assert_eq!(bytes[5..][..14].load_be::(), 0x1234u16); + assert_eq!( + &bytes.as_slice()[..3], + &[0b01_0_00000, 0b010_0011_0, 0b00000_100], + // dc b a98 7654 3 210 + ); + + let mut shorts = [0u16; 8]; + let shorts = shorts.bits_mut::(); + + shorts[3..][..12].store_le(0x0123u16); + assert_eq!(shorts[3..][..12].load_le::(), 0x0123u16); + assert_eq!(shorts.as_slice()[0], 0b0_0001_0010_0011_000u16); + + shorts[3..][..12].store_be(0x0123u16); + assert_eq!(shorts[3..][..12].load_be::(), 0x0123u16); + assert_eq!(shorts.as_slice()[0], 0b0_0001_0010_0011_000u16); + + let mut ints = [0u32; 4]; + let ints = ints.bits_mut::(); + + ints[1..][..28].store_le(0x0123_4567u32); + assert_eq!(ints[1..][..28].load_le::(), 0x0123_4567u32); + assert_eq!( + ints.as_slice()[0], + 0b000_0001_0010_0011_0100_0101_0110_0111_0u32 + ); + + ints[1..][..28].store_be(0x0123_4567u32); + assert_eq!(ints[1..][..28].load_be::(), 0x0123_4567u32); + assert_eq!( + ints.as_slice()[0], + 0b000_0001_0010_0011_0100_0101_0110_0111_0u32 + ); + + /* + #[cfg(target_pointer_width = "64")] { + + let mut longs = [0u64; 2]; + let longs = longs.bits_mut::(); + + } + */ + } + + #[test] + fn msb0() { + let mut bytes = [0u8; 16]; + let bytes = bytes.bits_mut::(); + + bytes[1..][..4].store_le(0x0Au8); + assert_eq!(bytes[1..][..4].load_le::(), 0x0Au8); + assert_eq!(bytes.as_slice()[0], 0b0_1010_000u8); + + bytes[1..][..4].store_be(0x05u8); + assert_eq!(bytes[1..][..4].load_be::(), 0x05u8); + assert_eq!(bytes.as_slice()[0], 0b0_0101_000u8); + + bytes[1..][..4].store_le(0u8); + + // expected byte pattern: 0x34 0x12 + // bits: 0011_0100 __01_0010 + // idx: 7654 3210 fedc ba98 + let u16b = u16::from_ne_bytes(0x1234u16.to_le_bytes()); + bytes[5..][..14].store_le(u16b); + assert_eq!(bytes[5..][..14].load_le::(), 0x1234u16); + assert_eq!( + &bytes.as_slice()[..3], + &[0b00000_100, 0b010_0011_0, 0b01_0_00000], + // 210 a98 7654 3 dc b + ); + // the load/store orderings only affect the order of elements, not of + // bits within the element. 
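+        // Editorial note: `store_be` puts the most significant chunk of
+        // `u16b` in the first (lowest-address) byte and the least significant
+        // chunk in the third, reversing the element order of the `store_le`
+        // call above; the bit layout within each element is unchanged.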
+ bytes[5..][..14].store_be(u16b); + assert_eq!(bytes[5..][..14].load_be::(), 0x1234u16); + assert_eq!( + &bytes.as_slice()[..3], + &[0b00000_01_0, 0b010_0011_0, 0b100_00000], + // dc b a98 7654 3 210 + ); + + let mut shorts = [0u16; 8]; + let shorts = shorts.bits_mut::(); + + shorts[3..][..12].store_le(0x0123u16); + assert_eq!(shorts[3..][..12].load_le::(), 0x0123u16); + assert_eq!(shorts.as_slice()[0], 0b000_0001_0010_0011_0u16); + + shorts[3..][..12].store_be(0x0123u16); + assert_eq!(shorts[3..][..12].load_be::(), 0x0123u16); + assert_eq!(shorts.as_slice()[0], 0b000_0001_0010_0011_0u16); + + let mut ints = [0u32; 4]; + let ints = ints.bits_mut::(); + + ints[1..][..28].store_le(0x0123_4567u32); + assert_eq!(ints[1..][..28].load_le::(), 0x0123_4567u32); + assert_eq!( + ints.as_slice()[0], + 0b0_0001_0010_0011_0100_0101_0110_0111_000u32 + ); + + ints[1..][..28].store_be(0x0123_4567u32); + assert_eq!(ints[1..][..28].load_be::(), 0x0123_4567u32); + assert_eq!( + ints.as_slice()[0], + 0b0_0001_0010_0011_0100_0101_0110_0111_000u32 + ); + + /* + #[cfg(target_pointer_width = "64")] { + + let mut longs = [0u64; 2]; + let longs = longs.bits_mut::(); + + } + */ + } } #[cfg(test)] diff --git a/src/store.rs b/src/store.rs index 02ebe2b7..40d20356 100644 --- a/src/store.rs +++ b/src/store.rs @@ -16,7 +16,6 @@ use core::{ fmt::{Binary, Debug, Display, LowerHex, UpperHex}, mem::size_of, ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not, Shl, ShlAssign, Shr, ShrAssign}, - slice, }; use radium::marker::BitOps; @@ -230,32 +229,12 @@ pub trait BitStore: ::count_ones(!self) } - /// Interprets a value as a sequence of bytes. - /// - /// # Parameters - /// - /// - `&self` - /// - /// # Returns - /// - /// A slice covering `*self` as a sequence of individual bytes. - fn as_bytes(&self) -> &[u8]; - - /// Interprets a sequence of bytes as `Self`. - /// - /// # Parameters - /// - /// - `bytes`: The bytes to interpret as `Self`. This must be exactly - /// `mem::size_of::` bytes long. - /// + /// Takes type and returns value of the given type. + /// /// # Returns - /// - /// An instance of `Self` constructed by reinterpreting `bytes`. - /// - /// # Panics - /// - /// This panics if `bytes.len()` is not `mem::size_of::()`. - fn from_bytes(bytes: &[u8]) -> Self; + /// + /// Self where all bits are zeroes. + fn all_bits_zeroes() -> Self; } /** Compute the number of elements required to store a number of bits. @@ -292,23 +271,15 @@ macro_rules! bitstore { #[cfg(not(feature = "atomic"))] type Access = Cell; - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, size_of::()) } - } - - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect(concat!("<", core::stringify!($T), " as BitStore>::from_bytes requires a slice of length ", $Size)) - } - #[inline(always)] fn count_ones(self) -> usize { Self::count_ones(self) as usize } + + #[inline(always)] + fn all_bits_zeroes() -> Self { + 0 + } } )* }; @@ -336,143 +307,6 @@ bitstore! 
{ usize => 8 ; AtomicUsize } -/* -impl BitStore for u8 { - const TYPENAME: &'static str = "u8"; - - const FALSE: Self = 0; - const TRUE: Self = !0; - - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU8; - - #[cfg(not(feature = "atomic"))] - type Access = Cell; - - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, 1) } - } - - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of length 1") - } -} - -impl BitStore for u16 { - const TYPENAME: &'static str = "u16"; - - const FALSE: Self = 0; - const TRUE: Self = !0; - - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU16; - - #[cfg(not(feature = "atomic"))] - type Access = Cell; - - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, 2) } - } - - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of length 2") - } -} - -impl BitStore for u32 { - const TYPENAME: &'static str = "u32"; - - const FALSE: Self = 0; - const TRUE: Self = !0; - - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU32; - - #[cfg(not(feature = "atomic"))] - type Access = Cell; - - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, 4) } - } - - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of length 4") - } -} - -#[cfg(target_pointer_width = "64")] -impl BitStore for u64 { - const TYPENAME: &'static str = "u64"; - - const FALSE: Self = 0; - const TRUE: Self = !0; - - #[cfg(feature = "atomic")] - type Access = atomic::AtomicU64; - - #[cfg(not(feature = "atomic"))] - type Access = Cell; - - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, 8) } - } - - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of length 8") - } -} - -impl BitStore for usize { - #[cfg(target_pointer_width = "32")] - const TYPENAME: &'static str = "u32"; - - #[cfg(target_pointer_width = "64")] - const TYPENAME: &'static str = "u64"; - - const FALSE: Self = 0; - const TRUE: Self = !0; - - #[cfg(feature = "atomic")] - type Access = atomic::AtomicUsize; - - #[cfg(not(feature = "atomic"))] - type Access = Cell; - - #[inline] - fn as_bytes(&self) -> &[u8] { - unsafe { slice::from_raw_parts(self as *const Self as *const u8, size_of::()) } - } - - #[inline] - fn from_bytes(bytes: &[u8]) -> Self { - bytes - .try_into() - .map(Self::from_ne_bytes) - .expect("::from_bytes requires a slice of its exact width in bytes") - } -} -*/ - #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] compile_fail!("This architecture is currently not supported. File an issue at https://github.com/myrrlyn/bitvec"); @@ -503,17 +337,6 @@ macro_rules! seal { seal! {u8 u16 u32 usize} seal! 
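 // Editorial note: judging by the hand-written impls removed below, `seal!`
 // appears to expand to empty `impl Sealed for $t {}` items, with a second
 // arm that applies a `#![cfg(...)]` gate to the pointer-width-specific types.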
{ - #![cfg(target_pointer_width = "64")] - u64 + #![cfg(target_pointer_width = "64")] + u64 } - -/* -impl Sealed for u8 {} -impl Sealed for u16 {} -impl Sealed for u32 {} - -#[cfg(target_pointer_width = "64")] -impl Sealed for u64 {} - -impl Sealed for usize {} -*/ From a4c4a0335b28792e13464725bf57722b5f022564 Mon Sep 17 00:00:00 2001 From: ImmConCon <43708554+ImmemorConsultrixContrarie@users.noreply.github.com> Date: Thu, 23 Jan 2020 20:03:24 +0200 Subject: [PATCH 3/3] Fixup for the wrong sizeof --- src/fields.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fields.rs b/src/fields.rs index 52ab1790..20251dad 100644 --- a/src/fields.rs +++ b/src/fields.rs @@ -962,7 +962,7 @@ where copy_nonoverlapping( (&value as *const T as *const u8).add(size_of_from - size_of_into), &mut buf as *mut U as *mut u8, - size_of_from, + size_of_into, ); } else { // U has more bytes, move its pointer to match the length of T.
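
Editorial addendum, not part of the patch series: the PATCH 3/3 hunk matters
because, on a big-endian target with `T` wider than `U`, the source pointer has
already been advanced by `size_of_from - size_of_into` bytes, so only
`size_of_into` bytes remain readable; copying `size_of_from` bytes would read
past the end of `value`. Below is a minimal standalone sketch of the corrected
truncating copy, with concrete types in place of the patch's generics (the
function name is illustrative, not part of the crate's API):

    use core::mem::size_of;
    use core::ptr::copy_nonoverlapping;

    /// Copies the numerically low bytes of a `u64` into a `u16`.
    /// On big-endian targets those bytes sit at the *end* of the value.
    fn resize_u64_to_u16(value: u64) -> u16 {
        let mut buf = 0u16;
        let from = size_of::<u64>(); // 8
        let into = size_of::<u16>(); // 2
        unsafe {
            if cfg!(target_endian = "big") {
                copy_nonoverlapping(
                    // Skip the high-order bytes of the source ...
                    (&value as *const u64 as *const u8).add(from - into),
                    &mut buf as *mut u16 as *mut u8,
                    // ... and copy only as many bytes as the destination
                    // holds (the PATCH 3/3 fix).
                    into,
                );
            } else {
                // Little-endian: the low bytes are at the start of `value`.
                copy_nonoverlapping(
                    &value as *const u64 as *const u8,
                    &mut buf as *mut u16 as *mut u8,
                    into,
                );
            }
        }
        buf
    }

    fn main() {
        // Truncation keeps the numerically low bits on either endianness.
        assert_eq!(resize_u64_to_u16(0x1122_3344_5566_7788), 0x7788);
    }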