Skip to content

Commit

Permalink
comp: Make a better effort at computing packedness before computing bitfield units.
Browse files Browse the repository at this point in the history
Fixes #2067
  • Loading branch information
emilio committed Jun 21, 2021
1 parent b60339e commit 14a8d29
Show file tree
Hide file tree
Showing 4 changed files with 283 additions and 34 deletions.
88 changes: 59 additions & 29 deletions src/ir/comp.rs
Expand Up @@ -1113,21 +1113,17 @@ impl CompInfo {
}

// empty union case
if self.fields().is_empty() {
if !self.has_fields() {
return None;
}

let mut max_size = 0;
// Don't allow align(0)
let mut max_align = 1;
for field in self.fields() {
let field_layout = field.layout(ctx);

if let Some(layout) = field_layout {
max_size = cmp::max(max_size, layout.size);
max_align = cmp::max(max_align, layout.align);
}
}
self.each_known_field_layout(ctx, |layout| {
max_size = cmp::max(max_size, layout.size);
max_align = cmp::max(max_align, layout.align);
});

Some(Layout::new(max_size, max_align))
}
Expand All @@ -1139,12 +1135,49 @@ impl CompInfo {
CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
fields
}
CompFields::BeforeComputingBitfieldUnits(_) => {
CompFields::BeforeComputingBitfieldUnits(..) => {
panic!("Should always have computed bitfield units first");
}
}
}

/// Whether this compound type has any fields at all, regardless of whether
/// bitfield allocation units have been computed yet.
fn has_fields(&self) -> bool {
    match self.fields {
        // A bitfield-computation error means we treat the type as opaque.
        CompFields::ErrorComputingBitfieldUnits => false,
        CompFields::AfterComputingBitfieldUnits { ref fields, .. } => !fields.is_empty(),
        CompFields::BeforeComputingBitfieldUnits(ref raw_fields) => !raw_fields.is_empty(),
    }
}

/// Invoke `callback` with the layout of every field whose layout is known,
/// working both before and after bitfield allocation units are computed.
///
/// Fields without a computable layout are silently skipped, and nothing is
/// visited when bitfield-unit computation previously failed.
fn each_known_field_layout(
    &self,
    ctx: &BindgenContext,
    mut callback: impl FnMut(Layout),
) {
    match self.fields {
        CompFields::ErrorComputingBitfieldUnits => {}
        CompFields::AfterComputingBitfieldUnits { ref fields, .. } => {
            for field in fields {
                if let Some(layout) = field.layout(ctx) {
                    callback(layout);
                }
            }
        }
        CompFields::BeforeComputingBitfieldUnits(ref raw_fields) => {
            // Raw fields only carry a type id; resolve it to query layout.
            for raw_field in raw_fields {
                let resolved = ctx.resolve_type(raw_field.0.ty);
                if let Some(layout) = resolved.layout(ctx) {
                    callback(layout);
                }
            }
        }
    }
}

fn has_bitfields(&self) -> bool {
match self.fields {
CompFields::ErrorComputingBitfieldUnits => false,
Expand Down Expand Up @@ -1598,23 +1631,17 @@ impl CompInfo {
// Even though `libclang` doesn't expose `#pragma packed(...)`, we can
// detect it through its effects.
if let Some(parent_layout) = layout {
if self.fields().iter().any(|f| match *f {
Field::Bitfields(ref unit) => {
unit.layout().align > parent_layout.align
}
Field::DataMember(ref data) => {
let field_ty = ctx.resolve_type(data.ty());
field_ty.layout(ctx).map_or(false, |field_ty_layout| {
field_ty_layout.align > parent_layout.align
})
}
}) {
let mut packed = false;
self.each_known_field_layout(ctx, |layout| {
packed = packed || layout.align > parent_layout.align;
});
if packed {
info!("Found a struct that was defined within `#pragma packed(...)`");
return true;
} else if self.has_own_virtual_method {
if parent_layout.align == 1 {
return true;
}
}

if self.has_own_virtual_method && parent_layout.align == 1 {
return true;
}
}

Expand All @@ -1627,10 +1654,13 @@ impl CompInfo {
}

/// Compute this compound structure's bitfield allocation units.
pub fn compute_bitfield_units(&mut self, ctx: &BindgenContext) {
// TODO(emilio): If we could detect #pragma packed here we'd fix layout
// tests in divide-by-zero-in-struct-layout.rs
self.fields.compute_bitfield_units(ctx, self.packed_attr)
pub fn compute_bitfield_units(
    &mut self,
    ctx: &BindgenContext,
    layout: Option<&Layout>,
) {
    // Packedness (including `#pragma pack`, which is inferred from the
    // parent layout) must be known before grouping bitfields, because it
    // changes how allocation units are sized and aligned.
    let is_packed = self.is_packed(ctx, layout);
    self.fields.compute_bitfield_units(ctx, is_packed)
}

/// Assign for each anonymous field a generated name.
Expand Down
9 changes: 4 additions & 5 deletions src/ir/context.rs
Expand Up @@ -981,12 +981,11 @@ If you encounter an error missing from this list, please file an issue or a PR!"
mem::replace(&mut self.need_bitfield_allocation, vec![]);
for id in need_bitfield_allocation {
self.with_loaned_item(id, |ctx, item| {
item.kind_mut()
.as_type_mut()
.unwrap()
.as_comp_mut()
let ty = item.kind_mut().as_type_mut().unwrap();
let layout = ty.layout(ctx);
ty.as_comp_mut()
.unwrap()
.compute_bitfield_units(ctx);
.compute_bitfield_units(ctx, layout.as_ref());
});
}
}
Expand Down
211 changes: 211 additions & 0 deletions tests/expectations/tests/bitfield_pragma_packed.rs
@@ -0,0 +1,211 @@
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]

#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
    storage: Storage,
}
impl<Storage> __BindgenBitfieldUnit<Storage> {
    /// Wrap `storage` as a bitfield allocation unit.
    #[inline]
    pub const fn new(storage: Storage) -> Self {
        Self { storage }
    }
}
impl<Storage> __BindgenBitfieldUnit<Storage>
where
    Storage: AsRef<[u8]> + AsMut<[u8]>,
{
    /// Position of logical bit `index` within its byte, honoring the
    /// target endianness (big-endian stores bit 0 in the high position).
    #[inline]
    fn bit_position(index: usize) -> usize {
        if cfg!(target_endian = "big") {
            7 - (index % 8)
        } else {
            index % 8
        }
    }

    /// Read the single bit at `index`.
    #[inline]
    pub fn get_bit(&self, index: usize) -> bool {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let byte = self.storage.as_ref()[index / 8];
        byte & (1 << Self::bit_position(index)) != 0
    }

    /// Write the single bit at `index`.
    #[inline]
    pub fn set_bit(&mut self, index: usize, val: bool) {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let mask = 1 << Self::bit_position(index);
        let byte = &mut self.storage.as_mut()[index / 8];
        if val {
            *byte |= mask;
        } else {
            *byte &= !mask;
        }
    }

    /// Extract the `bit_width`-bit value starting at `bit_offset`.
    #[inline]
    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        (0..bit_width as usize).fold(0u64, |acc, i| {
            if self.get_bit(bit_offset + i) {
                // Mirror the bit order on big-endian targets.
                let dst = if cfg!(target_endian = "big") {
                    bit_width as usize - 1 - i
                } else {
                    i
                };
                acc | (1u64 << dst)
            } else {
                acc
            }
        })
    }

    /// Store the low `bit_width` bits of `val` starting at `bit_offset`.
    #[inline]
    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        for i in 0..bit_width as usize {
            // Mirror the bit order on big-endian targets.
            let src = if cfg!(target_endian = "big") {
                bit_width as usize - 1 - i
            } else {
                i
            };
            self.set_bit(bit_offset + i, val & (1u64 << src) != 0);
        }
    }
}
// Generated binding for the C `struct Struct` defined under
// `#pragma pack(push, 1)`: note `packed` in the repr and the 1-byte
// alignment asserted by the layout test below.
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone)]
pub struct Struct {
    // Zero-sized marker pinning the bitfield unit's alignment.
    pub _bitfield_align_1: [u8; 0],
    // Backing storage for all five bitfields, packed into 4 bytes.
    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>,
}
#[test]
fn bindgen_test_layout_Struct() {
    // 4 bytes of bitfield storage, with alignment forced down to 1 by
    // `#[repr(C, packed)]` (mirroring `#pragma pack(push, 1)`).
    assert_eq!(
        ::std::mem::size_of::<Struct>(),
        4usize,
        concat!("Size of: ", stringify!(Struct))
    );
    assert_eq!(
        ::std::mem::align_of::<Struct>(),
        1usize,
        concat!("Alignment of ", stringify!(Struct))
    );
}
// Generated accessors for the bitfields of `Struct`. Bit allocation within
// `_bitfield_1` (offset, width): a = (0, 1), b = (1, 1), c = (2, 6),
// d = (8, 16), e = (24, 8).
impl Struct {
    /// Bitfield `a`: bit 0, width 1.
    #[inline]
    pub fn a(&self) -> ::std::os::raw::c_uchar {
        unsafe {
            ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8)
        }
    }
    #[inline]
    pub fn set_a(&mut self, val: ::std::os::raw::c_uchar) {
        unsafe {
            let val: u8 = ::std::mem::transmute(val);
            self._bitfield_1.set(0usize, 1u8, val as u64)
        }
    }
    /// Bitfield `b`: bit 1, width 1.
    #[inline]
    pub fn b(&self) -> ::std::os::raw::c_uchar {
        unsafe {
            ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8)
        }
    }
    #[inline]
    pub fn set_b(&mut self, val: ::std::os::raw::c_uchar) {
        unsafe {
            let val: u8 = ::std::mem::transmute(val);
            self._bitfield_1.set(1usize, 1u8, val as u64)
        }
    }
    /// Bitfield `c`: bits 2..8, width 6.
    #[inline]
    pub fn c(&self) -> ::std::os::raw::c_uchar {
        unsafe {
            ::std::mem::transmute(self._bitfield_1.get(2usize, 6u8) as u8)
        }
    }
    #[inline]
    pub fn set_c(&mut self, val: ::std::os::raw::c_uchar) {
        unsafe {
            let val: u8 = ::std::mem::transmute(val);
            self._bitfield_1.set(2usize, 6u8, val as u64)
        }
    }
    /// Bitfield `d`: bits 8..24, width 16.
    #[inline]
    pub fn d(&self) -> ::std::os::raw::c_ushort {
        unsafe {
            ::std::mem::transmute(self._bitfield_1.get(8usize, 16u8) as u16)
        }
    }
    #[inline]
    pub fn set_d(&mut self, val: ::std::os::raw::c_ushort) {
        unsafe {
            let val: u16 = ::std::mem::transmute(val);
            self._bitfield_1.set(8usize, 16u8, val as u64)
        }
    }
    /// Bitfield `e`: bits 24..32, width 8.
    #[inline]
    pub fn e(&self) -> ::std::os::raw::c_uchar {
        unsafe {
            ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u8)
        }
    }
    #[inline]
    pub fn set_e(&mut self, val: ::std::os::raw::c_uchar) {
        unsafe {
            let val: u8 = ::std::mem::transmute(val);
            self._bitfield_1.set(24usize, 8u8, val as u64)
        }
    }
    /// Pack all five bitfield values into a fresh 4-byte unit.
    #[inline]
    pub fn new_bitfield_1(
        a: ::std::os::raw::c_uchar,
        b: ::std::os::raw::c_uchar,
        c: ::std::os::raw::c_uchar,
        d: ::std::os::raw::c_ushort,
        e: ::std::os::raw::c_uchar,
    ) -> __BindgenBitfieldUnit<[u8; 4usize]> {
        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> =
            Default::default();
        __bindgen_bitfield_unit.set(0usize, 1u8, {
            let a: u8 = unsafe { ::std::mem::transmute(a) };
            a as u64
        });
        __bindgen_bitfield_unit.set(1usize, 1u8, {
            let b: u8 = unsafe { ::std::mem::transmute(b) };
            b as u64
        });
        __bindgen_bitfield_unit.set(2usize, 6u8, {
            let c: u8 = unsafe { ::std::mem::transmute(c) };
            c as u64
        });
        __bindgen_bitfield_unit.set(8usize, 16u8, {
            let d: u16 = unsafe { ::std::mem::transmute(d) };
            d as u64
        });
        __bindgen_bitfield_unit.set(24usize, 8u8, {
            let e: u8 = unsafe { ::std::mem::transmute(e) };
            e as u64
        });
        __bindgen_bitfield_unit
    }
}
9 changes: 9 additions & 0 deletions tests/headers/bitfield_pragma_packed.h
@@ -0,0 +1,9 @@
/* Regression test input for issue #2067: bitfield allocation units must
 * honor `#pragma pack`. libclang does not expose the pragma directly, so
 * bindgen has to infer packedness from the struct's layout before
 * computing bitfield units. */
#pragma pack(push, 1)
struct Struct {
  unsigned char a : 1;
  unsigned char b : 1;
  unsigned char c : 6;
  unsigned short int d : 16;
  unsigned char e : 8;
};
#pragma pack(pop)

0 comments on commit 14a8d29

Please sign in to comment.