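//! Bindgen-style generated bindings (layout, bitfield accessors, and a
//! layout test) for a C struct `C` with two `bool` bitfields and a
//! 50-element `int` array.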
#![allow(
dead_code,
non_snake_case,
non_camel_case_types,
non_upper_case_globals
)]
extern crate core;
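/// Byte-addressed storage for packed C bitfields. `Storage` is a small
/// `[u8; N]` array sized to hold every bitfield of one allocation unit.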
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
storage: Storage,
}
impl<Storage> __BindgenBitfieldUnit<Storage> {
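    /// Wraps the given storage bytes in a bitfield unit.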
#[inline]
pub const fn new(storage: Storage) -> Self {
Self { storage }
}
}
impl<Storage> __BindgenBitfieldUnit<Storage>
where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
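    /// Returns the bit at `index`, counting bits from the start of the
    /// storage; bit order within a byte follows the target's endianness.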
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
index % 8
};
let mask = 1 << bit_index;
byte & mask == mask
}
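    /// Sets or clears the bit at `index`, mirroring the indexing used by
    /// `get_bit`.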
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
index % 8
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
} else {
*byte &= !mask;
}
}
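    /// Extracts a `bit_width`-bit field starting at `bit_offset` and returns
    /// it in the low bits of a `u64`.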
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
debug_assert!(
(bit_offset + (bit_width as usize)) / 8 <=
self.storage.as_ref().len()
);
let mut val = 0;
for i in 0..(bit_width as usize) {
if self.get_bit(i + bit_offset) {
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
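    /// Stores the low `bit_width` bits of `val` into the field starting at
    /// `bit_offset`.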
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
debug_assert!(
(bit_offset + (bit_width as usize)) / 8 <=
self.storage.as_ref().len()
);
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
}
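/// Generated counterpart of a C struct `C`: a one-byte bitfield unit holding
/// `a` (1 bit) and `b` (7 bits), followed by a 50-element `int` array.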
#[repr(C)]
#[derive(Copy, Clone)]
pub struct C {
pub _bitfield_align_1: [u8; 0],
pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
pub large_array: [::std::os::raw::c_int; 50usize],
}
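// Layout assertions: `C` is expected to be 204 bytes (1 byte of bitfield
// storage padded to 4, then 50 * 4 bytes), aligned to 4, with `large_array`
// at offset 4. The field offset is computed via the null-pointer offset
// pattern rather than `core::mem::offset_of!`.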
#[test]
fn bindgen_test_layout_C() {
assert_eq!(
::core::mem::size_of::<C>(),
204usize,
concat!("Size of: ", stringify!(C))
);
assert_eq!(
::core::mem::align_of::<C>(),
4usize,
concat!("Alignment of ", stringify!(C))
);
assert_eq!(
unsafe {
&(*(::core::ptr::null::<C>())).large_array as *const _ as usize
},
4usize,
concat!(
"Offset of field: ",
stringify!(C),
"::",
stringify!(large_array)
)
);
}
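// Zero-fills an uninitialized `C` and then marks it as initialized; all-zero
// bytes are a valid value for every field of this struct.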
impl Default for C {
fn default() -> Self {
let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
unsafe {
::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
s.assume_init()
}
}
}
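// Manual `Debug` impl: prints the decoded bitfields via their accessors and
// elides the contents of `large_array`.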
impl ::core::fmt::Debug for C {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
write!(
f,
"C {{ a : {:?}, b : {:?}, large_array: [...] }}",
self.a(),
self.b()
)
}
}
impl C {
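    /// Returns the 1-bit `a` field (bit 0 of the bitfield unit) as a `bool`.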
#[inline]
pub fn a(&self) -> bool {
unsafe {
::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8)
}
}
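    /// Writes `val` into the 1-bit `a` field.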
#[inline]
pub fn set_a(&mut self, val: bool) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
self._bitfield_1.set(0usize, 1u8, val as u64)
}
}
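    /// Returns the 7-bit `b` field (bits 1..=7) as a `bool`. The raw 7-bit
    /// value is transmuted to `bool`, so only the bit patterns 0 and 1 are
    /// well-defined, matching a C `bool` bitfield that only stores 0 or 1.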
#[inline]
pub fn b(&self) -> bool {
unsafe {
::core::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8)
}
}
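    /// Writes `val` into the 7-bit `b` field (stored as 0 or 1).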
#[inline]
pub fn set_b(&mut self, val: bool) {
unsafe {
let val: u8 = ::core::mem::transmute(val);
self._bitfield_1.set(1usize, 7u8, val as u64)
}
}
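    /// Packs `a` and `b` into a fresh one-byte bitfield unit, suitable for
    /// initializing `_bitfield_1` directly.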
#[inline]
pub fn new_bitfield_1(
a: bool,
b: bool,
) -> __BindgenBitfieldUnit<[u8; 1usize]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> =
Default::default();
__bindgen_bitfield_unit.set(0usize, 1u8, {
let a: u8 = unsafe { ::core::mem::transmute(a) };
a as u64
});
__bindgen_bitfield_unit.set(1usize, 7u8, {
let b: u8 = unsafe { ::core::mem::transmute(b) };
b as u64
});
__bindgen_bitfield_unit
}
}
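// Illustrative usage (an addition, not part of the generated bindings):
// builds a `C`, packs the bitfields with `new_bitfield_1`, and round-trips
// them through the generated accessors.
#[test]
fn illustrative_bitfield_roundtrip() {
    let mut c = C::default();
    // Pack a = true, b = false into the one-byte bitfield unit.
    c._bitfield_1 = C::new_bitfield_1(true, false);
    assert!(c.a());
    assert!(!c.b());
    // Flip both fields through the setters and read them back.
    c.set_a(false);
    c.set_b(true);
    assert!(!c.a());
    assert!(c.b());
    // The plain array field is untouched and still zeroed by `Default`.
    assert_eq!(c.large_array[0], 0);
}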