blob: adafef0aa73d232f7dfd9650df88c7ff33162d7c [file] [log] [blame]
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Utilities for safe zero-copy parsing and serialization.
//!
//! This crate provides utilities which make it easy to perform zero-copy
//! parsing and serialization by allowing zero-copy conversion to/from byte
//! slices.
//!
//! This is enabled by three core marker traits:
//! - [`FromBytes`] indicates that a type may safely be converted from an
//! arbitrary byte sequence
//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
//! sequence
//! - [`Unaligned`] indicates that a type's alignment requirement is 1
//!
//! Types which implement a subset of these traits can then be converted to/from
//! byte sequences with little to no runtime overhead.
#![feature(refcell_map_split)]
#![cfg_attr(not(test), no_std)]
use core::cell::{Ref, RefMut};
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
// Implement an unsafe marker trait for all signed and unsigned primitive
// integer types. The macro recurses through the comma-separated type list,
// emitting one `unsafe impl` per type.
macro_rules! impl_for_primitives {
    ($trait:ident) => (
        impl_for_primitives!(@inner $trait, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize);
    );
    // base case: a single remaining type
    (@inner $trait:ident, $type:ty) => (
        unsafe impl $trait for $type {}
    );
    // recursive case: implement for the head type, then recurse on the tail
    (@inner $trait:ident, $type:ty, $($types:ty),*) => (
        unsafe impl $trait for $type {}
        impl_for_primitives!(@inner $trait, $($types),*);
    );
}
// Implement an unsafe marker trait for all array lengths up to 32 with an
// element type which implements the trait. (Pre-const-generics, each length
// needs its own impl, so the macro recurses through the length list.)
macro_rules! impl_for_array_sizes {
    ($trait:ident) => (
        impl_for_array_sizes!(@inner $trait, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32);
    );
    // base case: a single remaining length
    (@inner $trait:ident, $n:expr) => (
        unsafe impl<T: $trait> $trait for [T; $n] {}
    );
    // recursive case: implement for the head length, then recurse on the tail
    (@inner $trait:ident, $n:expr, $($ns:expr),*) => (
        unsafe impl<T: $trait> $trait for [T; $n] {}
        impl_for_array_sizes!(@inner $trait, $($ns),*);
    );
}
/// Types for which any byte pattern is valid.
///
/// `FromBytes` types can safely be deserialized from an untrusted sequence of
/// bytes because any byte sequence corresponds to a valid instance of the type.
///
/// This crate implements `FromBytes` for all primitive integer types and for
/// arrays (up to length 32) whose element type implements `FromBytes`.
///
/// # Safety
///
/// If `T: FromBytes`, then unsafe code may assume that it is sound to treat any
/// initialized sequence of bytes of length `size_of::<T>()` as a `T`. If a type
/// is marked as `FromBytes` which violates this contract, it may cause
/// undefined behavior.
///
/// If a type has the following properties, then it is safe to implement
/// `FromBytes` for that type:
/// - If the type is a struct:
///   - It must be `repr(C)` or `repr(transparent)`
///   - All of its fields must implement `FromBytes`
/// - If the type is an enum:
///   - It must be a C-like enum (meaning that all variants have no fields)
///   - It must be `repr(u8)`, `repr(u16)`, `repr(u32)`, or `repr(u64)`
///   - The maximum number of discriminants must be used (so that every possible
///     bit pattern is a valid one)
pub unsafe trait FromBytes {}
/// Types which are safe to treat as an immutable byte slice.
///
/// `AsBytes` types can be safely viewed as a slice of bytes. In particular,
/// this means that, in any valid instance of the type, none of the bytes of the
/// instance are uninitialized. This precludes the following types:
/// - Structs with internal padding
/// - Unions in which not all variants have the same length
///
/// This crate implements `AsBytes` for all primitive integer types and for
/// arrays (up to length 32) whose element type implements `AsBytes`.
///
/// # Safety
///
/// If `T: AsBytes`, then unsafe code may assume that it is sound to treat any
/// instance of the type as an immutable `[u8]` of the appropriate length. If a
/// type is marked as `AsBytes` which violates this contract, it may cause
/// undefined behavior.
///
/// If a type has the following properties, then it is safe to implement
/// `AsBytes` for that type:
/// - If the type is a struct:
///   - It must be `repr(C)` or `repr(transparent)`
///   - If it is `repr(C)`, its layout must have no inter-field padding (this
///     can be accomplished either by using `repr(packed)` or by manually adding
///     padding fields)
///   - All of its fields must implement `AsBytes`
/// - If the type is an enum:
///   - It must be a C-like enum (meaning that all variants have no fields)
///   - It must be `repr(u8)`, `repr(u16)`, `repr(u32)`, or `repr(u64)`
pub unsafe trait AsBytes {}
// Every primitive integer type, and every array (up to length 32) of
// `FromBytes`/`AsBytes` elements, is both `FromBytes` and `AsBytes`.
impl_for_primitives!(FromBytes);
impl_for_primitives!(AsBytes);
impl_for_array_sizes!(FromBytes);
impl_for_array_sizes!(AsBytes);
/// Types with no alignment requirement.
///
/// If `T: Unaligned`, then `align_of::<T>() == 1`.
///
/// # Safety
///
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. If a type
/// is marked as `Unaligned` which violates this contract, it may cause
/// undefined behavior.
pub unsafe trait Unaligned {}
// `u8` and `i8` are the only primitive integer types with an alignment of 1;
// wider integer types have stricter alignment requirements, so `Unaligned` is
// not implemented via impl_for_primitives!.
unsafe impl Unaligned for u8 {}
unsafe impl Unaligned for i8 {}
// An array has the same alignment as its element type.
impl_for_array_sizes!(Unaligned);
/// A length- and alignment-checked reference to a byte slice which can safely
/// be reinterpreted as another type.
///
/// `LayoutVerified` is a byte slice reference (`&[u8]`, `&mut [u8]`,
/// `Ref<[u8]>`, `RefMut<[u8]>`, etc) with the invariant that the slice's length
/// and alignment are each greater than or equal to the length and alignment of
/// `T`. Using this invariant, it implements `Deref` for `T` so long as `T:
/// FromBytes` and `DerefMut` so long as `T: FromBytes + AsBytes`.
///
/// # Examples
///
/// `LayoutVerified` can be used to treat a sequence of bytes as a structured
/// type, and to read and write the fields of that type as if the byte slice
/// reference were simply a reference to that type.
///
/// ```rust
/// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, LayoutVerified, Unaligned};
///
/// #[repr(C)]
/// struct UdpHeader {
///     src_port: [u8; 2],
///     dst_port: [u8; 2],
///     length: [u8; 2],
///     checksum: [u8; 2],
/// }
///
/// unsafe impl FromBytes for UdpHeader {}
/// unsafe impl AsBytes for UdpHeader {}
/// unsafe impl Unaligned for UdpHeader {}
///
/// struct UdpPacket<B> {
///     header: LayoutVerified<B, UdpHeader>,
///     body: B,
/// }
///
/// impl<B: ByteSlice> UdpPacket<B> {
///     pub fn parse(bytes: B) -> Option<UdpPacket<B>> {
///         let (header, body) = LayoutVerified::new_unaligned_from_prefix(bytes)?;
///         Some(UdpPacket { header, body })
///     }
///
///     pub fn get_src_port(&self) -> [u8; 2] {
///         self.header.src_port
///     }
/// }
///
/// impl<B: ByteSliceMut> UdpPacket<B> {
///     pub fn set_src_port(&mut self, src_port: [u8; 2]) {
///         self.header.src_port = src_port;
///     }
/// }
/// ```
// The length/alignment invariant is established only by the constructors
// below; the `PhantomData<T>` records the target type without storing one.
pub struct LayoutVerified<B, T>(B, PhantomData<T>);
impl<B, T> LayoutVerified<B, T>
where
    B: ByteSlice,
{
    /// Construct a new `LayoutVerified`.
    ///
    /// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is
    /// aligned to `align_of::<T>()`, and constructs a new `LayoutVerified`.
    /// Returns `None` if either check fails.
    #[inline]
    pub fn new(bytes: B) -> Option<LayoutVerified<B, T>> {
        // Both checks must pass before the invariant may be assumed.
        if bytes.len() == mem::size_of::<T>() && aligned_to(bytes.deref(), mem::align_of::<T>()) {
            Some(LayoutVerified(bytes, PhantomData))
        } else {
            None
        }
    }
    /// Construct a new `LayoutVerified` from the prefix of a byte slice.
    ///
    /// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that
    /// `bytes` is aligned to `align_of::<T>()`. It consumes the first
    /// `size_of::<T>()` bytes from `bytes` to construct a `LayoutVerified`, and
    /// returns the remaining bytes to the caller. Returns `None` if either the
    /// length or alignment check fails.
    #[inline]
    pub fn new_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
        if bytes.len() >= mem::size_of::<T>() && aligned_to(bytes.deref(), mem::align_of::<T>()) {
            let (prefix, rest) = bytes.split_at(mem::size_of::<T>());
            Some((LayoutVerified(prefix, PhantomData), rest))
        } else {
            None
        }
    }
    /// Construct a new `LayoutVerified` from the suffix of a byte slice.
    ///
    /// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that
    /// the last `size_of::<T>()` bytes of `bytes` are aligned to
    /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
    /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
    /// to the caller. Returns `None` if either the length or alignment check
    /// fails.
    #[inline]
    pub fn new_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
        let total = bytes.len();
        if total < mem::size_of::<T>() {
            return None;
        }
        // The alignment check must inspect the suffix itself, not the start of
        // the original slice, so split first and then check.
        let (rest, suffix) = bytes.split_at(total - mem::size_of::<T>());
        if aligned_to(suffix.deref(), mem::align_of::<T>()) {
            Some((rest, LayoutVerified(suffix, PhantomData)))
        } else {
            None
        }
    }
    /// The raw bytes underlying this `LayoutVerified`.
    #[inline]
    pub fn bytes(&self) -> &[u8] {
        &self.0
    }
}
// If the option holds a `LayoutVerified`, zero its underlying bytes before
// passing it through; `None` passes through unchanged.
fn map_zeroed<B: ByteSliceMut, T>(
    opt: Option<LayoutVerified<B, T>>,
) -> Option<LayoutVerified<B, T>> {
    opt.map(|mut lv| {
        for byte in lv.0.iter_mut() {
            *byte = 0;
        }
        lv
    })
}
// Like `map_zeroed`, but for a `(LayoutVerified, remainder)` tuple as returned
// by the `*_from_prefix` constructors. Only the `LayoutVerified`'s bytes are
// zeroed; the remainder is left untouched.
fn map_prefix_tuple_zeroed<B: ByteSliceMut, T>(
    opt: Option<(LayoutVerified<B, T>, B)>,
) -> Option<(LayoutVerified<B, T>, B)> {
    opt.map(|(mut lv, rest)| {
        for byte in lv.0.iter_mut() {
            *byte = 0;
        }
        (lv, rest)
    })
}
// Like `map_prefix_tuple_zeroed`, but for a `(remainder, LayoutVerified)`
// tuple as returned by the `*_from_suffix` constructors.
fn map_suffix_tuple_zeroed<B: ByteSliceMut, T>(
    opt: Option<(B, LayoutVerified<B, T>)>,
) -> Option<(B, LayoutVerified<B, T>)> {
    opt.map(|(rest, mut lv)| {
        for byte in lv.0.iter_mut() {
            *byte = 0;
        }
        (rest, lv)
    })
}
impl<B, T> LayoutVerified<B, T>
where
    B: ByteSliceMut,
{
    /// Construct a new `LayoutVerified` after zeroing the bytes.
    ///
    /// Performs the same length and alignment checks as `new`
    /// (`bytes.len() == size_of::<T>()` and `bytes` aligned to
    /// `align_of::<T>()`), returning `None` if either fails.
    ///
    /// On success, `bytes` is overwritten with zeroes first. This is useful
    /// when re-using buffers, ensuring that sensitive data previously stored
    /// in the buffer is not leaked.
    #[inline]
    pub fn new_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
        let verified = Self::new(bytes);
        map_zeroed(verified)
    }
    /// Construct a new `LayoutVerified` from the prefix of a byte slice,
    /// zeroing the prefix.
    ///
    /// Performs the same checks as `new_from_prefix` (`bytes.len() >=
    /// size_of::<T>()` and `bytes` aligned to `align_of::<T>()`), consuming
    /// the first `size_of::<T>()` bytes and returning the remainder. Returns
    /// `None` if either check fails.
    ///
    /// On success, the consumed prefix is overwritten with zeroes. This is
    /// useful when re-using buffers, ensuring that sensitive data previously
    /// stored in the buffer is not leaked.
    #[inline]
    pub fn new_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
        let verified = Self::new_from_prefix(bytes);
        map_prefix_tuple_zeroed(verified)
    }
    /// Construct a new `LayoutVerified` from the suffix of a byte slice,
    /// zeroing the suffix.
    ///
    /// Performs the same checks as `new_from_suffix` (`bytes.len() >=
    /// size_of::<T>()` and the last `size_of::<T>()` bytes aligned to
    /// `align_of::<T>()`), consuming the last `size_of::<T>()` bytes and
    /// returning the preceding bytes. Returns `None` if either check fails.
    ///
    /// On success, the consumed suffix is overwritten with zeroes. This is
    /// useful when re-using buffers, ensuring that sensitive data previously
    /// stored in the buffer is not leaked.
    #[inline]
    pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
        let verified = Self::new_from_suffix(bytes);
        map_suffix_tuple_zeroed(verified)
    }
}
impl<B, T> LayoutVerified<B, T>
where
    B: ByteSlice,
    T: Unaligned,
{
    /// Construct a new `LayoutVerified` for a type with no alignment
    /// requirement.
    ///
    /// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and
    /// constructs a new `LayoutVerified`. No alignment check is needed since
    /// `T: Unaligned`. Returns `None` if the length check fails.
    #[inline]
    pub fn new_unaligned(bytes: B) -> Option<LayoutVerified<B, T>> {
        if bytes.len() == mem::size_of::<T>() {
            Some(LayoutVerified(bytes, PhantomData))
        } else {
            None
        }
    }
    /// Construct a new `LayoutVerified` from the prefix of a byte slice for a
    /// type with no alignment requirement.
    ///
    /// `new_unaligned_from_prefix` verifies that `bytes.len() >=
    /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
    /// `bytes` to construct a `LayoutVerified`, and returns the remaining
    /// bytes to the caller. Returns `None` if the length check fails.
    #[inline]
    pub fn new_unaligned_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
        if bytes.len() < mem::size_of::<T>() {
            None
        } else {
            let (prefix, rest) = bytes.split_at(mem::size_of::<T>());
            Some((LayoutVerified(prefix, PhantomData), rest))
        }
    }
    /// Construct a new `LayoutVerified` from the suffix of a byte slice for a
    /// type with no alignment requirement.
    ///
    /// `new_unaligned_from_suffix` verifies that `bytes.len() >=
    /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
    /// `bytes` to construct a `LayoutVerified`, and returns the preceding
    /// bytes to the caller. Returns `None` if the length check fails.
    #[inline]
    pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
        let total = bytes.len();
        if total < mem::size_of::<T>() {
            None
        } else {
            let (rest, suffix) = bytes.split_at(total - mem::size_of::<T>());
            Some((rest, LayoutVerified(suffix, PhantomData)))
        }
    }
}
impl<B, T> LayoutVerified<B, T>
where
    B: ByteSliceMut,
    T: Unaligned,
{
    /// Construct a new `LayoutVerified` for a type with no alignment
    /// requirement, zeroing the bytes.
    ///
    /// Performs the same length check as `new_unaligned`
    /// (`bytes.len() == size_of::<T>()`), returning `None` on failure.
    ///
    /// On success, `bytes` is overwritten with zeroes first. This is useful
    /// when re-using buffers, ensuring that sensitive data previously stored
    /// in the buffer is not leaked.
    #[inline]
    pub fn new_unaligned_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
        let verified = Self::new_unaligned(bytes);
        map_zeroed(verified)
    }
    /// Construct a new `LayoutVerified` from the prefix of a byte slice for a
    /// type with no alignment requirement, zeroing the prefix.
    ///
    /// Performs the same length check as `new_unaligned_from_prefix`
    /// (`bytes.len() >= size_of::<T>()`), consuming the first
    /// `size_of::<T>()` bytes and returning the remainder. Returns `None` on
    /// failure.
    ///
    /// On success, the consumed prefix is overwritten with zeroes. This is
    /// useful when re-using buffers, ensuring that sensitive data previously
    /// stored in the buffer is not leaked.
    #[inline]
    pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
        let verified = Self::new_unaligned_from_prefix(bytes);
        map_prefix_tuple_zeroed(verified)
    }
    /// Construct a new `LayoutVerified` from the suffix of a byte slice for a
    /// type with no alignment requirement, zeroing the suffix.
    ///
    /// Performs the same length check as `new_unaligned_from_suffix`
    /// (`bytes.len() >= size_of::<T>()`), consuming the last `size_of::<T>()`
    /// bytes and returning the preceding bytes. Returns `None` on failure.
    ///
    /// On success, the consumed suffix is overwritten with zeroes. This is
    /// useful when re-using buffers, ensuring that sensitive data previously
    /// stored in the buffer is not leaked.
    #[inline]
    pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
        let verified = Self::new_unaligned_from_suffix(bytes);
        map_suffix_tuple_zeroed(verified)
    }
}
// Returns true if the address of the first byte of `bytes` is a multiple of
// `align`. (`as_ptr` yields the data pointer, equivalent to the original
// fat-pointer-to-thin-pointer cast.)
fn aligned_to(bytes: &[u8], align: usize) -> bool {
    bytes.as_ptr() as usize % align == 0
}
impl<B, T> LayoutVerified<B, T>
where
B: ByteSliceMut,
{
#[inline]
pub fn bytes_mut(&mut self) -> &mut [u8] {
&mut self.0
}
}
impl<B, T> Deref for LayoutVerified<B, T>
where
    B: ByteSlice,
    T: FromBytes,
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: the constructors guarantee that `self.0` is at least
        // `size_of::<T>()` bytes long and aligned to `align_of::<T>()`, and
        // `T: FromBytes` guarantees that any initialized byte pattern is a
        // valid `T`.
        //
        // Note: this previously produced `&mut T` through a `*mut T` cast
        // from the shared borrow `&self`, which violates Rust's aliasing
        // rules (a `&mut` may never be derived from a `&`). A shared
        // reference must be created through `*const T`.
        unsafe { &*(self.0.as_ptr() as *const T) }
    }
}
impl<B, T> DerefMut for LayoutVerified<B, T>
where
    B: ByteSliceMut,
    T: FromBytes + AsBytes,
{
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the constructors guarantee that `self.0` is at least
        // `size_of::<T>()` bytes long and aligned to `align_of::<T>()`.
        // `T: FromBytes` makes any initialized bytes a valid `T`, and
        // `T: AsBytes` ensures that writes through the returned reference
        // leave every byte initialized. The mutable reference is derived
        // from `&mut self`, so exclusivity is upheld.
        unsafe { &mut *(self.0.as_mut_ptr() as *mut T) }
    }
}
impl<T, B> Display for LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes + Display,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
let inner: &T = self;
inner.fmt(fmt)
}
}
impl<T, B> Debug for LayoutVerified<B, T>
where
B: ByteSlice,
T: FromBytes + Debug,
{
#[inline]
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
let inner: &T = self;
fmt.debug_tuple("LayoutVerified").field(&inner).finish()
}
}
// Implements the "sealed trait" pattern: `Sealed` is a public trait inside a
// private module, so downstream crates can name and use `ByteSlice` and
// `ByteSliceMut` (which require `Sealed`) but cannot implement them.
mod sealed {
    use core::cell::{Ref, RefMut};
    pub trait Sealed {}
    impl<'a> Sealed for &'a [u8] {}
    impl<'a> Sealed for &'a mut [u8] {}
    impl<'a> Sealed for Ref<'a, [u8]> {}
    impl<'a> Sealed for RefMut<'a, [u8]> {}
}
// ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8],
// Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these
// references such as that a given reference will never change its length
// between calls to deref() or deref_mut(), and that split_at() works as
// expected. If ByteSlice or ByteSliceMut were not sealed, consumers could
// implement them in a way that violated these behaviors, and would break our
// unsafe code. Thus, we seal them and implement them only for known-good
// reference types. For the same reason, they're unsafe traits.
/// A mutable or immutable reference to a byte slice.
///
/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
/// implemented for various special reference types such as `Ref<[u8]>` and
/// `RefMut<[u8]>`.
///
/// # Safety
///
/// Unsafe code in this crate relies on implementations behaving like plain
/// slice references: the slice's length and address must not change between
/// calls to `deref`, and `split_at` must partition the slice exactly at `mid`.
/// The trait is sealed so that only the vetted implementations in this crate
/// exist.
pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + self::sealed::Sealed {
    /// Returns a raw pointer to the first byte of the slice.
    fn as_ptr(&self) -> *const u8;
    /// Consumes `self`, splitting it into the subslices before and after
    /// `mid` (behaving like `<[u8]>::split_at`).
    fn split_at(self, mid: usize) -> (Self, Self);
}
/// A mutable reference to a byte slice.
///
/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
/// a byte slice, and is implemented for various special reference types such as
/// `RefMut<[u8]>`.
///
/// # Safety
///
/// The same requirements as `ByteSlice` apply; like `ByteSlice`, this trait is
/// sealed.
pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
    /// Returns a raw mutable pointer to the first byte of the slice.
    fn as_mut_ptr(&mut self) -> *mut u8;
}
// The impls below delegate to the inherent slice methods. UFCS
// (`<[u8]>::...`) is used so that each call unambiguously targets the slice
// method rather than the trait method being defined.
unsafe impl<'a> ByteSlice for &'a [u8] {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        <[u8]>::split_at(self, mid)
    }
}
unsafe impl<'a> ByteSlice for &'a mut [u8] {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        // split_at_mut so that both halves retain mutability
        <[u8]>::split_at_mut(self, mid)
    }
}
unsafe impl<'a> ByteSlice for Ref<'a, [u8]> {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        // Ref::map_split turns one RefCell borrow into two disjoint borrows
        // (requires the refcell_map_split nightly feature enabled above)
        Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
    }
}
unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
    }
}
unsafe impl<'a> ByteSliceMut for &'a mut [u8] {
    fn as_mut_ptr(&mut self) -> *mut u8 {
        <[u8]>::as_mut_ptr(self)
    }
}
unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {
    fn as_mut_ptr(&mut self) -> *mut u8 {
        <[u8]>::as_mut_ptr(self)
    }
}
#[cfg(test)]
mod tests {
use core::ops::Deref;
use core::ptr;
use super::LayoutVerified;
// B should be [u8; N]. T will require that the entire structure is aligned
// to the alignment of T.
//
// repr(C) guarantees that `buf`, as the first field, is located at offset 0
// and therefore shares the struct's alignment (which is at least
// align_of::<T>() because of the `_t` field). With the default repr, field
// order and offsets are unspecified, so the alignment of `buf` would not be
// guaranteed by the language.
#[repr(C)]
#[derive(Default)]
struct AlignedBuffer<T, B> {
    buf: B,
    _t: T,
}
impl<T, B: Default> AlignedBuffer<T, B> {
    // reset the buffer to its all-zeroes default
    fn clear_buf(&mut self) {
        self.buf = B::default();
    }
}
// Convert a u64 to bytes using this platform's endianness. `to_ne_bytes`
// performs exactly the native-endian reinterpretation the previous
// `ptr::read` pointer cast did, without any unsafe code.
fn u64_to_bytes(u: u64) -> [u8; 8] {
    u.to_ne_bytes()
}
#[test]
fn test_address() {
    // Test that the Deref implementation returns a reference which points to
    // the start of the underlying buffer. (Only Deref is exercised here: the
    // buffer is immutable, so DerefMut cannot be called.)
    let buf = [0];
    let lv = LayoutVerified::<_, u8>::new(&buf[..]).unwrap();
    let buf_ptr = buf.as_ptr();
    let deref_ptr = lv.deref() as *const u8;
    assert_eq!(buf_ptr, deref_ptr);
}
// verify that values written to a LayoutVerified are properly shared
// between the typed and untyped representations; the expected byte values
// are computed with u64_to_bytes so the comparisons hold on any endianness
fn test_new_helper<'a>(mut lv: LayoutVerified<&'a mut [u8], u64>) {
    // assert that the value starts at 0
    assert_eq!(*lv, 0);
    // assert that values written to the typed value are reflected in the
    // byte slice
    const VAL1: u64 = 0xFF00FF00FF00FF00;
    *lv = VAL1;
    assert_eq!(lv.bytes(), &u64_to_bytes(VAL1));
    // assert that values written to the byte slice are reflected in the
    // typed value
    const VAL2: u64 = !VAL1; // different from VAL1
    lv.bytes_mut().copy_from_slice(&u64_to_bytes(VAL2)[..]);
    assert_eq!(*lv, VAL2);
}
// verify that values written to a LayoutVerified are properly shared
// between the typed and untyped representations; [u8; 8] is used as the
// target type since it has no alignment requirement
fn test_new_helper_unaligned<'a>(mut lv: LayoutVerified<&'a mut [u8], [u8; 8]>) {
    // assert that the value starts at 0
    assert_eq!(*lv, [0; 8]);
    // assert that values written to the typed value are reflected in the
    // byte slice
    const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
    *lv = VAL1;
    assert_eq!(lv.bytes(), &VAL1);
    // assert that values written to the byte slice are reflected in the
    // typed value
    const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1
    lv.bytes_mut().copy_from_slice(&VAL2[..]);
    assert_eq!(*lv, VAL2);
}
#[test]
fn test_new_aligned_sized() {
    // Test that a properly-aligned, properly-sized buffer works for new,
    // new_from_prefix, and new_from_suffix, and that new_from_prefix and
    // new_from_suffix return empty slices. Test that xxx_zeroed behaves
    // the same, and zeroes the memory.
    // a buffer with an alignment of 8
    let mut buf = AlignedBuffer::<u64, [u8; 8]>::default();
    // buf.buf should be aligned to 8, so this should always succeed
    test_new_helper(LayoutVerified::<_, u64>::new(&mut buf.buf[..]).unwrap());
    // pre-fill with 0xFF so the zeroing performed by new_zeroed is observable
    buf.buf = [0xFFu8; 8];
    test_new_helper(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).unwrap());
    {
        // in a block so that lv and suffix don't live too long
        buf.clear_buf();
        let (lv, suffix) = LayoutVerified::<_, u64>::new_from_prefix(&mut buf.buf[..]).unwrap();
        assert!(suffix.is_empty());
        test_new_helper(lv);
    }
    {
        buf.buf = [0xFFu8; 8];
        let (lv, suffix) =
            LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).unwrap();
        assert!(suffix.is_empty());
        test_new_helper(lv);
    }
    {
        buf.clear_buf();
        let (prefix, lv) = LayoutVerified::<_, u64>::new_from_suffix(&mut buf.buf[..]).unwrap();
        assert!(prefix.is_empty());
        test_new_helper(lv);
    }
    {
        buf.buf = [0xFFu8; 8];
        let (prefix, lv) =
            LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).unwrap();
        assert!(prefix.is_empty());
        test_new_helper(lv);
    }
}
#[test]
fn test_new_unaligned_sized() {
    // Test that an unaligned, properly-sized buffer works for
    // new_unaligned, new_unaligned_from_prefix, and
    // new_unaligned_from_suffix, and that new_unaligned_from_prefix and
    // new_unaligned_from_suffix return empty slices. Test that xxx_zeroed
    // behaves the same, and zeroes the memory.
    let mut buf = [0u8; 8];
    test_new_helper_unaligned(
        LayoutVerified::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap(),
    );
    // pre-fill with 0xFF so the zeroing performed by the _zeroed variant is
    // observable
    buf = [0xFFu8; 8];
    test_new_helper_unaligned(
        LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap(),
    );
    {
        // in a block so that lv and suffix don't live too long
        buf = [0u8; 8];
        let (lv, suffix) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
        assert!(suffix.is_empty());
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 8];
        let (lv, suffix) = LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(
            &mut buf[..],
        ).unwrap();
        assert!(suffix.is_empty());
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0u8; 8];
        let (prefix, lv) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
        assert!(prefix.is_empty());
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 8];
        let (prefix, lv) = LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(
            &mut buf[..],
        ).unwrap();
        assert!(prefix.is_empty());
        test_new_helper_unaligned(lv);
    }
}
#[test]
fn test_new_oversized() {
    // Test that a properly-aligned, overly-sized buffer works for
    // new_from_prefix and new_from_suffix, and that they return the
    // remainder and prefix of the slice respectively. Test that xxx_zeroed
    // behaves the same, and zeroes the memory (but only the consumed half,
    // not the returned remainder).
    let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
    {
        // in a block so that lv and suffix don't live too long
        // buf.buf should be aligned to 8, so this should always succeed
        let (lv, suffix) = LayoutVerified::<_, u64>::new_from_prefix(&mut buf.buf[..]).unwrap();
        assert_eq!(suffix.len(), 8);
        test_new_helper(lv);
    }
    {
        buf.buf = [0xFFu8; 16];
        // buf.buf should be aligned to 8, so this should always succeed
        let (lv, suffix) =
            LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).unwrap();
        // assert that the suffix wasn't zeroed
        assert_eq!(suffix, &[0xFFu8; 8]);
        test_new_helper(lv);
    }
    {
        buf.clear_buf();
        // buf.buf should be aligned to 8, so this should always succeed
        let (prefix, lv) = LayoutVerified::<_, u64>::new_from_suffix(&mut buf.buf[..]).unwrap();
        assert_eq!(prefix.len(), 8);
        test_new_helper(lv);
    }
    {
        buf.buf = [0xFFu8; 16];
        // buf.buf should be aligned to 8, so this should always succeed
        let (prefix, lv) =
            LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).unwrap();
        // assert that the prefix wasn't zeroed
        assert_eq!(prefix, &[0xFFu8; 8]);
        test_new_helper(lv);
    }
}
#[test]
fn test_new_unaligned_oversized() {
    // Test that an unaligned, overly-sized buffer works for
    // new_unaligned_from_prefix and new_unaligned_from_suffix, and that
    // they return the remainder and prefix of the slice respectively. Test
    // that xxx_zeroed behaves the same, and zeroes the memory (but only the
    // consumed half, not the returned remainder).
    let mut buf = [0u8; 16];
    {
        // in a block so that lv and suffix don't live too long
        let (lv, suffix) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
        assert_eq!(suffix.len(), 8);
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 16];
        let (lv, suffix) = LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(
            &mut buf[..],
        ).unwrap();
        // assert that the suffix wasn't zeroed
        assert_eq!(suffix, &[0xFF; 8]);
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0u8; 16];
        let (prefix, lv) =
            LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
        assert_eq!(prefix.len(), 8);
        test_new_helper_unaligned(lv);
    }
    {
        buf = [0xFFu8; 16];
        let (prefix, lv) = LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(
            &mut buf[..],
        ).unwrap();
        // assert that the prefix wasn't zeroed
        assert_eq!(prefix, &[0xFF; 8]);
        test_new_helper_unaligned(lv);
    }
}
#[test]
fn test_new_fail() {
    // fail because the buffer is too large
    // a buffer with an alignment of 8
    let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
    // buf.buf should be aligned to 8, so only the length check should fail
    assert!(LayoutVerified::<_, u64>::new(&buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.buf[..]).is_none());
    // fail because the buffer is too small
    // a buffer with an alignment of 8 (the prefix/suffix constructors are
    // also exercised here, since a too-small buffer is the only length
    // failure they can have)
    let mut buf = AlignedBuffer::<u64, [u8; 4]>::default();
    // buf.buf should be aligned to 8, so only the length check should fail
    assert!(LayoutVerified::<_, u64>::new(&buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_from_prefix(&buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_from_suffix(&buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.buf[..]).is_none());
    assert!(
        LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.buf[..])
            .is_none()
    );
    assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.buf[..]).is_none());
    assert!(
        LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.buf[..])
            .is_none()
    );
    // fail because the alignment is insufficient
    // a buffer with an alignment of 8
    let mut buf = AlignedBuffer::<u64, [u8; 12]>::default();
    // slicing from 4, we get a buffer with size 8 (so the length check
    // should succeed) but an alignment of only 4, which is insufficient
    assert!(LayoutVerified::<_, u64>::new(&buf.buf[4..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[4..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_from_prefix(&buf.buf[4..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[4..]).is_none());
    // slicing from 4 should be unnecessary because new_from_suffix[_zeroed]
    // use the suffix of the slice (12 - 8 = 4, which is only 4-aligned)
    assert!(LayoutVerified::<_, u64>::new_from_suffix(&buf.buf[..]).is_none());
    assert!(LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).is_none());
}
#[test]
fn test_display_debug() {
    // Display forwards to the inner T; Debug wraps the inner T's Debug
    // output in "LayoutVerified(...)".
    let buf = AlignedBuffer::<u64, [u8; 8]>::default();
    let lv = LayoutVerified::<_, u64>::new(&buf.buf[..]).unwrap();
    assert_eq!(format!("{}", lv), "0");
    assert_eq!(format!("{:?}", lv), "LayoutVerified(0)");
}
}