[recovery-netstack] WIP: Initial commit
Change-Id: Iaa96cd3dc2f051cbf4240cdfebc572b3ec4fc68a
diff --git a/bin/recovery_netstack/BUILD.gn b/bin/recovery_netstack/BUILD.gn
new file mode 100644
index 0000000..f0cfc98
--- /dev/null
+++ b/bin/recovery_netstack/BUILD.gn
@@ -0,0 +1,28 @@
+# Copyright 2018 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/rust/rustc_binary.gni")
+import("//build/package.gni")
+
+rustc_binary("bin") {
+ name = "recovery_netstack"
+ with_unit_tests = true
+
+ deps = [
+ "//third_party/rust-crates/rustc_deps:byteorder"
+ ]
+}
+
+package("recovery_netstack") {
+ deps = [
+ ":bin",
+ ]
+
+ binary = "rust_crates/recovery_netstack"
+
+ meta = [{
+ path = rebase_path("//garnet/bin/appmgr/legacy_flat_exported_dir")
+ dest = "legacy_flat_exported_dir"
+ }]
+}
diff --git a/bin/recovery_netstack/MAINTAINERS b/bin/recovery_netstack/MAINTAINERS
new file mode 100644
index 0000000..d66f727
--- /dev/null
+++ b/bin/recovery_netstack/MAINTAINERS
@@ -0,0 +1 @@
+joshlf@google.com
diff --git a/bin/recovery_netstack/ip-macro/BUILD.gn b/bin/recovery_netstack/ip-macro/BUILD.gn
new file mode 100644
index 0000000..5b72157
--- /dev/null
+++ b/bin/recovery_netstack/ip-macro/BUILD.gn
@@ -0,0 +1,14 @@
+# Copyright 2018 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/rust/rustc_library.gni")
+
+rustc_library("ip-macro") {
+ name = "ip-macro"
+ version = "0.1.0"
+
+ deps = [
+ "//third_party/rust-crates/rustc_deps:quote"
+ ]
+}
diff --git a/bin/recovery_netstack/ip-macro/src/lib.rs b/bin/recovery_netstack/ip-macro/src/lib.rs
new file mode 100644
index 0000000..28b2e7e
--- /dev/null
+++ b/bin/recovery_netstack/ip-macro/src/lib.rs
@@ -0,0 +1,78 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#![feature(proc_macro)]
+
+//! The `ip!` macro for parsing IP addresses at compile time.
+//!
+//! This crate provides the `ip!` macro, which is capable of parsing
+//! IP addresses and CIDR notation for both IPv4 and IPv6.
+//!
+//! # Examples
+//!
+//! ```rust,ignore
+//! let a = ip!(1.2.3.4);
+//! let b = ip!(ffff::);
+//! let c = ip!(1.2.3.0/24);
+//! let d = ip!(ffff::/16);
+//! ```
+
+extern crate proc_macro;
+#[macro_use]
+extern crate quote;
+
+use std::net::IpAddr;
+
+use proc_macro::TokenStream;
+
+#[proc_macro]
+pub fn ip(input: TokenStream) -> TokenStream {
+ // format and remove all spaces, or else IP parsing will fail
+ let s = format!("{}", input).replace(" ", "");
+ match ip_helper(&s) {
+ Ok(stream) => stream,
+ Err(_) => panic!("invalid IP address or subnet: {}", s),
+ }
+}
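+
+// A sketch of the expansion (editor's note; the `::ip` paths are whatever the
+// consuming crate exposes): `ip!(1.2.3.4)` expands to
+// `::ip::Ipv4Addr::new([1u8, 2u8, 3u8, 4u8])`, and `ip!(1.2.3.0/24)` expands
+// to `::ip::Subnet::new(::ip::Ipv4Addr::new([1u8, 2u8, 3u8, 0u8]), 24u8)`.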
+
+fn ip_helper(s: &str) -> Result<TokenStream, ()> {
+ Ok(match s.parse() {
+ Ok(IpAddr::V4(v4)) => {
+ let octets = v4.octets();
+ quote!(::ip::Ipv4Addr::new([#(#octets),*])).into()
+ }
+ Ok(IpAddr::V6(v6)) => {
+ let octets = v6.octets();
+ quote!(::ip::Ipv6Addr::new([#(#octets),*])).into()
+ }
+ Err(_) => {
+ // try to parse as a subnet before returning error
+ if !s.contains('/') {
+ return Err(())
+ }
+ let parts: Vec<&str> = s.split('/').collect();
+ if parts.len() != 2 {
+ return Err(())
+ }
+ let ip: IpAddr = parts[0].parse().map_err(|_| ())?;
+ let prefix: u8 = parts[1].parse().map_err(|_| ())?;
+ match ip {
+ IpAddr::V4(v4) => {
+ if prefix > 32 {
+ return Err(())
+ }
+ let octets = v4.octets();
+ quote!(::ip::Subnet::new(::ip::Ipv4Addr::new([#(#octets),*]), #prefix)).into()
+ }
+ IpAddr::V6(v6) => {
+ if prefix > 128 {
+ return Err(())
+ }
+ let octets = v6.octets();
+ quote!(::ip::Subnet::new(::ip::Ipv6Addr::new([#(#octets),*]), #prefix)).into()
+ }
+ }
+ }
+ })
+}
\ No newline at end of file
diff --git a/bin/recovery_netstack/src/device/ethernet.rs b/bin/recovery_netstack/src/device/ethernet.rs
new file mode 100644
index 0000000..abc25b5
--- /dev/null
+++ b/bin/recovery_netstack/src/device/ethernet.rs
@@ -0,0 +1,52 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/// The broadcast MAC address.
+///
+/// The broadcast MAC address, FF:FF:FF:FF:FF:FF, indicates that a frame should
+/// be received by all receivers regardless of their local MAC address.
+pub const BROADCAST_MAC: MAC = MAC([0xFF; 6]);
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub struct MAC([u8; 6]);
+
+impl MAC {
+ /// Construct a new MAC address.
+ pub const fn new(bytes: [u8; 6]) -> MAC {
+ MAC(bytes)
+ }
+
+ /// Is this a unicast MAC address?
+ ///
+ /// Returns true if the least significant bit of the first byte of the
+ /// address is 0.
+ pub fn is_unicast(&self) -> bool {
+ // https://en.wikipedia.org/wiki/MAC_address#Unicast_vs._multicast
+ self.0[0] & 1 == 0
+ }
+
+ /// Is this a multicast MAC address?
+ ///
+ /// Returns true if the least significant bit of the first byte of the
+ /// address is 1.
+ pub fn is_multicast(&self) -> bool {
+ // https://en.wikipedia.org/wiki/MAC_address#Unicast_vs._multicast
+ self.0[0] & 1 == 1
+ }
+
+ /// Is this the broadcast MAC address?
+ ///
+ /// Returns true if this is the broadcast MAC address, FF:FF:FF:FF:FF:FF.
+ pub fn is_broadcast(&self) -> bool {
+ // https://en.wikipedia.org/wiki/MAC_address#Unicast_vs._multicast
+ *self == BROADCAST_MAC
+ }
+}
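+
+// A quick sketch of the bit tests above (editor's illustration, not part of
+// this change): the first byte of 01:00:5E:00:00:01 is 0x01, whose low bit
+// is 1, so that address is multicast; the first byte of 02:01:02:03:04:05 is
+// 0x02, whose low bit is 0, so that address is unicast.
+//
+// let m = MAC::new([0x01, 0x00, 0x5E, 0x00, 0x00, 0x01]);
+// assert!(m.is_multicast() && !m.is_unicast());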
+
+#[repr(u16)]
+pub enum EtherType {
+ Ipv4 = 0x0800,
+ Arp = 0x0806,
+ Ipv6 = 0x86DD,
+}
diff --git a/bin/recovery_netstack/src/device/mod.rs b/bin/recovery_netstack/src/device/mod.rs
new file mode 100644
index 0000000..6451715
--- /dev/null
+++ b/bin/recovery_netstack/src/device/mod.rs
@@ -0,0 +1,9 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+pub mod ethernet;
+
+// TODO(joshlf)
+#[derive(Copy, Clone)]
+pub struct DeviceAddr;
diff --git a/bin/recovery_netstack/src/ip/address.rs b/bin/recovery_netstack/src/ip/address.rs
new file mode 100644
index 0000000..1d26db2
--- /dev/null
+++ b/bin/recovery_netstack/src/ip/address.rs
@@ -0,0 +1,322 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::fmt::{Debug, Display};
+
+// use ip_macro::ip;
+
+use byteorder::{BigEndian, ByteOrder};
+
+/// An IP protocol version.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub enum IpVersion {
+ V4,
+ V6,
+}
+
+impl IpVersion {
+ pub fn version_number(&self) -> u8 {
+ match self {
+ IpVersion::V4 => 4,
+ IpVersion::V6 => 6,
+ }
+ }
+}
+
+// Ensure that only Ipv4 and Ipv6 can implement IpVersion and that only Ipv4Addr
+// and Ipv6Addr can implement IpAddr.
+trait Sealed {}
+
+/// A trait for IP protocol versions.
+///
+/// `Ip` encapsulates the details of a version of the IP protocol. It includes
+/// the `IpVersion` enum (`VERSION`) and address type (`Addr`). It is
+/// implemented by `Ipv4` and `Ipv6`.
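+///
+/// An illustrative sketch of version-generic code (editor's example, not
+/// part of this change):
+///
+/// ```rust,ignore
+/// fn log_loopback<I: Ip>() {
+///     println!("IPv{} loopback: {}", I::VERSION.version_number(), I::LOOPBACK_ADDRESS);
+/// }
+/// ```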
+#[allow(private_in_public)]
+pub trait Ip: Sealed {
+ /// The IP version.
+ ///
+ /// `V4` for IPv4 and `V6` for IPv6.
+ const VERSION: IpVersion;
+
+ /// The default loopback address.
+ ///
+ /// When sending packets to a loopback interface, this address is used as
+ /// the source address. It is an address in the loopback subnet.
+ const LOOPBACK_ADDRESS: Self::Addr;
+
+ /// The subnet of loopback addresses.
+ ///
+ /// Addresses in this subnet must not appear outside a host, and may only be
+ /// used for loopback interfaces.
+ const LOOPBACK_SUBNET: Subnet<Self::Addr>;
+
+ /// The address type for this IP version.
+ ///
+ /// `Ipv4Addr` for IPv4 and `Ipv6Addr` for IPv6.
+ type Addr: IpAddr;
+}
+
+/// IPv4.
+///
+/// `Ipv4` implements `Ip` for IPv4.
+pub struct Ipv4;
+
+impl Ip for Ipv4 {
+ const VERSION: IpVersion = IpVersion::V4;
+ // https://tools.ietf.org/html/rfc5735#section-3
+ // const LOOPBACK_ADDRESS: Ipv4Addr = ip!(127.0.0.1);
+ const LOOPBACK_ADDRESS: Ipv4Addr = Ipv4Addr::new([127, 0, 0, 1]);
+ const LOOPBACK_SUBNET: Subnet<Ipv4Addr> = Subnet {
+ network: Ipv4Addr::new([127, 0, 0, 0]),
+ prefix: 8,
+ };
+ type Addr = Ipv4Addr;
+}
+
+impl Sealed for Ipv4 {}
+
+/// IPv6.
+///
+/// `Ipv6` implements `Ip` for IPv6.
+pub struct Ipv6;
+
+impl Ip for Ipv6 {
+ const VERSION: IpVersion = IpVersion::V6;
+ // const LOOPBACK_ADDRESS: Ipv6Addr = ip!(::1);
+ const LOOPBACK_ADDRESS: Ipv6Addr = Ipv6Addr::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]);
+ const LOOPBACK_SUBNET: Subnet<Ipv6Addr> = Subnet {
+ network: Ipv6Addr::new([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]),
+ prefix: 128,
+ };
+ type Addr = Ipv6Addr;
+}
+
+impl Sealed for Ipv6 {}
+
+/// An IPv4 or IPv6 address.
+#[allow(private_in_public)]
+pub trait IpAddr
+where
+ Self: Eq + Copy + Display + Sealed,
+{
+ /// The number of bytes in an address of this type.
+ ///
+ /// 4 for IPv4 and 16 for IPv6.
+ const BYTES: u8;
+
+ /// The IP version type of this address.
+ ///
+ /// `Ipv4` for `Ipv4Addr` and `Ipv6` for `Ipv6Addr`.
+ type Version: Ip;
+
+ /// Get the underlying bytes of the address.
+ fn bytes(&self) -> &[u8];
+
+ /// Mask off the top bits of the address.
+ ///
+ /// Return a copy of `self` where all but the top `bits` bits are set to 0.
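+ ///
+ /// An illustrative sketch (editor's example, mirroring the commented-out
+ /// tests at the bottom of this file):
+ ///
+ /// ```rust,ignore
+ /// let addr = Ipv4Addr::new([255, 0, 255, 255]);
+ /// assert_eq!(addr.mask(24), Ipv4Addr::new([255, 0, 255, 0]));
+ /// ```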
+ fn mask(&self, bits: u8) -> Self;
+}
+
+/// An IPv4 address.
+#[derive(Copy, Clone, Default, PartialEq, Eq)]
+pub struct Ipv4Addr([u8; 4]);
+
+impl Ipv4Addr {
+ /// Create a new IPv4 address.
+ pub const fn new(bytes: [u8; 4]) -> Self {
+ Ipv4Addr(bytes)
+ }
+
+ pub const fn ipv4_bytes(&self) -> [u8; 4] {
+ self.0
+ }
+}
+
+impl IpAddr for Ipv4Addr {
+ const BYTES: u8 = 4;
+
+ type Version = Ipv4;
+
+ fn mask(&self, bits: u8) -> Self {
+ assert!(bits <= 32);
+ if bits == 32 {
+ // shifting left by the size of the value is undefined
+ Ipv4Addr([0; 4])
+ } else {
+ let mask = <u32>::max_value() << (32 - bits);
+ let masked = BigEndian::read_u32(&self.0) & mask;
+ let mut ret = Ipv4Addr::default();
+ BigEndian::write_u32(&mut ret.0, masked);
+ ret
+ }
+ }
+
+ fn bytes(&self) -> &[u8] {
+ &self.0
+ }
+}
+
+impl Sealed for Ipv4Addr {}
+
+impl Display for Ipv4Addr {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+ write!(f, "{}.{}.{}.{}", self.0[0], self.0[1], self.0[2], self.0[3])
+ }
+}
+
+impl Debug for Ipv4Addr {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+ Display::fmt(self, f)
+ }
+}
+
+/// An IPv6 address.
+#[derive(Copy, Clone, Default, PartialEq, Eq)]
+pub struct Ipv6Addr([u8; 16]);
+
+impl Ipv6Addr {
+ /// Create a new IPv6 address.
+ pub const fn new(bytes: [u8; 16]) -> Self {
+ Ipv6Addr(bytes)
+ }
+
+ pub const fn ipv6_bytes(&self) -> [u8; 16] {
+ self.0
+ }
+}
+
+impl IpAddr for Ipv6Addr {
+ const BYTES: u8 = 16;
+
+ type Version = Ipv6;
+
+ fn mask(&self, bits: u8) -> Self {
+ assert!(bits <= 128);
+ if bits == 128 {
+ // shifting left by the size of the value is undefined
+ Ipv6Addr([0; 16])
+ } else {
+ let mask = <u128>::max_value() << (128 - bits);
+ let masked = BigEndian::read_u128(&self.0) & mask;
+ let mut ret = Ipv6Addr::default();
+ BigEndian::write_u128(&mut ret.0, masked);
+ ret
+ }
+ }
+
+ fn bytes(&self) -> &[u8] {
+ &self.0
+ }
+}
+
+impl Sealed for Ipv6Addr {}
+
+impl Display for Ipv6Addr {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+ // TODO(joshlf): Replace longest run of zeros with ::.
+ let to_u16 = |idx| BigEndian::read_u16(&self.0[idx..idx + 2]);
+ write!(
+ f,
+ "{:04x}:{:04x}:{:04x}:{:04x}:{:04x}:{:04x}:{:04x}:{:04x}",
+ to_u16(0),
+ to_u16(2),
+ to_u16(4),
+ to_u16(6),
+ to_u16(8),
+ to_u16(10),
+ to_u16(12),
+ to_u16(14)
+ )
+ }
+}
+
+impl Debug for Ipv6Addr {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+ Display::fmt(self, f)
+ }
+}
+
+/// An IP subnet.
+///
+/// `Subnet` is a combination of an IP network address and a prefix length.
+#[derive(Copy, Clone)]
+pub struct Subnet<A: IpAddr> {
+ // invariant: normalized to contain only prefix bits
+ network: A,
+ prefix: u8,
+}
+
+impl<A: IpAddr> Subnet<A> {
+ /// Create a new subnet.
+ ///
+ /// Create a new subnet with the given network address and prefix length.
+ ///
+ /// # Panics
+ ///
+ /// `new` panics if `prefix` is longer than the number of bits in this type
+ /// of IP address (32 for IPv4 and 128 for IPv6).
+ pub fn new(network: A, prefix: u8) -> Subnet<A> {
+ assert!(prefix <= A::BYTES * 8);
+ let network = network.mask(prefix);
+ Subnet { network, prefix }
+ }
+
+ /// Get the network address component of this subnet.
+ ///
+ /// `network` returns the network address component of this subnet. Any bits
+ /// beyond the prefix will be zero.
+ pub fn network(&self) -> A {
+ self.network
+ }
+
+ /// Get the prefix length component of this subnet.
+ pub fn prefix(&self) -> u8 {
+ self.prefix
+ }
+
+ /// Test whether an address is in this subnet.
+ ///
+ /// Test whether `address` is in this subnet by testing whether the prefix
+ /// bits match the prefix bits of the subnet's network address. This is
+ /// equivalent to `subnet.network() == address.mask(subnet.prefix())`.
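+ ///
+ /// An illustrative sketch (editor's example, mirroring the commented-out
+ /// tests below):
+ ///
+ /// ```rust,ignore
+ /// let subnet = Subnet::new(Ipv4Addr::new([255, 0, 255, 255]), 24);
+ /// assert!(subnet.contains(Ipv4Addr::new([255, 0, 255, 0])));
+ /// assert!(!subnet.contains(Ipv4Addr::new([0, 0, 255, 255])));
+ /// ```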
+ pub fn contains(&self, address: A) -> bool {
+ self.network == address.mask(self.prefix)
+ }
+}
+
+impl<A: IpAddr> Display for Subnet<A> {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+ write!(f, "{}/{}", self.network, self.prefix)
+ }
+}
+
+impl<A: IpAddr> Debug for Subnet<A> {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+ write!(f, "{}/{}", self.network, self.prefix)
+ }
+}
+
+// #[cfg(test)]
+// mod tests {
+// use ip_macro::ip;
+
+// #[test]
+// fn test_subnet_contains() {
+// let subnet = ip!(255.0.255.255/24);
+// assert_eq!(subnet.network(), ip!(255.0.255.0));
+// assert!(subnet.contains(ip!(255.0.255.255)));
+// assert!(subnet.contains(ip!(255.0.255.0)));
+// assert!(!subnet.contains(ip!(0.0.255.255)));
+// assert!(!subnet.contains(ip!(255.255.255.255)));
+
+// let subnet = ip!(ffff:0000:ffff::/48);
+// assert_eq!(subnet.network(), ip!(ffff:0000:ffff::));
+// assert!(subnet.contains(ip!(ffff:0:ffff:ffff::)));
+// assert!(subnet.contains(ip!(ffff:0:ffff::)));
+// assert!(!subnet.contains(ip!(0:0:ffff:ffff::)));
+// assert!(!subnet.contains(ip!(ffff:ffff:ffff:ffff::)));
+// }
+// }
diff --git a/bin/recovery_netstack/src/ip/forwarding.rs b/bin/recovery_netstack/src/ip/forwarding.rs
new file mode 100644
index 0000000..fe4f917
--- /dev/null
+++ b/bin/recovery_netstack/src/ip/forwarding.rs
@@ -0,0 +1,121 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use device::DeviceAddr;
+use ip::*;
+
+// TODO(joshlf):
+// - Implement route deletion.
+// - How do we detect circular routes? Do we attempt to detect at rule
+// installation time? At runtime? Using what algorithm?
+
+// NOTE on loopback addresses: Loopback addresses should be handled before
+// reaching the forwarding table. For that reason, we do not prevent a rule
+// whose subnet is a subset of the loopback subnet from being installed; such
+// rules will never be triggered anyway, so the logic to detect them would be
+// needless complexity.
+
+/// The destination of an outbound IP packet.
+///
+/// Outbound IP packets are sent to a particular device (specified by the
+/// `device` field). They are sent to a particular IP host on the local network
+/// attached to that device, identified by `next_hop`. Note that `next_hop` is
+/// not necessarily the destination IP address of the IP packet. In particular,
+/// if the destination is not on the local network, the `next_hop` will be the
+/// IP address of the next IP router on the way to the destination.
+pub struct Destination<I: Ip> {
+ pub next_hop: I::Addr,
+ pub device: DeviceAddr,
+}
+
+#[derive(Copy, Clone)]
+struct Entry<I: Ip> {
+ subnet: Subnet<I::Addr>,
+ dest: EntryDest<I::Addr>,
+}
+
+#[derive(Copy, Clone)]
+enum EntryDest<A> {
+ Local { device: DeviceAddr },
+ Remote { next_hop: A },
+}
+
+/// An IP forwarding table.
+///
+/// `ForwardingTable` maps destination subnets to the nearest IP hosts (on the
+/// local network) able to route IP packets to those subnets.
+#[derive(Default)]
+pub struct ForwardingTable<I: Ip> {
+ entries: Vec<Entry<I>>,
+}
+
+impl<I: Ip> ForwardingTable<I> {
+ pub fn add_route(&mut self, subnet: Subnet<I::Addr>, next_hop: I::Addr) {
+ self.entries.push(Entry {
+ subnet,
+ dest: EntryDest::Remote { next_hop },
+ });
+ }
+
+ pub fn add_device_route(&mut self, subnet: Subnet<I::Addr>, device: DeviceAddr) {
+ self.entries.push(Entry {
+ subnet,
+ dest: EntryDest::Local { device },
+ });
+ }
+
+ /// Look up an address in the table.
+ ///
+ /// Look up an IP address in the table, returning a next hop IP address and
+ /// a device to send over. If `address` is link-local, then the returned
+ /// next hop will be `address`. Otherwise, it will be the link-local address
+ /// of an IP router capable of delivering packets to `address`.
+ ///
+ /// If `address` matches an entry which maps to an IP address, `lookup` will
+ /// look that address up in the table as well, continuing until a link-local
+ /// address and device are found.
+ ///
+ /// If multiple entries match `address` or any intermediate IP address, the
+ /// entry with the longest prefix will be chosen.
+ ///
+ /// # Panics
+ ///
+ /// `lookup` asserts that `address` is not in the loopback interface.
+ /// Traffic destined for loopback addresses from local applications should
+ /// be properly routed without consulting the forwarding table, and traffic
+ /// from the network with a loopback destination address is invalid and
+ /// should be dropped before consulting the forwarding table.
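+ ///
+ /// An illustrative sketch (editor's example; `device` stands in for some
+ /// `DeviceAddr`):
+ ///
+ /// ```rust,ignore
+ /// let mut table = ForwardingTable::<Ipv4>::default();
+ /// table.add_device_route(Subnet::new(Ipv4Addr::new([10, 0, 0, 0]), 8), device);
+ /// table.add_route(Subnet::new(Ipv4Addr::new([10, 1, 0, 0]), 16), Ipv4Addr::new([10, 0, 0, 1]));
+ /// // 10.1.2.3 matches both entries; the /16 has the longer prefix, so its
+ /// // next hop (10.0.0.1) is looked up recursively, yielding the /8's device.
+ /// let dest = table.lookup(Ipv4Addr::new([10, 1, 2, 3])).unwrap();
+ /// assert_eq!(dest.next_hop, Ipv4Addr::new([10, 0, 0, 1]));
+ /// ```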
+ pub fn lookup(&self, address: I::Addr) -> Option<Destination<I>> {
+ assert!(
+ !I::LOOPBACK_SUBNET.contains(address),
+ "loopback addresses should be handled before consulting the forwarding table"
+ );
+
+ let best_match = self.entries
+ .iter()
+ .filter(|e| e.subnet.contains(address))
+ .max_by_key(|e| e.subnet.prefix());
+
+ match best_match {
+ Some(Entry {
+ dest: EntryDest::Local { device },
+ ..
+ }) => Some(Destination {
+ next_hop: address,
+ device: *device,
+ }),
+ Some(Entry {
+ dest: EntryDest::Remote { next_hop },
+ ..
+ }) => self.lookup(*next_hop),
+ None => None,
+ }
+ }
+}
diff --git a/bin/recovery_netstack/src/ip/mod.rs b/bin/recovery_netstack/src/ip/mod.rs
new file mode 100644
index 0000000..d89e3a4
--- /dev/null
+++ b/bin/recovery_netstack/src/ip/mod.rs
@@ -0,0 +1,30 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+mod address;
+mod forwarding;
+
+pub use self::address::*;
+
+/// An IP protocol or next header number.
+///
+/// For IPv4, this is the protocol number. For IPv6, this is the next header
+/// number.
+#[repr(u8)]
+pub enum IpProto {
+ Tcp = 6,
+ Udp = 17,
+}
+
+pub struct Ipv4Option {
+ pub copied: bool,
+ // TODO: include "Option Class"?
+ pub inner: Ipv4OptionInner,
+}
+
+pub enum Ipv4OptionInner {
+ // According to https://myweb.ntut.edu.tw/~kwke/DC2006/ipo.pdf, maximum IPv4
+ // option length is 40 bytes, leaving 38 bytes for data.
+ Unrecognized { kind: u8, len: u8, data: [u8; 38] },
+}
diff --git a/bin/recovery_netstack/src/main.rs b/bin/recovery_netstack/src/main.rs
new file mode 100644
index 0000000..1b0fdaf
--- /dev/null
+++ b/bin/recovery_netstack/src/main.rs
@@ -0,0 +1,20 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#![feature(const_fn)]
+#![feature(nonzero)]
+#![feature(proc_macro, proc_macro_non_items)]
+#![feature(repr_transparent)]
+
+extern crate byteorder;
+// extern crate ip_macro;
+extern crate zerocopy;
+
+mod device;
+mod ip;
+mod queue;
+mod transport;
+mod wire;
+
+fn main() {}
diff --git a/bin/recovery_netstack/src/queue.rs b/bin/recovery_netstack/src/queue.rs
new file mode 100644
index 0000000..2c20880
--- /dev/null
+++ b/bin/recovery_netstack/src/queue.rs
@@ -0,0 +1,33 @@
+use std::collections::VecDeque;
+
+pub trait Worker: Sized {
+ type Event;
+
+ fn handle_event<P: FnMut(Self::Event)>(&mut self, push: P, event: Self::Event);
+}
+
+pub struct WorkQueue<W: Worker> {
+ worker: W,
+ queue: VecDeque<W::Event>,
+}
+
+impl<W: Worker> WorkQueue<W> {
+ pub fn new(worker: W) -> WorkQueue<W> {
+ WorkQueue {
+ worker,
+ queue: VecDeque::new(),
+ }
+ }
+
+ pub fn push_event(&mut self, event: W::Event) {
+ self.queue.push_back(event);
+ }
+
+ pub fn run(&mut self) {
+ while let Some(event) = self.queue.pop_front() {
+ let queue = &mut self.queue;
+ let push = |event| queue.push_back(event);
+ self.worker.handle_event(push, event);
+ }
+ }
+}
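+
+// Usage sketch (editor's illustration, not part of this change): a worker
+// whose events are integers and which schedules follow-up work by pushing
+// new events.
+//
+// struct Doubler;
+//
+// impl Worker for Doubler {
+//     type Event = u32;
+//     fn handle_event<P: FnMut(u32)>(&mut self, mut push: P, event: u32) {
+//         if event < 8 {
+//             push(event * 2);
+//         }
+//     }
+// }
+//
+// let mut q = WorkQueue::new(Doubler);
+// q.push_event(1);
+// q.run(); // handles 1, 2, 4, and 8, in that order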
diff --git a/bin/recovery_netstack/src/transport/mod.rs b/bin/recovery_netstack/src/transport/mod.rs
new file mode 100644
index 0000000..fcb722b
--- /dev/null
+++ b/bin/recovery_netstack/src/transport/mod.rs
@@ -0,0 +1 @@
+pub mod tcp;
diff --git a/bin/recovery_netstack/src/transport/tcp.rs b/bin/recovery_netstack/src/transport/tcp.rs
new file mode 100644
index 0000000..b03ebdd
--- /dev/null
+++ b/bin/recovery_netstack/src/transport/tcp.rs
@@ -0,0 +1,19 @@
+pub enum TcpOption {
+ Mss(u16),
+ WindowScale(u8),
+ SackPermitted,
+ Sack {
+ blocks: [TcpSackBlock; 4],
+ num_blocks: u8,
+ },
+ Timestamp {
+ ts_val: u32,
+ ts_echo_reply: u32,
+ },
+}
+
+#[derive(Copy, Clone, Default)]
+pub struct TcpSackBlock {
+ pub left_edge: u32,
+ pub right_edge: u32,
+}
diff --git a/bin/recovery_netstack/src/wire/ethernet.rs b/bin/recovery_netstack/src/wire/ethernet.rs
new file mode 100644
index 0000000..9045140
--- /dev/null
+++ b/bin/recovery_netstack/src/wire/ethernet.rs
@@ -0,0 +1,122 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use byteorder::{BigEndian, ByteOrder};
+use zerocopy::{AsBytes, ByteSlice, FromBytes, LayoutVerified};
+
+use device::ethernet::MAC;
+use wire::util::PacketFormat;
+
+// HeaderPrefix has the same memory layout (thanks to repr(C, packed)) as an
+// Ethernet header prefix. Thus, we can simply reinterpret the bytes of the
+// Ethernet header prefix as a HeaderPrefix and then safely access its fields.
+// Note, however, that it is *not* safe to have the types of any of the fields be
+// anything other than u8 or [u8; x] since network byte order (big endian) may
+// not be the same as the endianness of the computer we're running on, and since
+// repr(packed) is only safe with values with no alignment requirements.
+#[repr(C, packed)]
+struct HeaderPrefix {
+ dst_mac: [u8; 6],
+ src_mac: [u8; 6],
+}
+
+unsafe impl FromBytes for HeaderPrefix {}
+unsafe impl AsBytes for HeaderPrefix {}
+
+const TPID_8021Q: u16 = 0x8100;
+const TPID_8021AD: u16 = 0x88a8;
+
+enum Tag {
+ Tag8021Q(u16),
+ Tag8021ad(u16),
+ None,
+}
+
+/// An Ethernet frame.
+///
+/// An `EthernetFrame` shares its underlying memory with the byte slice it was
+/// parsed from or serialized to, meaning that no copying or extra allocation is
+/// necessary.
+pub struct EthernetFrame<B> {
+ hdr_prefix: LayoutVerified<B, HeaderPrefix>,
+ tag: Tag,
+ ethertype: u16,
+ body: B,
+}
+
+impl<B> PacketFormat for EthernetFrame<B> {
+ const MAX_HEADER_BYTES: usize = 18;
+ const MAX_FOOTER_BYTES: usize = 0;
+}
+
+impl<B: ByteSlice> EthernetFrame<B> {
+ /// Parse an Ethernet frame.
+ ///
+ /// `parse` parses `bytes` as an Ethernet frame.
+ pub fn parse(bytes: B) -> Result<EthernetFrame<B>, ()> {
+ // See for details: https://en.wikipedia.org/wiki/Ethernet_frame#Frame_%E2%80%93_data_link_layer
+
+ let (hdr_prefix, rest) =
+ LayoutVerified::<B, HeaderPrefix>::new_from_prefix(bytes).ok_or(())?;
+ if rest.len() < 46 {
+ // "The minimum payload is 42 octets when an 802.1Q tag is present
+ // and 46 octets when absent." - Wikipedia
+ //
+ // An 802.1Q tag is 4 bytes, and we haven't consumed it yet, so in
+ // either case, the minimum is 46.
+ return Err(());
+ }
+
+ // "The IEEE 802.1Q tag or IEEE 802.1ad tag, if present, is a four-octet
+ // field that indicates virtual LAN (VLAN) membership and IEEE 802.1p
+ // priority. The first two octets of the tag are called the Tag Protocol
+ // IDentifier and double as the EtherType field indicating that the
+ // frame is either 802.1Q or 802.1ad tagged. 802.1Q uses a TPID of
+ // 0x8100. 802.1ad uses a TPID of 0x88a8." - Wikipedia
+ let ethertype = BigEndian::read_u16(&rest);
+ // in case a tag is present; if not, these are the first two bytes of
+ // the payload, and we don't use this variable
+ let next_u16 = BigEndian::read_u16(&rest[2..]);
+ let (tag, ethertype, body) = match ethertype {
+ self::TPID_8021Q => {
+ // skip the 4-byte tag (TPID + TCI; the TCI was already read as
+ // next_u16); the real EtherType is the next two bytes, and the
+ // payload follows it
+ let (_, rest) = rest.split_at(4);
+ let (ethertype, body) = rest.split_at(2);
+ (
+ Tag::Tag8021Q(next_u16),
+ BigEndian::read_u16(&ethertype),
+ body,
+ )
+ }
+ self::TPID_8021AD => {
+ let (_, rest) = rest.split_at(4);
+ let (ethertype, body) = rest.split_at(2);
+ (
+ Tag::Tag8021ad(next_u16),
+ BigEndian::read_u16(&ethertype),
+ body,
+ )
+ }
+ ethertype => (Tag::None, ethertype, rest),
+ };
+
+ Ok(EthernetFrame {
+ hdr_prefix,
+ tag,
+ ethertype,
+ body,
+ })
+ }
+}
+
+impl<B: ByteSlice> EthernetFrame<B> {
+ pub fn src_mac(&self) -> MAC {
+ MAC::new(self.hdr_prefix.src_mac)
+ }
+
+ pub fn dst_mac(&self) -> MAC {
+ MAC::new(self.hdr_prefix.dst_mac)
+ }
+
+ pub fn ethertype(&self) -> u16 {
+ self.ethertype
+ }
+}
diff --git a/bin/recovery_netstack/src/wire/ipv4.rs b/bin/recovery_netstack/src/wire/ipv4.rs
new file mode 100644
index 0000000..f7009ca
--- /dev/null
+++ b/bin/recovery_netstack/src/wire/ipv4.rs
@@ -0,0 +1,241 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use byteorder::{BigEndian, ByteOrder};
+use zerocopy::{AsBytes, ByteSlice, FromBytes, LayoutVerified};
+
+use ip::{Ipv4Addr, Ipv4Option};
+use wire::util::{Checksum, Options, PacketFormat};
+
+use self::options::Ipv4OptionImpl;
+
+const HEADER_PREFIX_SIZE: usize = 20;
+
+// HeaderPrefix has the same memory layout (thanks to repr(C, packed)) as an
+// IPv4 header. Thus, we can simply reinterpret the bytes of the IPv4 header as
+// a HeaderPrefix and then safely access its fields. Note, however, that it is
+// *not* safe to have the types of any of the fields be anything other than u8
+// or [u8; x] since network byte order (big endian) may not be the same as the
+// endianness of the computer we're running on, and since repr(packed) is only
+// safe with values with no alignment requirements.
+#[repr(C, packed)]
+struct HeaderPrefix {
+ version_ihl: u8,
+ dscp_ecn: u8,
+ total_len: [u8; 2],
+ id: [u8; 2],
+ flags_frag_off: [u8; 2],
+ ttl: u8,
+ proto: u8,
+ hdr_checksum: [u8; 2],
+ src_ip: [u8; 4],
+ dst_ip: [u8; 4],
+}
+
+unsafe impl FromBytes for HeaderPrefix {}
+unsafe impl AsBytes for HeaderPrefix {}
+
+impl HeaderPrefix {
+ fn version(&self) -> u8 {
+ self.version_ihl >> 4
+ }
+
+ fn ihl(&self) -> u8 {
+ self.version_ihl & 0xF
+ }
+}
+
+/// An IPv4 packet.
+///
+/// An `Ipv4Packet` shares its underlying memory with the byte slice it was
+/// parsed from or serialized to, meaning that no copying or extra allocation is
+/// necessary.
+pub struct Ipv4Packet<B> {
+ hdr_prefix: LayoutVerified<B, HeaderPrefix>,
+ options: Options<B, Ipv4OptionImpl>,
+ body: B,
+}
+
+impl<B> PacketFormat for Ipv4Packet<B> {
+ const MAX_HEADER_BYTES: usize = 60;
+ const MAX_FOOTER_BYTES: usize = 0;
+}
+
+impl<B: ByteSlice> Ipv4Packet<B> {
+ /// Parse an IPv4 packet.
+ ///
+ /// `parse` parses `bytes` as an IPv4 packet and validates the checksum.
+ #[cfg_attr(feature = "clippy", allow(needless_pass_by_value))]
+ pub fn parse(bytes: B) -> Result<Ipv4Packet<B>, ()> {
+ // See for details: https://en.wikipedia.org/wiki/IPv4#Header
+
+ let total_len = bytes.len();
+ let (hdr_prefix, rest) =
+ LayoutVerified::<B, HeaderPrefix>::new_from_prefix(bytes).ok_or(())?;
+ let hdr_bytes = (hdr_prefix.ihl() * 4) as usize;
+ if hdr_bytes < HEADER_PREFIX_SIZE || hdr_bytes > total_len {
+ // an IHL of less than 5 would make the subtraction below underflow
+ return Err(());
+ }
+ let (options, body) = rest.split_at(hdr_bytes - HEADER_PREFIX_SIZE);
+ let options = Options::parse(options).map_err(|_| ())?;
+ let packet = Ipv4Packet {
+ hdr_prefix,
+ options,
+ body,
+ };
+ if packet.hdr_prefix.version() != 4 {
+ return Err(());
+ }
+ if packet.compute_header_checksum() != 0 {
+ return Err(());
+ }
+ if packet.total_length() as usize != total_len {
+ // we don't yet support IPv4 fragmentation
+ return Err(());
+ }
+
+ Ok(packet)
+ }
+
+ pub fn iter_options<'a>(&'a self) -> impl 'a + Iterator<Item = Ipv4Option> {
+ self.options.iter()
+ }
+}
+
+impl<B: ByteSlice> Ipv4Packet<B> {
+ fn compute_header_checksum(&self) -> u16 {
+ let mut c = Checksum::new();
+ c.add_bytes(self.hdr_prefix.bytes());
+ c.add_bytes(self.options.bytes());
+ c.sum()
+ }
+
+ pub fn body(&self) -> &[u8] {
+ &self.body
+ }
+
+ pub fn version(&self) -> u8 {
+ self.hdr_prefix.version()
+ }
+
+ pub fn ihl(&self) -> u8 {
+ self.hdr_prefix.ihl()
+ }
+
+ pub fn dscp(&self) -> u8 {
+ self.hdr_prefix.dscp_ecn >> 2
+ }
+
+ pub fn ecn(&self) -> u8 {
+ self.hdr_prefix.dscp_ecn & 3
+ }
+
+ pub fn total_length(&self) -> u16 {
+ BigEndian::read_u16(&self.hdr_prefix.total_len)
+ }
+
+ pub fn id(&self) -> u16 {
+ BigEndian::read_u16(&self.hdr_prefix.id)
+ }
+
+ pub fn flags(&self) -> u8 {
+ self.hdr_prefix.flags_frag_off[0] >> 5
+ }
+
+ pub fn fragment_offset(&self) -> u16 {
+ ((u16::from(self.hdr_prefix.flags_frag_off[0] & 0x1F)) << 8)
+ | u16::from(self.hdr_prefix.flags_frag_off[1])
+ }
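+
+ // Worked sketch (editor's illustration): flags_frag_off = [0x40, 0x64]
+ // gives flags() = 0x40 >> 5 = 2 (the DF bit) and fragment_offset() =
+ // ((0x40 & 0x1F) << 8) | 0x64 = 100, measured in units of 8 bytes.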
+
+ pub fn ttl(&self) -> u8 {
+ self.hdr_prefix.ttl
+ }
+
+ pub fn proto(&self) -> u8 {
+ self.hdr_prefix.proto
+ }
+
+ pub fn hdr_checksum(&self) -> u16 {
+ BigEndian::read_u16(&self.hdr_prefix.hdr_checksum)
+ }
+
+ pub fn src_ip(&self) -> Ipv4Addr {
+ Ipv4Addr::new(self.hdr_prefix.src_ip)
+ }
+
+ pub fn dst_ip(&self) -> Ipv4Addr {
+ Ipv4Addr::new(self.hdr_prefix.dst_ip)
+ }
+}
+
+impl<'a> Ipv4Packet<&'a mut [u8]> {
+ pub fn set_id(&mut self, id: u16) {
+ BigEndian::write_u16(&mut self.hdr_prefix.id, id);
+ }
+
+ pub fn set_ttl(&mut self, ttl: u8) {
+ self.hdr_prefix.ttl = ttl;
+ }
+
+ pub fn set_proto(&mut self, proto: u8) {
+ self.hdr_prefix.proto = proto;
+ }
+
+ pub fn set_src_ip(&mut self, src_ip: Ipv4Addr) {
+ self.hdr_prefix.src_ip = src_ip.ipv4_bytes();
+ }
+
+ pub fn set_dst_ip(&mut self, dst_ip: Ipv4Addr) {
+ self.hdr_prefix.dst_ip = dst_ip.ipv4_bytes();
+ }
+
+ /// Compute and set the header checksum.
+ ///
+ /// Compute the header checksum from the current header state, and set it in
+ /// the header.
+ pub fn set_checksum(&mut self) {
+ self.hdr_prefix.hdr_checksum = [0, 0];
+ let c = self.compute_header_checksum();
+ BigEndian::write_u16(&mut self.hdr_prefix.hdr_checksum, c);
+ }
+}
+
+mod options {
+ use ip::{Ipv4Option, Ipv4OptionInner};
+ use wire::util::OptionImpl;
+
+ const OPTION_KIND_EOL: u8 = 0;
+ const OPTION_KIND_NOP: u8 = 1;
+
+ pub struct Ipv4OptionImpl;
+
+ impl OptionImpl for Ipv4OptionImpl {
+ type Output = Ipv4Option;
+ type Error = ();
+
+ fn parse(kind: u8, data: &[u8]) -> Result<Option<Ipv4Option>, ()> {
+ let copied = kind & (1 << 7) > 0;
+ match kind {
+ self::OPTION_KIND_EOL | self::OPTION_KIND_NOP => {
+ unreachable!("wire::util::Options promises to handle EOL and NOP")
+ }
+ kind => if data.len() > 38 {
+ Err(())
+ } else {
+ let len = data.len();
+ let mut d = [0u8; 38];
+ (&mut d[..len]).copy_from_slice(data);
+ Ok(Some(Ipv4Option {
+ copied,
+ inner: Ipv4OptionInner::Unrecognized {
+ kind,
+ len: len as u8,
+ data: d,
+ },
+ }))
+ },
+ }
+ }
+ }
+}
diff --git a/bin/recovery_netstack/src/wire/macros.rs b/bin/recovery_netstack/src/wire/macros.rs
new file mode 100644
index 0000000..c528bb1
--- /dev/null
+++ b/bin/recovery_netstack/src/wire/macros.rs
@@ -0,0 +1,33 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// pub use zerocopy_macro::zerocopy_inner;
+
+macro_rules! zerocopy {
+ (struct $name:ident {
+ $($field_name:ident: $field_type:ty,)*
+ }) => (
+ ::wire::macros::zerocopy_inner!($name|$($field_name: $field_type,)*);
+ );
+}
+
+#[cfg(test)]
+mod tests {
+ // zerocopy!(struct Foo {
+ // a: u8,
+ // b: u16,
+ // c: u3,
+ // d: u2,
+ // e: bool,
+ // f: bool,
+ // g: bool,
+ // });
+
+ // #[test]
+ // #[should_panic]
+ // fn test_panic() {
+ // let mut foo = Foo::default();
+ // foo.set_d(4);
+ // }
+}
diff --git a/bin/recovery_netstack/src/wire/mod.rs b/bin/recovery_netstack/src/wire/mod.rs
new file mode 100644
index 0000000..8d619cc
--- /dev/null
+++ b/bin/recovery_netstack/src/wire/mod.rs
@@ -0,0 +1,27 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//! Serialization and deserialization of wire formats.
+//!
+//! This module provides efficient serialization and deserialization of the
+//! various wire formats used by this program. Where possible, it uses lifetimes
+//! and immutability to allow for safe zero-copy parsing.
+
+// We use repr(packed) in this module to create structs whose layout matches the
+// layout of network packets on the wire. This ensures that the compiler will
+// stop us from using repr(packed) in an unsound manner without using unsafe
+// code.
+#![deny(safe_packed_borrows)]
+
+#[macro_use]
+mod macros;
+mod ethernet;
+mod ipv4;
+mod tcp;
+mod udp;
+mod util;
+
+pub use self::ipv4::*;
+pub use self::tcp::*;
+pub use self::udp::*;
diff --git a/bin/recovery_netstack/src/wire/tcp.rs b/bin/recovery_netstack/src/wire/tcp.rs
new file mode 100644
index 0000000..d88a300
--- /dev/null
+++ b/bin/recovery_netstack/src/wire/tcp.rs
@@ -0,0 +1,366 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::num::NonZeroU16;
+
+use byteorder::{BigEndian, ByteOrder};
+use zerocopy::{AsBytes, ByteSlice, FromBytes, LayoutVerified};
+
+use ip::{Ip, IpAddr, IpProto, IpVersion};
+use transport::tcp::TcpOption;
+use wire::util::{Checksum, Options, PacketFormat};
+
+use self::options::TcpOptionImpl;
+
+const HEADER_PREFIX_SIZE: usize = 20;
+
+// zerocopy!(struct HeaderPrefix {
+// src_port: u16,
+// dst_port: u16,
+// seq_num: u32,
+// ack_num: u32,
+// data_off: u4,
+// reserved: u3,
+// ns: bool,
+// cwr: bool,
+// ece: bool,
+// urg: bool,
+// ack: bool,
+// psh: bool,
+// rst: bool,
+// syn: bool,
+// fin: bool,
+// window_size: u16,
+// checksum: u16,
+// urg_ptr: u16,
+// });
+
+// HeaderPrefix has the same memory layout (thanks to repr(C, packed)) as a TCP
+// header. Thus, we can simply reinterpret the bytes of the TCP header as a
+// HeaderPrefix and then safely access its fields. Note, however, that it is
+// *not* safe to have the types of any of the fields be anything other than u8
+// or [u8; x] since network byte order (big endian) may not be the same as the
+// endianness of the computer we're running on, and since repr(packed) is only
+// safe with values with no alignment requirements.
+#[repr(C, packed)]
+struct HeaderPrefix {
+ src_port: [u8; 2],
+ dst_port: [u8; 2],
+ seq_num: [u8; 4],
+ ack: [u8; 4],
+ data_off_reserved_ns: u8,
+ flags: u8,
+ window_size: [u8; 2],
+ checksum: [u8; 2],
+ urg_ptr: [u8; 2],
+}
+
+unsafe impl FromBytes for HeaderPrefix {}
+unsafe impl AsBytes for HeaderPrefix {}
+
+impl HeaderPrefix {
+ pub fn src_port(&self) -> u16 {
+ BigEndian::read_u16(&self.src_port)
+ }
+
+ pub fn dst_port(&self) -> u16 {
+ BigEndian::read_u16(&self.dst_port)
+ }
+
+ fn data_off(&self) -> u8 {
+ self.data_off_reserved_ns >> 4
+ }
+}
+
+/// A TCP segment.
+///
+/// A `TcpSegment` shares its underlying memory with the byte slice it was
+/// parsed from or serialized to, meaning that no copying or extra allocation is
+/// necessary.
+pub struct TcpSegment<B> {
+ hdr_prefix: LayoutVerified<B, HeaderPrefix>,
+ options: Options<B, TcpOptionImpl>,
+ body: B,
+}
+
+impl<B> PacketFormat for TcpSegment<B> {
+ const MAX_HEADER_BYTES: usize = 60;
+ const MAX_FOOTER_BYTES: usize = 0;
+}
+
+impl<B: ByteSlice> TcpSegment<B> {
+ /// Parse a TCP segment.
+ ///
+ /// `parse` parses `bytes` as a TCP segment and validates the checksum.
+ #[cfg_attr(feature = "clippy", allow(needless_pass_by_value))]
+ pub fn parse<A: IpAddr>(bytes: B, src_ip: A, dst_ip: A) -> Result<TcpSegment<B>, ()> {
+ // See for details: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
+
+ let total_len = bytes.len();
+ let (hdr_prefix, rest) =
+ LayoutVerified::<B, HeaderPrefix>::new_from_prefix(bytes).ok_or(())?;
+ let hdr_bytes = (hdr_prefix.data_off() * 4) as usize;
+ if hdr_bytes < HEADER_PREFIX_SIZE || hdr_bytes > HEADER_PREFIX_SIZE + rest.len() {
+ // a data offset of less than 5 would make the subtraction below underflow
+ return Err(());
+ }
+ let (options, body) = rest.split_at(hdr_bytes - HEADER_PREFIX_SIZE);
+ let options = Options::parse(options).map_err(|_| ())?;
+ let segment = TcpSegment {
+ hdr_prefix,
+ options,
+ body,
+ };
+
+ // For IPv4, the "TCP length" field in the pseudo-header used for
+ // calculating checksums is 16 bits. For IPv6, it's 32 bits. Verify that
+ // the length of the entire segment (header plus body) does not overflow.
+ // The comparison is done in u64 so that `1 << 32` is representable even
+ // on 32-bit platforms, where it cannot be expressed as a usize.
+ if A::Version::VERSION == IpVersion::V4 && total_len >= 1 << 16
+ || total_len as u64 >= 1 << 32
+ {
+ return Err(());
+ }
+ if segment.compute_checksum(src_ip, dst_ip) != 0 {
+ return Err(());
+ }
+ // Reject zero ports up front so that src_port() and dst_port() below
+ // can safely unwrap their NonZeroU16 constructors.
+ if segment.hdr_prefix.src_port() == 0 || segment.hdr_prefix.dst_port() == 0 {
+ return Err(());
+ }
+ Ok(segment)
+ }
+
+ pub fn iter_options<'a>(&'a self) -> impl 'a + Iterator<Item = TcpOption> {
+ self.options.iter()
+ }
+}
+
+impl<B: ByteSlice> TcpSegment<B> {
+ fn compute_checksum<A: IpAddr>(&self, src_ip: A, dst_ip: A) -> u16 {
+ // See for details: https://en.wikipedia.org/wiki/Transmission_Control_Protocol#Checksum_computation
+ let mut checksum = Checksum::new();
+ checksum.add_bytes(src_ip.bytes());
+ checksum.add_bytes(dst_ip.bytes());
+ let total_len =
+ self.hdr_prefix.bytes().len() + self.options.bytes().len() + self.body.len();
+ if A::Version::VERSION == IpVersion::V4 {
+ checksum.add_bytes(&[0]);
+ checksum.add_bytes(&[IpProto::Tcp as u8]);
+ // For IPv4, the "TCP length" field in the pseudo-header is 16 bits.
+ let mut l = [0; 2];
+ BigEndian::write_u16(&mut l, total_len as u16);
+ checksum.add_bytes(&l);
+ } else {
+ // For IPv6, the "TCP length" field in the pseudo-header is 32 bits.
+ let mut l = [0; 4];
+ BigEndian::write_u32(&mut l, total_len as u32);
+ checksum.add_bytes(&l);
+ checksum.add_bytes(&[0; 3]);
+ checksum.add_bytes(&[IpProto::Tcp as u8])
+ }
+ checksum.add_bytes(self.hdr_prefix.bytes());
+ checksum.add_bytes(self.options.bytes());
+ checksum.add_bytes(&self.body);
+ checksum.sum()
+ }
+
+ pub fn body(&self) -> &[u8] {
+ &self.body
+ }
+
+ pub fn src_port(&self) -> NonZeroU16 {
+ NonZeroU16::new(self.hdr_prefix.src_port()).unwrap()
+ }
+
+ pub fn dst_port(&self) -> NonZeroU16 {
+ NonZeroU16::new(self.hdr_prefix.dst_port()).unwrap()
+ }
+
+ pub fn seq_num(&self) -> u32 {
+ // self.hdr_prefix.get_seq_num()
+ BigEndian::read_u32(&self.hdr_prefix.seq_num)
+ }
+
+ fn get_flag(&self, mask: u8) -> bool {
+ self.hdr_prefix.flags & mask > 0
+ }
+
+ pub fn ack_num(&self) -> u32 {
+ // self.hdr_prefix.get_ack_num()
+ BigEndian::read_u32(&self.hdr_prefix.ack)
+ }
+
+ pub fn ack(&self) -> bool {
+ // self.hdr_prefix.get_ack()
+ self.get_flag(ACK_MASK)
+ }
+
+ pub fn rst(&self) -> bool {
+ // self.hdr_prefix.get_rst()
+ self.get_flag(RST_MASK)
+ }
+
+ pub fn syn(&self) -> bool {
+ // self.hdr_prefix.get_syn()
+ self.get_flag(SYN_MASK)
+ }
+
+ pub fn fin(&self) -> bool {
+ // self.hdr_prefix.get_fin()
+ self.get_flag(FIN_MASK)
+ }
+
+ pub fn window_size(&self) -> u16 {
+ // self.hdr_prefix.get_window_size()
+ BigEndian::read_u16(&self.hdr_prefix.window_size)
+ }
+}
+
+impl<'a> TcpSegment<&'a mut [u8]> {
+ pub fn set_src_port(&mut self, src_port: NonZeroU16) {
+ // self.hdr_prefix.set_src_port(src_port.get());
+ BigEndian::write_u16(&mut self.hdr_prefix.src_port, src_port.get());
+ }
+
+ pub fn set_dst_port(&mut self, dst_port: NonZeroU16) {
+ // self.hdr_prefix.set_src_port(dst_port.get());
+ BigEndian::write_u16(&mut self.hdr_prefix.dst_port, dst_port.get());
+ }
+
+ pub fn set_seq_num(&mut self, seq_num: u32) {
+ // self.hdr_prefix.set_seq_num(seq_num);
+ BigEndian::write_u32(&mut self.hdr_prefix.seq_num, seq_num);
+ }
+
+ pub fn set_ack_num(&mut self, ack_num: u32) {
+ // self.hdr_prefix.set_seq_num(ack_num);
+ BigEndian::write_u32(&mut self.hdr_prefix.ack, ack_num);
+ }
+
+ fn set_flag(&mut self, mask: u8, set: bool) {
+ if set {
+ self.hdr_prefix.flags |= mask;
+ } else {
+ self.hdr_prefix.flags &= !mask;
+ }
+ }
+
+ pub fn set_ack(&mut self, ack: bool) {
+ // self.hdr_prefix.set_ack(ack);
+ self.set_flag(ACK_MASK, ack);
+ }
+
+ pub fn set_rst(&mut self, rst: bool) {
+ // self.hdr_prefix.set_rst(rst);
+ self.set_flag(RST_MASK, rst);
+ }
+
+ pub fn set_syn(&mut self, syn: bool) {
+ // self.hdr_prefix.set_syn(syn);
+ self.set_flag(SYN_MASK, syn);
+ }
+
+ pub fn set_fin(&mut self, fin: bool) {
+ // self.hdr_prefix.set_fin(fin);
+ self.set_flag(FIN_MASK, fin);
+ }
+
+ pub fn set_window_size(&mut self, window_size: u16) {
+ // self.hdr_prefix.set_window_size(window_size);
+ BigEndian::write_u16(&mut self.hdr_prefix.window_size, window_size);
+ }
+
+ /// Compute and set the TCP checksum.
+ ///
+ /// Compute the TCP checksum from the current segment state, source IP, and
+ /// destination IP, and set it in the header.
+ pub fn set_checksum<A: IpAddr>(&mut self, src_ip: A, dst_ip: A) {
+ // self.hdr_prefix.set_checksum(0);
+ self.hdr_prefix.checksum = [0, 0];
+ let c = self.compute_checksum(src_ip, dst_ip);
+ // self.hdr_prefix.set_checksum(c);
+ BigEndian::write_u16(&mut self.hdr_prefix.checksum, c);
+ }
+}
+
+const ACK_MASK: u8 = 1u8 << 4;
+const RST_MASK: u8 = 1u8 << 2;
+const SYN_MASK: u8 = 1u8 << 1;
+const FIN_MASK: u8 = 1u8;
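+
+// These masks select bits of the low flags byte (CWR, ECE, URG, ACK, PSH,
+// RST, SYN, FIN, from high bit to low); e.g., a SYN-ACK segment has flags ==
+// SYN_MASK | ACK_MASK == 0b0001_0010 (editor's note).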
+
+mod options {
+ use std::mem;
+
+ use byteorder::{BigEndian, ByteOrder};
+
+ use transport::tcp::{TcpOption, TcpSackBlock};
+ use wire::util::OptionImpl;
+
+ fn parse_sack_block(bytes: &[u8]) -> TcpSackBlock {
+ TcpSackBlock {
+ left_edge: BigEndian::read_u32(bytes),
+ right_edge: BigEndian::read_u32(&bytes[4..]),
+ }
+ }
+
+ const OPTION_KIND_EOL: u8 = 0;
+ const OPTION_KIND_NOP: u8 = 1;
+ const OPTION_KIND_MSS: u8 = 2;
+ const OPTION_KIND_WINDOW_SCALE: u8 = 3;
+ const OPTION_KIND_SACK_PERMITTED: u8 = 4;
+ const OPTION_KIND_SACK: u8 = 5;
+ const OPTION_KIND_TIMESTAMP: u8 = 8;
+
+ pub struct TcpOptionImpl;
+
+ impl OptionImpl for TcpOptionImpl {
+ type Output = TcpOption;
+ type Error = ();
+
+ fn parse(kind: u8, data: &[u8]) -> Result<Option<TcpOption>, ()> {
+ match kind {
+ self::OPTION_KIND_EOL | self::OPTION_KIND_NOP => {
+ unreachable!("wire::util::Options promises to handle EOL and NOP")
+ }
+ self::OPTION_KIND_MSS => if data.len() != 2 {
+ Err(())
+ } else {
+ Ok(Some(TcpOption::Mss(BigEndian::read_u16(&data))))
+ },
+ self::OPTION_KIND_WINDOW_SCALE => if data.len() != 1 {
+ Err(())
+ } else {
+ Ok(Some(TcpOption::WindowScale(data[0])))
+ },
+ self::OPTION_KIND_SACK_PERMITTED => if !data.is_empty() {
+ Err(())
+ } else {
+ Ok(Some(TcpOption::SackPermitted))
+ },
+ self::OPTION_KIND_SACK => match data.len() {
+ 8 | 16 | 24 | 32 => {
+ let num_blocks = data.len() / mem::size_of::<TcpSackBlock>();
+ let mut blocks = [TcpSackBlock::default(); 4];
+ for i in 0..num_blocks {
+ blocks[i] = parse_sack_block(&data[i * 8..]);
+ }
+ Ok(Some(TcpOption::Sack {
+ blocks,
+ num_blocks: num_blocks as u8,
+ }))
+ }
+ _ => Err(()),
+ },
+ self::OPTION_KIND_TIMESTAMP => if data.len() != 8 {
+ Err(())
+ } else {
+ let ts_val = BigEndian::read_u32(&data);
+ let ts_echo_reply = BigEndian::read_u32(&data[4..]);
+ Ok(Some(TcpOption::Timestamp {
+ ts_val,
+ ts_echo_reply,
+ }))
+ },
+ _ => Ok(None),
+ }
+ }
+ }
+}
diff --git a/bin/recovery_netstack/src/wire/udp.rs b/bin/recovery_netstack/src/wire/udp.rs
new file mode 100644
index 0000000..9f95493
--- /dev/null
+++ b/bin/recovery_netstack/src/wire/udp.rs
@@ -0,0 +1,184 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+use std::num::NonZeroU16;
+
+use byteorder::{BigEndian, ByteOrder};
+use zerocopy::{AsBytes, ByteSlice, FromBytes, LayoutVerified};
+
+use ip::{Ip, IpAddr, IpProto, IpVersion};
+use wire::util::{Checksum, PacketFormat};
+
+const HEADER_SIZE: usize = 8;
+
+// Header has the same memory layout (thanks to repr(C, packed)) as a UDP
+// header. Thus, we can simply reinterpret the bytes of the UDP header as a
+// Header and then safely access its fields. Note, however, that it is *not*
+// safe to have the types of any of the fields be anything other than u8 or [u8;
+// x] since network byte order (big endian) may not be the same as the
+// endianness of the computer we're running on, and since repr(packed) is only
+// safe with values with no alignment requirements.
+#[repr(C, packed)]
+struct Header {
+ src_port: [u8; 2],
+ dst_port: [u8; 2],
+ length: [u8; 2],
+ checksum: [u8; 2],
+}
+
+unsafe impl FromBytes for Header {}
+unsafe impl AsBytes for Header {}
+
+impl Header {
+ fn dst_port(&self) -> u16 {
+ BigEndian::read_u16(&self.dst_port)
+ }
+
+ fn length(&self) -> u16 {
+ BigEndian::read_u16(&self.length)
+ }
+
+ fn checksum(&self) -> u16 {
+ BigEndian::read_u16(&self.checksum)
+ }
+}
+
+/// A UDP packet.
+///
+/// A `UdpPacket` shares its underlying memory with the byte slice it was parsed
+/// from or serialized to, meaning that no copying or extra allocation is
+/// necessary.
+pub struct UdpPacket<B> {
+ header: LayoutVerified<B, Header>,
+ body: B,
+}
+
+impl<B> PacketFormat for UdpPacket<B> {
+ const MAX_HEADER_BYTES: usize = 8;
+ const MAX_FOOTER_BYTES: usize = 0;
+}
+
+impl<B: ByteSlice> UdpPacket<B> {
+ /// Parse a UDP packet.
+ ///
+ /// `parse` parses `bytes` as a UDP packet and validates the checksum.
+ ///
+ /// `src_ip` is the source address in the IP header. In IPv4, `dst_ip` is
+ /// the destination address in the IP header. In IPv6, it's more
+ /// complicated. From Wikipedia:
+ ///
+ /// > The destination address is the final destination; if the IPv6 packet
+ /// > does not contain a Routing header, that will be the destination
+ /// > address in the IPv6 header; otherwise, at the originating node, it
+ /// > will be the address in the last element of the Routing header, and, at
+ /// > the receiving node, it will be the destination address in the IPv6
+ /// > header.
+ pub fn parse<A: IpAddr>(bytes: B, src_ip: A, dst_ip: A) -> Result<UdpPacket<B>, ()> {
+ // See for details: https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure
+
+ let bytes_len = bytes.len();
+ let (header, body) = LayoutVerified::<B, Header>::new_from_prefix(bytes).ok_or(())?;
+ let packet = UdpPacket { header, body };
+ let len = if packet.header.length() == 0 && A::Version::VERSION == IpVersion::V6 {
+ // "In IPv6 jumbograms it is possible to have UDP packets of size
+ // greater than 65,535 bytes. RFC 2675 specifies that the length
+ // field is set to zero if the length of the UDP header plus UDP
+ // data is greater than 65,535." - Wikipedia
+ if bytes_len as u64 >= 1 << 32 {
+ // For IPv6, the packet length in the pseudo-header is 32
+ // bits. When header.length() is used, it fits trivially since
+ // header.length() is a u16. However, when we use bytes_len, it
+ // might overflow a u32 on 64-bit platforms. The comparison is
+ // done in u64 so that `1 << 32` is representable even on 32-bit
+ // platforms, where bytes_len trivially fits in a u32.
+ return Err(());
+ }
+ bytes_len
+ } else {
+ packet.header.length() as usize
+ };
+ if len != bytes_len {
+ return Err(());
+ }
+ if packet.header.dst_port() == 0 {
+ return Err(());
+ }
+
+ // In IPv4, a 0 checksum indicates that the checksum wasn't computed,
+ // and so shouldn't be validated.
+ if packet.header.checksum != [0, 0] {
+ // When computing the checksum, a checksum of 0 is sent as 0xFFFF.
+ let target = if packet.header.checksum == [0xFF, 0xFF] {
+ 0
+ } else {
+ BigEndian::read_u16(&packet.header.checksum)
+ };
+ if packet.compute_checksum(src_ip, dst_ip) != target {
+ return Err(());
+ }
+ } else if A::Version::VERSION == IpVersion::V6 {
+ // "Unlike IPv4, when UDP packets are originated by an IPv6 node,
+ // the UDP checksum is not optional. That is, whenever originating
+ // a UDP packet, an IPv6 node must compute a UDP checksum over the
+ // packet and the pseudo-header, and, if that computation yields a
+ // result of zero, it must be changed to hex FFFF for placement in
+ // the UDP header. IPv6 receivers must discard UDP packets
+ // containing a zero checksum, and should log the error." - RFC 2460
+ return Err(());
+ }
+
+ Ok(packet)
+ }
+}
+
+impl<B: ByteSlice> UdpPacket<B> {
+ fn compute_checksum<A: IpAddr>(&self, src_ip: A, dst_ip: A) -> u16 {
+ // See for details: https://en.wikipedia.org/wiki/User_Datagram_Protocol#Checksum_computation
+ let mut c = Checksum::new();
+ c.add_bytes(src_ip.bytes());
+ c.add_bytes(dst_ip.bytes());
+ if A::Version::VERSION == IpVersion::V4 {
+ c.add_bytes(&[0]);
+ c.add_bytes(&[IpProto::Udp as u8]);
+ c.add_bytes(&self.header.length);
+ } else {
+ let len = HEADER_SIZE + self.body.len();
+ let mut len_bytes = [0; 4];
+ BigEndian::write_u32(&mut len_bytes, len as u32);
+ c.add_bytes(&len_bytes);
+ c.add_bytes(&[0; 3]);
+ c.add_bytes(&[IpProto::Udp as u8]);
+ }
+ c.add_bytes(&self.header.src_port);
+ c.add_bytes(&self.header.dst_port);
+ c.add_bytes(&self.header.length);
+ c.add_bytes(&self.body);
+ c.sum()
+ }
+
+ pub fn body(&self) -> &[u8] {
+ &self.body
+ }
+
+ pub fn src_port(&self) -> Option<NonZeroU16> {
+ NonZeroU16::new(BigEndian::read_u16(&self.header.src_port))
+ }
+
+ pub fn dst_port(&self) -> NonZeroU16 {
+ NonZeroU16::new(self.header.dst_port()).unwrap()
+ }
+
+ /// Did this packet have a checksum?
+ ///
+ /// On IPv4, the sender may optionally omit the checksum. If this function
+ /// returns false, the sender omitted the checksum, and `parse` will not
+ /// have validated it.
+ ///
+ /// On IPv6, it is guaranteed that `checksummed` will return true because
+ /// IPv6 requires a checksum, and so any UDP packet missing one will fail
+ /// validation in `parse`.
+ pub fn checksummed(&self) -> bool {
+ self.header.checksum() != 0
+ }
+}
diff --git a/bin/recovery_netstack/src/wire/util.rs b/bin/recovery_netstack/src/wire/util.rs
new file mode 100644
index 0000000..768407e
--- /dev/null
+++ b/bin/recovery_netstack/src/wire/util.rs
@@ -0,0 +1,256 @@
+pub use self::checksum::*;
+pub use self::options::*;
+pub use self::serialize::*;
+
+mod checksum {
+ use byteorder::{BigEndian, ByteOrder};
+
+ /// A checksum used by IPv4 and TCP.
+ ///
+ /// This checksum operates by computing the 1s complement sum of successive
+ /// 16-bit words of the input.
+ pub struct Checksum(u32);
+
+ impl Checksum {
+ /// Initialize a new checksum.
+ pub fn new() -> Self {
+ Checksum(0)
+ }
+
+ /// Add bytes to the checksum.
+ ///
+ /// If `bytes` does not contain an even number of bytes, a single zero byte
+ /// will be added to the end before updating the checksum.
+ pub fn add_bytes(&mut self, mut bytes: &[u8]) {
+ while bytes.len() > 1 {
+ self.0 += u32::from(BigEndian::read_u16(bytes));
+ bytes = &bytes[2..];
+ }
+ if bytes.len() == 1 {
+ self.0 += u32::from(BigEndian::read_u16(&[bytes[0], 0]));
+ }
+ }
+
+ /// Compute the checksum.
+ ///
+ /// `sum` returns the checksum of all data added using `add_bytes` so far.
+ /// Calling `sum` does *not* reset the checksum. More bytes may be added
+ /// after calling `sum`, and they will be added to the checksum as expected.
+ pub fn sum(&self) -> u16 {
+ let mut sum = self.0;
+ while (sum >> 16) != 0 {
+ // fold the carry bits back into the low 16 bits
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ }
+ !sum as u16
+ }
+ }
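+
+ // Worked sketch (editor's illustration): add_bytes(&[0x12, 0x34, 0x56,
+ // 0x78]) accumulates 0x1234 + 0x5678 = 0x68AC; there is no carry above
+ // 16 bits to fold, so sum() returns !0x68AC = 0x9753.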
+
+ /// Checksum bytes.
+ ///
+ /// `checksum` is a shorthand for
+ ///
+ /// ```rust,ignore
+ /// let mut c = Checksum::new();
+ /// c.add_bytes(bytes);
+ /// c.sum()
+ /// ```
+ pub fn checksum(bytes: &[u8]) -> u16 {
+ let mut c = Checksum::new();
+ c.add_bytes(bytes);
+ c.sum()
+ }
+}
+
+mod options {
+ use std::fmt::Debug;
+ use std::marker::PhantomData;
+ use std::ops::Deref;
+
+ use zerocopy::ByteSlice;
+
+ /// A parsed set of header options.
+ ///
+ /// `Options` represents a parsed set of options from a TCP or IPv4 header.
+ pub struct Options<B, O> {
+ bytes: B,
+ _marker: PhantomData<O>,
+ }
+
+ /// An iterator over header options.
+ ///
+ /// `OptionIter` is an iterator over packet header options stored in the
+ /// format used by IPv4 and TCP, where each option is either a single kind
+ /// byte or a kind byte, a length byte, and length - 2 data bytes.
+ ///
+ /// In both IPv4 and TCP, the only single-byte options are End of Options
+ /// List (EOL) and No Operation (NOP), both of which can be handled
+ /// internally by OptionIter. Thus, the caller only needs to be able to
+ /// parse multi-byte options.
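+ ///
+ /// For example (editor's sketch), the byte sequence
+ /// `[1, 2, 4, 0x05, 0xB4, 0]` parses as a NOP, then an option with kind 2,
+ /// length 4, and data bytes `[0x05, 0xB4]` (in TCP, an MSS of 1460), then
+ /// an End of Options List.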
+ pub struct OptionIter<'a, O> {
+ bytes: &'a [u8],
+ idx: usize,
+ _marker: PhantomData<O>,
+ }
+
+ /// Errors returned from parsing options.
+ ///
+ /// `OptionParseErr` is either `Internal`, which indicates that this module
+ /// encountered a malformed sequence of options (likely with a length field
+ /// larger than the remaining bytes in the options buffer), or `External`,
+ /// which indicates that the `OptionImpl::parse` callback returned an error.
+ #[derive(Debug)]
+ pub enum OptionParseErr<E> {
+ Internal,
+ External(E),
+ }
+
+ /// An implementation of an options parser.
+ ///
+ /// `OptionImpl` provides functions to parse fixed- and variable-length
+ /// options. It is required in order to construct an `Options` or
+ /// `OptionIter`.
+ pub trait OptionImpl {
+ type Output;
+ type Error;
+
+ /// Parse an option.
+ ///
+ /// `parse` takes a kind byte and variable-length data associated and
+ /// returns `Ok(Some(o))` if the option successfully parsed as `o`,
+ /// `Ok(None)` if the kind byte was unrecognized, and `Err(err)` if the
+ /// kind byte was recognized but `data` was malformed for that option
+ /// kind. `parse` is allowed to not recognize certain option kinds, as
+ /// the length field can still be used to safely skip over them.
+ ///
+ /// `parse` must be deterministic, or else `Options::parse` cannot
+ /// guarantee that future iterations will not produce errors (and
+ /// panic).
+ fn parse(kind: u8, data: &[u8]) -> Result<Option<Self::Output>, Self::Error>;
+ }
+
+ impl<B, O> Options<B, O>
+ where
+ B: ByteSlice,
+ O: OptionImpl,
+ {
+ /// Parse a set of options.
+ ///
+ /// `parse` parses `bytes` as a sequence of options. `parse` performs a
+ /// single pass over all of the options to verify that they are
+ /// well-formed. Once `parse` returns successfully, the resulting
+ /// `Options` can be used to construct infallible iterators.
+ pub fn parse(bytes: B) -> Result<Options<B, O>, OptionParseErr<O::Error>> {
+ // First, do a single pass over the bytes to detect any errors up
+ // front. Once this is done, since we have a reference to `bytes`,
+ // these bytes can't change out from under us, and so we can treat
+ // any iterator over these bytes as infallible. This makes a few
+ // assumptions, but none of them are that big of a deal. In all
+ // cases, breaking these assumptions would just result in a runtime
+ // panic.
+ // - B could return different bytes each time
+ // - O::parse could be non-deterministic
+ let mut idx = 0;
+ while next::<B, O>(&bytes, &mut idx)?.is_some() {}
+ Ok(Options {
+ bytes,
+ _marker: PhantomData,
+ })
+ }
+ }
+
+ impl<B: Deref<Target = [u8]>, O> Options<B, O> {
+ /// Get the underlying bytes.
+ ///
+ /// `bytes` returns a reference to the byte slice backing this
+ /// `Options`.
+ pub fn bytes(&self) -> &[u8] {
+ &self.bytes
+ }
+ }
+
+ impl<'a, B, O> Options<B, O>
+ where
+ B: 'a + ByteSlice,
+ O: OptionImpl,
+ {
+ /// Create an iterator over options.
+ ///
+ /// `iter` constructs an iterator over the options. Since the options
+ /// were validated in `parse`, then so long as `from_kind` and
+ /// `from_data` are deterministic, the iterator is infallible.
+ pub fn iter(&'a self) -> OptionIter<'a, O> {
+ OptionIter {
+ bytes: &self.bytes,
+ idx: 0,
+ _marker: PhantomData,
+ }
+ }
+ }
+
+ impl<'a, O> Iterator for OptionIter<'a, O>
+ where
+ O: OptionImpl,
+ O::Error: Debug,
+ {
+ type Item = O::Output;
+
+ fn next(&mut self) -> Option<O::Output> {
+ next::<&'a [u8], O>(&self.bytes, &mut self.idx)
+ .expect("already-validated options should not fail to parse")
+ }
+ }
+
+ // End of Options List in both IPv4 and TCP
+ const END_OF_OPTIONS: u8 = 0;
+ // NOP in both IPv4 and TCP
+ const NOP: u8 = 1;
+
+ fn next<B, O>(bytes: &B, idx: &mut usize) -> Result<Option<O::Output>, OptionParseErr<O::Error>>
+ where
+ B: ByteSlice,
+ O: OptionImpl,
+ {
+ // For an explanation of this format, see the "Options" section of
+ // https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure
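+ //
+ // Briefly: each option is either a single kind byte (EOL or NOP), or a
+ // kind byte followed by a length byte (which counts the kind and length
+ // bytes themselves) and length - 2 bytes of option data.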
+ loop {
+ let bytes = &bytes[*idx..];
+ if bytes.is_empty() {
+ return Ok(None);
+ }
+ if bytes[0] == END_OF_OPTIONS {
+ return Ok(None);
+ }
+ if bytes[0] == NOP {
+ *idx += 1;
+ continue;
+ }
+ if bytes.len() < 2 {
+ // a kind byte with no corresponding length byte
+ return Err(OptionParseErr::Internal);
+ }
+ // the length byte counts the kind and length bytes themselves
+ let len = bytes[1] as usize;
+ if len < 2 || len > bytes.len() {
+ return Err(OptionParseErr::Internal);
+ }
+ *idx += len;
+ match O::parse(bytes[0], &bytes[2..len]) {
+ Ok(Some(o)) => return Ok(Some(o)),
+ Ok(None) => {}
+ Err(err) => return Err(OptionParseErr::External(err)),
+ }
+ }
+ }
+}
+
+mod serialize {
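+ /// A packet format with bounded header and footer sizes.
+ ///
+ /// # Examples
+ ///
+ /// A sketch for a hypothetical format with a fixed 20-byte header and no
+ /// footer (the type name is illustrative only):
+ ///
+ /// ```rust,ignore
+ /// struct HypotheticalFormat;
+ ///
+ /// impl PacketFormat for HypotheticalFormat {
+ /// const MAX_HEADER_BYTES: usize = 20;
+ /// const MAX_FOOTER_BYTES: usize = 0;
+ /// }
+ /// ```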
+ pub trait PacketFormat {
+ /// The maximum length of a packet header in bytes.
+ ///
+ /// If `MAX_HEADER_BYTES` bytes are allocated in a buffer preceding a
+ /// payload, it is guaranteed that any header generated by this packet
+ /// format will be able to fit in the space preceding the payload.
+ const MAX_HEADER_BYTES: usize;
+
+ /// The maximum length of a packet footer in bytes.
+ ///
+ /// If `MAX_FOOTER_BYTES` bytes are allocated in a buffer following a
+ /// payload, it is guaranteed that any footer generated by this packet
+ /// format will be able to fit in the space following the payload.
+ const MAX_FOOTER_BYTES: usize;
+ }
+}
diff --git a/bin/recovery_netstack/zerocopy-macro/BUILD.gn b/bin/recovery_netstack/zerocopy-macro/BUILD.gn
new file mode 100644
index 0000000..f5bd755
--- /dev/null
+++ b/bin/recovery_netstack/zerocopy-macro/BUILD.gn
@@ -0,0 +1,14 @@
+# Copyright 2018 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/rust/rustc_library.gni")
+
+rustc_library("zerocopy-macro") {
+ name = "zerocopy-macro"
+ version = "0.1.0"
+
+ deps = [
+ "//third_party/rust-crates/rustc_deps:quote"
+ ]
+}
diff --git a/bin/recovery_netstack/zerocopy-macro/src/lib.rs b/bin/recovery_netstack/zerocopy-macro/src/lib.rs
new file mode 100644
index 0000000..8d817b5
--- /dev/null
+++ b/bin/recovery_netstack/zerocopy-macro/src/lib.rs
@@ -0,0 +1,228 @@
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#![feature(proc_macro)]
+
+extern crate proc_macro;
+extern crate proc_macro2;
+#[macro_use]
+extern crate quote;
+
+use proc_macro::TokenStream;
+
+use proc_macro2::{Span, Term};
+use quote::Tokens;
+
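+/// The `zerocopy_inner!` macro for generating byte-packed structs with
+/// bit-level getters and setters.
+///
+/// # Examples
+///
+/// An illustrative sketch (names are hypothetical). The invocation
+///
+/// ```rust,ignore
+/// zerocopy_inner!(Header|version:u4,ihl:u4,len:u16);
+/// ```
+///
+/// expands to roughly:
+///
+/// ```rust,ignore
+/// #[repr(transparent)]
+/// #[derive(Copy, Clone, Default, Eq, PartialEq)]
+/// struct Header([u8; 3]);
+/// impl Header {
+/// fn get_version(&self) -> u8 { /* ... */ }
+/// fn set_version(&mut self, version: u8) { /* ... */ }
+/// // ...and likewise for ihl and len
+/// }
+/// ```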
+#[proc_macro]
+pub fn zerocopy_inner(input: TokenStream) -> TokenStream {
+ // format and remove all spaces to simplify parsing code
+ let s = format!("{}", input).replace(" ", "").replace("\n", "");
+
+ let pipe_index = s.find('|')
+ .expect("expected input of the form <struct name>|<fields>");
+ let (name, fields) = s.split_at(pipe_index);
+ // remove leading |
+ let fields = &fields[1..];
+
+ let fields = fields
+ .split(',')
+ .filter(|s| !s.is_empty())
+ .map(|field_str| {
+ let (name, ty) =
+ if let [name, ty] = field_str.split(':').collect::<Vec<&str>>().as_slice() {
+ (*name, *ty)
+ } else {
+ panic!("could not parse {} as name:type", field_str);
+ };
+ if name.is_empty() || ty.is_empty() {
+ panic!("empty name or type");
+ }
+
+ let ty = if ty.starts_with('u') {
+ let n = ty[1..]
+ .parse::<u8>()
+ .expect(&format!("could not parse {} as bit width", ty));
+ Type::U(n)
+ } else if ty == "bool" {
+ Type::Bool
+ } else if ty.starts_with("[u8;") && ty.ends_with("]") {
+ let n = ty["[u8;".len()..ty.len() - 1]
+ .parse::<u8>()
+ .expect(&format!("could not parse {} as byte array", ty));
+ Type::ByteArray(n)
+ } else {
+ panic!("unsupported type {}", ty);
+ };
+
+ (name, ty)
+ })
+ .collect::<Vec<_>>();
+
+ let mut offsets = Vec::new();
+ let mut bit_offset = 0;
+ for (_, ty) in &fields {
+ offsets.push((bit_offset / 8, bit_offset % 8));
+ bit_offset += match *ty {
+ Type::U(n) => n as usize,
+ Type::ByteArray(n) => 8 * (n as usize),
+ Type::Bool => 1,
+ };
+ }
+ if bit_offset % 8 != 0 {
+ panic!("must have an integer number of bytes");
+ }
+ let bytes = bit_offset / 8;
+
+ let getters_setters = fields.iter().zip(offsets.iter()).map(
+ |((name, ty), (byte_offset, bit_offset))| {
+ getter_setter(name, *byte_offset, *bit_offset as u8, *ty)
+ },
+ );
+
+ let name = str_to_tokens(name);
+ let tokens = quote!(
+ // ensure that the repr is equivalent to [u8; #bytes]
+ #[repr(transparent)]
+ #[derive(Copy, Clone, Default, Eq, PartialEq)]
+ struct #name([u8; #bytes]);
+ impl #name { #(#getters_setters)* }
+ unsafe impl ::wire::util::Pod for #name {}
+ ).into();
+ tokens
+}
+
+#[derive(Copy, Clone)]
+enum Type {
+ U(u8),
+ ByteArray(u8),
+ Bool,
+}
+
+impl Type {
+ fn name(&self) -> Tokens {
+ match *self {
+ Type::U(n) => str_to_tokens(&format!("u{}", round_bits_up(n))),
+ Type::ByteArray(n) => {
+ // array size must be a usize, not a u8
+ let n = n as usize;
+ quote!([u8; #n])
+ }
+ Type::Bool => quote!(bool),
+ }
+ }
+}
+
+fn round_bits_up(n: u8) -> u8 {
+ assert!(n <= 128);
+ if n < 8 {
+ 8
+ } else {
+ n.next_power_of_two()
+ }
+}
+
+/// Create getters and setters for this field.
+///
+/// Note that `bit_offset_msb` is the offset from the MSB, not the LSB.
+fn getter_setter(name: &str, byte_offset: usize, bit_offset_msb: u8, ty: Type) -> Tokens {
+ // construct getter and setter bodies for a bool type
+ fn bool_bodies(name: &Tokens, byte_offset: usize, bit_offset_msb: u8) -> (Tokens, Tokens) {
+ let true_mask = 1u8 << (7 - bit_offset_msb);
+ let false_mask = 0xFFu8 - (1u8 << (7 - bit_offset_msb));
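+ // e.g., bit_offset_msb == 1 gives true_mask == 0b0100_0000 and
+ // false_mask == 0b1011_1111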
+ let getter = quote!((self.0[#byte_offset] & #true_mask) != 0);
+ let setter = quote!(
+ if #name {
+ self.0[#byte_offset] |= #true_mask;
+ } else {
+ self.0[#byte_offset] &= #false_mask;
+ }
+ );
+ (getter, setter)
+ }
+
+ // construct getter and setter bodies for a byte array type
+ fn byte_array_bodies(name: &Tokens, byte_offset: usize, bit_offset_msb: u8, bytes: u8) -> (Tokens, Tokens) {
+ assert_eq!(bit_offset_msb, 0,"we don't support byte slices not on a byte boundary");
+ let end = byte_offset + (bytes as usize);
+ // array size must be a usize, not a u8
+ let bytes = bytes as usize;
+ // create a temporary buffer, copy from the slice into that buffer,
+ // and then return the buffer
+ let getter = quote!(
+ let mut buf = [0; #bytes];
+ buf.copy_from_slice(&self.0[#byte_offset..#end]);
+ buf
+ );
+ let setter = quote!(
+ self.0[#byte_offset..#end].copy_from_slice(&#name);
+ );
+ (getter, setter)
+ }
+
+ // construct getter and setter bodies for a uXXX type
+ fn u_bodies(name: &Tokens, byte_offset: usize, bit_offset_msb: u8, bits: u8) -> (Tokens, Tokens) {
+ if bits < 8 {
+ let trailing_bits = 8 - (bit_offset_msb + bits);
+ let mask = ((1u16 << bits) - 1) as u8;
+ let inv_shifted_mask = !(mask << trailing_bits);
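+ // e.g., bits == 4 at bit_offset_msb == 0 gives trailing_bits == 4,
+ // mask == 0x0f, and inv_shifted_mask == 0x0f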
+ let getter = quote!((self.0[#byte_offset] >> #trailing_bits) & #mask);
+ let setter = quote!(
+ assert!(#name <= #mask);
+ let zeroed = self.0[#byte_offset] & #inv_shifted_mask;
+ self.0[#byte_offset] = zeroed | (#name << #trailing_bits);
+ );
+ (getter, setter)
+ } else if bits == 8 {
+ assert_eq!(bit_offset_msb, 0, "we don't support u8s not on a byte boundary");
+ (quote!(self.0[#byte_offset]), quote!(self.0[#byte_offset] = #name))
+ } else if bits.is_power_of_two() {
+ assert_eq!(bit_offset_msb, 0,"we don't support u{}s not on a byte boundary",bits);
+ let read = str_to_tokens(&format!("read_u{}", bits));
+ let write = str_to_tokens(&format!("write_u{}", bits));
+ let end = byte_offset + ((bits / 8) as usize);
+ let getter = quote!(
+ use byteorder::ByteOrder;
+ ::byteorder::BigEndian::#read(&self.0[#byte_offset..#end])
+ );
+ let setter = quote!(
+ use byteorder::ByteOrder;
+ ::byteorder::BigEndian::#write(&mut self.0[#byte_offset..#end], #name)
+ );
+ (getter, setter)
+ } else {
+ panic!("unsupported bit size: {}", bits);
+ }
+ }
+
+ let name_tokens = str_to_tokens(name);
+ let (getter_body, setter_body) = match ty {
+ Type::U(bits) => u_bodies(&name_tokens, byte_offset, bit_offset_msb, bits),
+ Type::ByteArray(bytes) => byte_array_bodies(&name_tokens, byte_offset, bit_offset_msb, bytes),
+ Type::Bool => bool_bodies(&name_tokens, byte_offset, bit_offset_msb),
+ };
+
+ let type_name = ty.name();
+ let get_name = str_to_tokens(&("get_".to_owned() + name));
+ let set_name = str_to_tokens(&("set_".to_owned() + name));
+ quote!(
+ #[allow(unused)]
+ fn #get_name(&self) -> #type_name { #getter_body }
+ #[allow(unused)]
+ fn #set_name(&mut self, #name_tokens: #type_name) { #setter_body }
+ )
+}
+
+/// Convert a string to a `Tokens` which can be used in `quote!`.
+///
+/// If a string-typed variable is used directly in `quote!`, it will show up in
+/// quotes in the resulting AST. E.g., `fn "get_foo" (&self) -> u8 { ... }`.
+/// This usually isn't what you want. Instead, you want a `Tokens`, which
+/// `quote!` will handle properly.
+fn str_to_tokens(s: &str) -> Tokens {
+ let mut t = Tokens::new();
+ t.append(Term::new(s, Span::call_site()));
+ t
+}
diff --git a/bin/recovery_netstack/zerocopy/src/lib.rs b/bin/recovery_netstack/zerocopy/src/lib.rs
new file mode 100644
index 0000000..4b2a774
--- /dev/null
+++ b/bin/recovery_netstack/zerocopy/src/lib.rs
@@ -0,0 +1,657 @@
+#![no_std]
+
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+
+// TODO:
+// - FromBits
+// - Is it safe to relax the constraint when T is a DST to say that the
+// conversion is valid so long as size_of_val(t) > size_of::<Self>()?
+// - Figure out what to do when Self is a DST.
+// - transmute
+// - Add various ref/mut implementations?
+
+/// Types which can be constructed from the bits of a `T`.
+///
+/// `FromBits<T>` is a marker trait indicating that the bits of any valid `T`
+/// also correspond to a valid instance of this type. As such, it is safe to
+/// construct an instance of this type simply by reinterpreting the bits of any
+/// valid instance of `T`.
+///
+/// If `T: Sized` and `Self: Sized`, then `T` is guaranteed to be at least as
+/// large as `Self`. In other words, if `T: Sized` and `Self: Sized`, then
+/// `Self: FitsIn<T>`.
+///
+/// If `T` is a DST, then the bits of any valid `T` with length
+/// `mem::size_of::<Self>()` correspond to a valid instance of this type, but no
+/// guarantees are made about sizes larger than `size_of::<Self>()`.
+///
+/// # Safety
+///
+/// Unsafe code may assume that types implementing this trait can be safely
+/// constructed from the bits of a `T`. Implementing this trait for a type for
+/// which this isn't actually safe may cause undefined behavior.
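+///
+/// # Examples
+///
+/// A sketch (illustrative only) for a type of which every bit pattern is
+/// a valid instance:
+///
+/// ```rust,ignore
+/// #[repr(transparent)]
+/// struct TwoBytes([u8; 2]);
+///
+/// // Safe: any two bytes are a valid `TwoBytes`.
+/// unsafe impl FromBits<[u8; 2]> for TwoBytes {}
+/// ```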
+pub unsafe trait FromBits<T>
+where
+ T: ?Sized,
+{
+}
+
+unsafe impl<T> FromBits<T> for [u8] {}
+
+// NOTE on FitsIn and AlignedTo: Currently, these traits use constant evaluation
+// to create a constant whose evaluation results in a divide-by-zero error if a
+// particular boolean expression evaluates to false. Because this error is
+// encountered during constant evaluation, it will only be triggered if the
+// constant is actually accessed. Thus, both traits have a const_assert_xxx
+// associated function that must be called in order to trigger the error.
+//
+// Eventually, Rust will add support for constant expressions in array lengths
+// (https://github.com/rust-lang/rust/issues/43408). When this happens, we will
+// be able to use that to trigger the divide-by-zero error during type checking,
+// at which point:
+// - We can remove the const_assert_xx functions
+// - We will need to remove the blanket impl
+// - We will probably want to add specific impls (e.g., T: FitsIn<T>,
+// u8: FitsIn<u16>, etc)
+
+/// Types which are no larger than `T`.
+///
+/// If a type is `FitsIn<T>`, then `mem::size_of::<Self>() <=
+/// mem::size_of::<T>()`.
+///
+/// Currently, unsafe code may *not* assume that `T: FitsIn<U>` guarantees that
+/// `T` fits in `U`. It must call `T::const_assert_fits_in()`, which will cause
+/// a compile-time error such as:
+///
+/// ```text
+/// error[E0080]: constant evaluation error
+/// --> src/main.rs:12:21
+/// |
+/// 12 | const BAD: u8 = 1u8 / ((std::mem::size_of::<T>() >= std::mem::size_of::<Self>()) as u8);
+/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ attempt to divide by zero
+/// ```
+///
+/// if `T` does not fit in `U`.
+pub unsafe trait FitsIn<T>
+where
+ T: Sized,
+ Self: Sized,
+{
+ #[doc(hidden)]
+ const BAD: u8 = 1u8 / ((mem::size_of::<T>() >= mem::size_of::<Self>()) as u8);
+
+ fn const_assert_fits_in() {
+ let _ = Self::BAD;
+ }
+}
+
+// While any pair of types will type check, any (T, U) for which U is larger
+// than T will fail during constant evaluation.
+unsafe impl<T, U> FitsIn<T> for U {}
+
+/// Types with alignment requirements at least as strict as those of `T`.
+///
+/// If a type is `AlignedTo<T>`, then any validly-aligned instance of it is
+/// guaranteed to satisfy the alignment requirements of `T`.
+///
+/// Currently, unsafe code may *not* assume that `T: AlignedTo<U>` guarantees
+/// that `T` satisfies `U`'s alignment requirements. It must call
+/// `T::const_assert_aligned_to()`, which will cause a compile-time error such
+/// as:
+///
+/// ```text
+/// error[E0080]: constant evaluation error
+/// --> src/main.rs:12:21
+/// |
+/// 12 | const BAD: u8 = 1u8 / ((std::mem::align_of::<T>() <= std::mem::align_of::<Self>()) as u8);
+/// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ attempt to divide by zero
+/// ```
+///
+/// if `T`'s alignment requirement is less strict than `U`'s.
+pub unsafe trait AlignedTo<T>
+where
+ // TODO(joshlf): Remove this bound once there's a way to get the
+ // align_of an unsized value
+ Self: Sized,
+{
+ #[doc(hidden)]
+ const BAD: u8 = 1u8 / ((mem::align_of::<T>() <= mem::align_of::<Self>()) as u8);
+
+ fn const_assert_aligned_to() {
+ let _ = Self::BAD;
+ }
+}
+
+// While any pair of types will type check, any (T, U) for which U has less
+// strict alignment requirements than T will fail during constant evaluation.
+unsafe impl<T, U> AlignedTo<T> for U {}
+
+/// Reinterpret the bits of one type as another type.
+///
+/// Unlike `std::mem::transmute`, `transmute` allows `T` and `U` to have
+/// different sizes so long as `T` is larger than `U`. In that case, the return
+/// value is constructed from the first `std::mem::size_of::<U>()` bytes of `x`.
+/// Otherwise, `transmute` is identical to `std::mem::transmute`. In particular,
+/// `x` is forgotten; it is not dropped.
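+///
+/// # Examples
+///
+/// A sketch of truncating a four-byte array to its first two bytes:
+///
+/// ```rust,ignore
+/// let x: [u8; 4] = [1, 2, 3, 4];
+/// // [u8; 2] fits in [u8; 4], so this compiles and reads the first two bytes
+/// let y: [u8; 2] = unsafe { transmute(x) };
+/// assert_eq!(y, [1, 2]);
+/// ```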
+pub unsafe fn transmute<T, U>(x: T) -> U
+where
+ U: FitsIn<T>,
+{
+ U::const_assert_fits_in();
+ let ret = ptr::read(&x as *const T as *const U);
+ mem::forget(x);
+ ret
+}
+
+/// Safely reinterpret the bits of one type as another type.
+///
+/// `coerce` is like `transmute`, except that the `U: FromBits<T>` bound ensures
+/// that the conversion is safe.
+pub fn coerce<T, U>(x: T) -> U
+where
+ U: FromBits<T>,
+{
+ let ret = unsafe { ptr::read(&x as *const T as *const U) };
+ mem::forget(x);
+ ret
+}
+
+/// Safely coerce an immutable reference.
+///
+/// `coerce_ref` coerces an immutable reference to `T` into an immutable
+/// reference to `U`, provided that any instance of `T` is a valid instance of
+/// `U`, and that `T`'s alignment requirements are no less strict than `U`'s.
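+///
+/// # Examples
+///
+/// A sketch, assuming the hypothetical `TwoBytes` from the `FromBits` docs
+/// plus a corresponding `unsafe impl FromBits<TwoBytes> for [u8; 2]`:
+///
+/// ```rust,ignore
+/// let pair = TwoBytes([0xde, 0xad]);
+/// let bytes: &[u8; 2] = coerce_ref(&pair);
+/// assert_eq!(bytes, &[0xde, 0xad]);
+/// ```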
+pub fn coerce_ref<T, U>(x: &T) -> &U
+where
+ U: FromBits<T>,
+ T: AlignedTo<U>,
+{
+ T::const_assert_aligned_to();
+ unsafe { &*(x as *const T as *const U) }
+}
+
+/// Safely coerce an immutable reference, checking size at runtime.
+///
+/// `coerce_ref_size_checked` coerces an immutable reference to `T` into an
+/// immutable reference to `U`, provided that any instance of `T` is a valid
+/// instance of `U`, and that `x` has the same size as `U`. If `x` has a
+/// different size than `U`, `coerce_ref_size_checked` returns `None`.
+pub fn coerce_ref_size_checked<T, U>(x: &T) -> Option<&U>
+where
+ T: ?Sized + AlignedTo<U>,
+ U: FromBits<T>,
+{
+ T::const_assert_aligned_to();
+ if mem::size_of_val(x) != mem::size_of::<U>() {
+ return None;
+ }
+ Some(unsafe { &*(x as *const T as *const U) })
+}
+
+/// Safely coerce an immutable reference, checking alignment at runtime.
+///
+/// `coerce_ref_align_checked` coerces an immutable reference to `T` into an
+/// immutable reference to `U`, provided that any instance of `T` is a valid
+/// instance of `U`, and that `x` satisfies `U`'s alignment requirements. If `x`
+/// does not satisfy `U`'s alignment requirements, `coerce_ref_align_checked`
+/// returns `None`.
+pub fn coerce_ref_align_checked<T, U>(x: &T) -> Option<&U>
+where
+ U: FromBits<T>,
+{
+ if (x as *const T as usize) % mem::align_of::<U>() != 0 {
+ return None;
+ }
+ Some(unsafe { &*(x as *const T as *const U) })
+}
+
+/// Safely coerce an immutable reference, checking size and alignment at runtime.
+///
+/// `coerce_ref_size_align_checked` coerces an immutable reference to `T` into
+/// an immutable reference to `U`, provided that any instance of `T` is a valid
+/// instance of `U`, that `x` has the same size as `U`, and that `x` satisfies
+/// `U`'s alignment requirements. If `x` has a different size than `U` or does
+/// not satisfy `U`'s alignment requirements, `coerce_ref_size_align_checked`
+/// returns `None`.
+pub fn coerce_ref_size_align_checked<T, U>(x: &T) -> Option<&U>
+where
+ T: ?Sized,
+ U: FromBits<T>,
+{
+ if mem::size_of_val(x) != mem::size_of::<U>()
+ || (x as *const _ as *const () as usize) % mem::align_of::<U>() != 0
+ {
+ return None;
+ }
+ Some(unsafe { &*(x as *const T as *const U) })
+}
+
+/// Safely coerce a mutable reference.
+///
+/// `coerce_mut` coerces a mutable reference to `T` into a mutable reference to
+/// `U`, provided that any instance of `T` is a valid instance of `U`, any
+/// instance of `U` is a valid instance of `T`, and that `T`'s alignment
+/// requirements are no less strict than `U`'s.
+pub fn coerce_mut<T, U>(x: &mut T) -> &mut U
+where
+ U: FromBits<T>,
+ T: FromBits<U>,
+ T: AlignedTo<U>,
+{
+ T::const_assert_aligned_to();
+ unsafe { &mut *(x as *mut T as *mut U) }
+}
+
+/// Safely coerce a mutable reference, checking size at runtime.
+///
+/// `coerce_mut_size_checked` coerces a mutable reference to `T` into a mutable
+/// reference to `U`, provided that any instance of `T` is a valid instance of
+/// `U`, any instance of `U` is a valid instance of `T`, that `T`'s alignment
+/// requirements are no less strict than `U`'s, and that `x` has the same size
+/// as `U`. If `x` has a different size than `U`, `coerce_mut_size_checked`
+/// returns `None`.
+pub fn coerce_mut_size_checked<T, U>(x: &mut T) -> Option<&mut U>
+where
+ T: ?Sized + FromBits<U> + AlignedTo<U>,
+ U: FromBits<T>,
+{
+ T::const_assert_aligned_to();
+ if mem::size_of_val(x) != mem::size_of::<U>() {
+ return None;
+ }
+ Some(unsafe { &mut *(x as *mut T as *mut U) })
+}
+
+/// Safely coerce a mutable reference, checking alignment at runtime.
+///
+/// `coerce_mut_align_checked` coerces a mutable reference to `T` into a mutable
+/// reference to `U`, provided that any instance of `T` is a valid instance of
+/// `U`, any instance of `U` is a valid instance of `T`, and that `x` satisfies
+/// `U`'s alignment requirements. If `x` does not satisfy `U`'s alignment
+/// requirements, `coerce_mut_align_checked` returns `None`.
+pub fn coerce_mut_align_checked<T, U>(x: &mut T) -> Option<&mut U>
+where
+ T: FromBits<U>,
+ U: FromBits<T>,
+{
+ if (x as *const T as usize) % mem::align_of::<U>() != 0 {
+ return None;
+ }
+ Some(unsafe { &mut *(x as *mut T as *mut U) })
+}
+
+/// Safely coerce a mutable reference, checking size and alignment at runtime.
+///
+/// `coerce_mut_size_align_checked` coerces a mutable reference to `T` into a
+/// mutable reference to `U`, provided that any instance of `T` is a valid
+/// instance of `U`, any instance of `U` is a valid instance of `T`, `x` has the
+/// same size as `U`, and that `x` satisfies `U`'s alignment requirements. If
+/// `x` has a different size than `U` or does not satisfy `U`'s alignment
+/// requirements, `coerce_mut_size_align_checked` returns `None`.
+pub fn coerce_mut_size_align_checked<T, U>(x: &mut T) -> Option<&mut U>
+where
+ T: ?Sized + FromBits<U>,
+ U: FromBits<T>,
+{
+ if mem::size_of_val(x) != mem::size_of::<U>()
+ || (x as *const _ as *const () as usize) % mem::align_of::<U>() != 0
+ {
+ return None;
+ }
+ Some(unsafe { &mut *(x as *mut T as *mut U) })
+}
+
+/// Coerce an immutable reference without checking size.
+///
+/// `coerce_ref_size_unchecked` coerces an immutable reference to `T` into an
+/// immutable reference to `U`, provided that any properly-sized instance of `T`
+/// is a valid instance of `U`. It is the caller's responsibility to ensure that
+/// `x` is equal in size to `U`.
+///
+/// # Safety
+///
+/// If `x` is not equal in size to `U`, it may cause undefined behavior.
+pub unsafe fn coerce_ref_size_unchecked<T, U>(x: &T) -> &U
+where
+ T: ?Sized + AlignedTo<U>,
+ U: FromBits<T>,
+{
+ T::const_assert_aligned_to();
+ &*(x as *const T as *const U)
+}
+
+/// Coerce an immutable reference without checking alignment.
+///
+/// `coerce_ref_align_unchecked` coerces an immutable reference to `T` into an
+/// immutable reference to `U`, provided that any instance of `T` is a valid
+/// instance of `U`. It is the caller's responsibility to ensure that `x`
+/// satisfies `U`'s alignment requirements.
+///
+/// # Safety
+///
+/// If `x` does not satisfy `U`'s alignment, it may result in undefined
+/// behavior.
+pub unsafe fn coerce_ref_align_unchecked<T, U>(x: &T) -> &U
+where
+ U: FromBits<T>,
+{
+ &*(x as *const T as *const U)
+}
+
+/// Coerce an immutable reference without checking size or alignment.
+///
+/// `coerce_ref_size_align_unchecked` coerces an immutable reference to `T` into an
+/// immutable reference to `U`, provided that any properly-sized instance of `T`
+/// is a valid instance of `U`. It is the caller's responsibility to ensure that
+/// `x` is equal in size to `U` and that it satisfies `U`'s alignment
+/// requirements.
+///
+/// # Safety
+///
+/// If `x` is not equal in size to `U` or does not satisfy `U`'s alignment, it
+/// may result in undefined behavior.
+pub unsafe fn coerce_ref_size_align_unchecked<T, U>(x: &T) -> &U
+where
+ T: ?Sized,
+ U: FromBits<T>,
+{
+ &*(x as *const T as *const U)
+}
+
+/// Coerce a mutable reference without checking size.
+///
+/// `coerce_mut_size_unchecked` coerces a mutable reference to `T` into a
+/// mutable reference to `U`, provided that any properly-sized instance of `T`
+/// is a valid instance of `U` and that any instance of `U` is a valid instance
+/// of `T`. It is the caller's responsibility to ensure that `x` is equal in
+/// size to `U`.
+///
+/// # Safety
+///
+/// If `x` is not equal in size to `U`, it may cause undefined behavior.
+pub unsafe fn coerce_mut_size_unchecked<T, U>(x: &mut T) -> &mut U
+where
+ T: ?Sized + AlignedTo<U> + FromBits<U>,
+ U: FromBits<T>,
+{
+ T::const_assert_aligned_to();
+ &mut *(x as *mut T as *mut U)
+}
+
+/// Coerce a mutable reference without checking alignment.
+///
+/// `coerce_mut_align_unchecked` coerces a mutable reference to `T` into a
+/// mutable reference to `U`, provided that any instance of `T` is a valid
+/// instance of `U` and that any instance of `U` is a valid instance of `T`. It
+/// is the caller's responsibility to ensure that `x` satisfies `U`'s alignment
+/// requirements.
+///
+/// # Safety
+///
+/// If `x` does not satisfy `U`'s alignment, it may result in undefined
+/// behavior.
+pub unsafe fn coerce_mut_align_unchecked<T, U>(x: &mut T) -> &mut U
+where
+ T: FromBits<U>,
+ U: FromBits<T>,
+{
+ &mut *(x as *mut T as *mut U)
+}
+
+/// Coerce a mutable reference without checking size or alignment.
+///
+/// `coerce_mut_size_align_unchecked` coerces a mutable reference to `T` into a
+/// mutable reference to `U`, provided that any properly-sized instance of `T`
+/// is a valid instance of `U` and that any instance of `U` is a valid instance
+/// of `T`. It is the caller's responsibility to ensure that `x` is equal in
+/// size to `U` and that it satisfies `U`'s alignment requirements.
+///
+/// # Safety
+///
+/// If `x` is not equal in size to `U` or does not satisfy `U`'s alignment, it
+/// may result in undefined behavior.
+pub unsafe fn coerce_mut_size_align_unchecked<T, U>(x: &mut T) -> &mut U
+where
+ T: ?Sized + FromBits<U>,
+ U: FromBits<T>,
+{
+ &mut *(x as *mut T as *mut U)
+}
+
+/// A length- and alignment-checked reference to an object which can safely
+/// be reinterpreted as another type.
+///
+/// `LayoutVerified` is an owned reference with the invariant that the
+/// referent's length and alignment are each greater than or equal to the length
+/// and alignment of `U`. Using this invariant, it implements `Deref` and
+/// `DerefMut` for `U`.
+pub struct LayoutVerified<T, U>(T, PhantomData<U>);
+
+impl<T, U> LayoutVerified<T, U>
+where
+ T: TrustedDeref,
+{
+ /// Construct a new `LayoutVerified`.
+ ///
+ /// `new` verifies that `x` is at least as large as `mem::size_of::<U>()`
+ /// and that it satisfies `U`'s alignment requirements.
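+ ///
+ /// # Examples
+ ///
+ /// A sketch, assuming `TrustedDeref` is implemented for `&[u8]` and some
+ /// `Header` type satisfies `Header: FromBits<[u8]>`:
+ ///
+ /// ```rust,ignore
+ /// let bytes: &[u8] = &buf[..];
+ /// let header: LayoutVerified<&[u8], Header> =
+ /// LayoutVerified::new(bytes).expect("too short or misaligned");
+ /// ```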
+ pub fn new(x: T) -> Option<LayoutVerified<T, U>> {
+ if mem::size_of_val(x.deref()) < mem::size_of::<U>()
+ || (x.deref() as *const _ as *const () as usize) % mem::align_of::<U>() != 0
+ {
+ return None;
+ }
+ Some(LayoutVerified(x, PhantomData))
+ }
+}
+
+impl<A, T, U> LayoutVerified<A, U>
+where
+ A: TrustedDeref<Target = [T]> + SplitAt,
+{
+ /// Construct a new `LayoutVerified` from the prefix of another type.
+ ///
+ /// `new_prefix` verifies that `x` is at least as large as
+ /// `mem::size_of::<U>()` and that it satisfies `U`'s alignment
+ /// requirements. It splits `x` at the smallest index such that the first
+ /// half of the split is no smaller than `mem::size_of::<U>()`, uses the
+ /// first half of the split to construct a `LayoutVerified`, and returns the
+ /// second half of the split. If `mem::size_of::<U>()` is a multiple of
+ /// `A`'s element size, then the first half's length is simply
+ /// `mem::size_of::<U>()` divided by that element size.
+ pub fn new_prefix(x: A) -> Option<(LayoutVerified<A, U>, A)> {
+ if mem::size_of_val(x.deref()) < mem::size_of::<U>()
+ || (x.deref() as *const _ as *const () as usize) % mem::align_of::<U>() != 0
+ {
+ return None;
+ }
+ Some(Self::new_prefix_helper(x))
+ }
+}
+
+impl<T, U> LayoutVerified<T, U>
+where
+ T: TrustedDeref,
+ T: AlignedTo<U>,
+{
+ /// Construct a new `LayoutVerified` with statically-guaranteed alignment.
+ ///
+ /// `new_aligned` verifies that `x` is at least as large as
+ /// `mem::size_of::<U>()`.
+ ///
+ /// `T::Target`'s alignment guarantees must be at least as strict as `U`'s
+ /// so that a reference to `T::Target` can be converted to a reference to
+ /// `U` without violating `U`'s alignment requirements.
+ pub fn new_aligned(x: T) -> Option<LayoutVerified<T, U>> {
+ T::const_assert_aligned_to();
+ if mem::size_of_val(x.deref()) < mem::size_of::<U>() {
+ return None;
+ }
+ Some(LayoutVerified(x, PhantomData))
+ }
+}
+
+impl<A, T, U> LayoutVerified<A, U>
+where
+ A: TrustedDeref<Target = [T]> + SplitAt,
+ A: AlignedTo<U>,
+{
+ /// Construct a new `LayoutVerified` with statically-guaranteed alignment
+ /// from the prefix of another type.
+ ///
+ /// `new_aligned_prefix` verifies that `x` is at least as large as
+ /// `mem::size_of::<U>()`; alignment is guaranteed statically rather than
+ /// checked at runtime. It splits `x` at the smallest index such that the first
+ /// half of the split is no smaller than `mem::size_of::<U>()`, uses the
+ /// first half of the split to construct a `LayoutVerified`, and returns the
+ /// second half of the split. If `mem::size_of::<U>()` is a multiple of
+ /// `A`'s element size, then the first half's length is simply
+ /// `mem::size_of::<U>()` divided by that element size.
+ ///
+ /// `A::Target`'s alignment guarantees must be at least as strict as `U`'s
+ /// so that a reference to `A::Target` can be converted to a reference to
+ /// `U` without violating `U`'s alignment requirements.
+ pub fn new_aligned_prefix(x: A) -> Option<(LayoutVerified<A, U>, A)> {
+ A::const_assert_aligned_to();
+ if mem::size_of_val(x.deref()) < mem::size_of::<U>() {
+ return None;
+ }
+ Some(Self::new_prefix_helper(x))
+ }
+}
+
+impl<T, U> LayoutVerified<T, U>
+where
+ T: TrustedDeref,
+ T::Target: Sized,
+ U: FitsIn<T::Target>,
+{
+ /// Construct a new `LayoutVerified` with statically-guaranteed size.
+ ///
+ /// `new_sized` verifies that `x` satisfies `U`'s alignment requirements.
+ ///
+ /// `T::Target` must be at least as large as `U`.
+ pub fn new_sized(x: T) -> Option<LayoutVerified<T, U>> {
+ U::const_assert_fits_in();
+ if (x.deref() as *const T::Target as usize) % mem::align_of::<U>() != 0 {
+ return None;
+ }
+ Some(LayoutVerified(x, PhantomData))
+ }
+}
+
+impl<T, U> LayoutVerified<T, U>
+where
+ T: TrustedDeref,
+ T::Target: Sized,
+ U: FitsIn<T::Target>,
+ T: AlignedTo<U>,
+{
+ /// Construct a new `LayoutVerified` with statically-guaranteed size and
+ /// alignment.
+ ///
+ /// `T::Target` must be at least as large as `U`. `T::Target`'s alignment
+ /// guarantees must be at least as strict as `U`'s so that a reference to
+ /// `T::Target` can be converted to a reference to `U` without violating
+ /// `U`'s alignment requirements.
+ pub fn new_sized_aligned(x: T) -> LayoutVerified<T, U> {
+ U::const_assert_fits_in();
+ T::const_assert_aligned_to();
+ LayoutVerified(x, PhantomData)
+ }
+}
+
+impl<A, T, U> LayoutVerified<A, U>
+where
+ A: TrustedDeref<Target = [T]> + SplitAt,
+{
+ fn new_prefix_helper(x: A) -> (LayoutVerified<A, U>, A) {
+ // split at the smallest element index whose byte offset is at least
+ // mem::size_of::<U>()
+ let idx = if mem::size_of::<U>() % mem::size_of::<T>() == 0 {
+ mem::size_of::<U>() / mem::size_of::<T>()
+ } else {
+ (mem::size_of::<U>() / mem::size_of::<T>()) + 1
+ };
+ let (x, rest) = x.split_at(idx);
+ (LayoutVerified(x, PhantomData), rest)
+ }
+}
+
+impl<T, U> LayoutVerified<T, U> {
+ /// Get the underlying `T`.
+ ///
+ /// `get_t` returns a reference to the `T` backing this `LayoutVerified`.
+ pub fn get_t(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T, U> Deref for LayoutVerified<T, U>
+where
+ T: TrustedDeref,
+ U: FromBits<T::Target>,
+{
+ type Target = U;
+ fn deref(&self) -> &U {
+ // deref through `T` first so that we reinterpret the referent's
+ // bytes, not the bytes of the reference itself
+ unsafe { &*(self.0.deref() as *const T::Target as *const U) }
+ }
+}
+
+impl<T, U> DerefMut for LayoutVerified<T, U>
+where
+ T: TrustedDerefMut,
+ U: FromBits<T::Target>,
+ T::Target: FromBits<U>,
+{
+ fn deref_mut(&mut self) -> &mut U {
+ unsafe { &mut *(self.0.deref_mut() as *mut T::Target as *mut U) }
+ }
+}
+
+/// Like `Deref`, but guaranteed to always return the same length.
+///
+/// `TrustedDeref` is like `Deref`, but multiple calls to `deref` on the same
+/// object are always guaranteed to return references to objects of the same
+/// size.
+///
+/// # Safety
+///
+/// Unsafe code may rely on the size-consistency property, so violating that
+/// property may cause undefined behavior.
+pub unsafe trait TrustedDeref: Deref {}
+
+/// Like `DerefMut`, but guaranteed to always return the same length.
+///
+/// `TrustedDerefMut` is like `DerefMut`, but multiple calls to `deref` on the
+/// same object are always guaranteed to return references to objects of the
+/// same size.
+///
+/// # Safety
+///
+/// Unsafe code may rely on the size-consistency property, so violating that
+/// property may cause undefined behavior.
+pub unsafe trait TrustedDerefMut: TrustedDeref + DerefMut {}
+
+// unsafe impl<'a> TrustedDeref for &'a [u8] {}
+// unsafe impl<'a> TrustedDerefMut for &'a mut [u8] {}
+
+/// Types which can be split at an index.
+///
+/// Types which implement `SplitAt` must guarantee that splits behave as
+/// expected: if an object of length `len` is split at index `idx`, the first
+/// return value will have length `idx` and the second will have length
+/// `len - idx`.
+///
+/// # Safety
+///
+/// Unsafe code may rely on `SplitAt` types to behave as documented, so
+/// violating this documentation may cause undefined behavior.
+pub unsafe trait SplitAt: Sized {
+ fn split_at(self, mid: usize) -> (Self, Self);
+}
+
+unsafe impl<'a> SplitAt for &'a [u8] {
+ fn split_at(self, mid: usize) -> (Self, Self) {
+ <[u8]>::split_at(self, mid)
+ }
+}
diff --git a/packages/prod/all b/packages/prod/all
index 6ccf776..4b959c1 100644
--- a/packages/prod/all
+++ b/packages/prod/all
@@ -51,6 +51,7 @@
"garnet/packages/prod/pm",
"garnet/packages/prod/power_manager",
"garnet/packages/prod/ralink",
+ "garnet/packages/prod/recovery_netstack",
"garnet/packages/prod/root_ssl_certificates",
"garnet/packages/prod/run",
"garnet/packages/prod/runtime",
diff --git a/packages/prod/recovery_netstack b/packages/prod/recovery_netstack
new file mode 100644
index 0000000..f4f34ce
--- /dev/null
+++ b/packages/prod/recovery_netstack
@@ -0,0 +1,5 @@
+{
+ "packages": {
+ "recovery-netstack": "//garnet/bin/recovery_netstack"
+ }
+}