Merge branch 'docs' into develop
diff --git a/src/client.rs b/src/client.rs
index da617e5..097b309 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -1,9 +1,54 @@
 //! Clients for high level interactions with TUF repositories.
+//!
+//! # Example
+//!
+//! ```no_run
+//! extern crate hyper;
+//! extern crate tuf;
+//! extern crate url;
+//!
+//! use hyper::client::Client as HttpClient;
+//! use std::path::PathBuf;
+//! use tuf::Tuf;
+//! use tuf::crypto::KeyId;
+//! use tuf::client::{Client, Config};
+//! use tuf::metadata::{RootMetadata, SignedMetadata, Role, MetadataPath,
+//!     MetadataVersion};
+//! use tuf::interchange::JsonDataInterchange;
+//! use tuf::repository::{Repository, FileSystemRepository, HttpRepository};
+//! use url::Url;
+//!
+//! static TRUSTED_ROOT_KEY_IDS: &'static [&str] = &[
+//!     "diNfThTFm0PI8R-Bq7NztUIvZbZiaC_weJBgcqaHlWw=",
+//!     "ar9AgoRsmeEcf6Ponta_1TZu1ds5uXbDemBig30O7ck=",
+//!     "T5vfRrM1iHpgzGwAHe7MbJH_7r4chkOAphV3OPCCv0I=",
+//! ];
+//!
+//! fn main() {
+//!     let key_ids: Vec<KeyId> = TRUSTED_ROOT_KEY_IDS.iter()
+//!         .map(|k| KeyId::from_string(k).unwrap())
+//!         .collect();
+//!
+//!     let local = FileSystemRepository::<JsonDataInterchange>::new(PathBuf::from("~/.rustup"));
+//!
+//!     let remote = HttpRepository::new(
+//!         Url::parse("https://static.rust-lang.org/").unwrap(),
+//!         HttpClient::new(),
+//!         Some("rustup/1.4.0".into()),
+//!         None);
+//!
+//!     let config = Config::build().finish().unwrap();
+//!
+//!     let mut client = Client::with_root_pinned(&key_ids, config, local, remote).unwrap();
+//!     let _ = client.update_local().unwrap();
+//!     let _ = client.update_remote().unwrap();
+//! }
+//! ```
 
 use std::io::{Read, Write};
 
 use Result;
-use crypto;
+use crypto::{self, KeyId};
 use error::Error;
 use interchange::DataInterchange;
 use metadata::{MetadataVersion, RootMetadata, Role, MetadataPath, TargetPath, TargetDescription,
@@ -31,12 +76,83 @@
     L: Repository<D>,
     R: Repository<D>,
 {
-    /// Create a new TUF client from the given `Tuf` (metadata storage) and local and remote
-    /// repositories.
-    pub fn new(tuf: Tuf<D>, config: Config, mut local: L, mut remote: R) -> Result<Self> {
+    /// Create a new TUF client. It will attempt to load initial root metadata from the local
+    /// repository, falling back to the remote repository, and return an error if it cannot do so.
+    ///
+    /// **WARNING**: This method offers weaker security guarantees than the related method
+    /// `with_root_pinned`.
+    pub fn new(config: Config, mut local: L, mut remote: R) -> Result<Self> {
         local.initialize()?;
         remote.initialize()?;
 
+        let root = local
+            .fetch_metadata(
+                &Role::Root,
+                &MetadataPath::from_role(&Role::Root),
+                &MetadataVersion::Number(1),
+                &config.max_root_size,
+                config.min_bytes_per_second,
+                None,
+            )
+            .or_else(|_| {
+                remote.fetch_metadata(
+                    &Role::Root,
+                    &MetadataPath::from_role(&Role::Root),
+                    &MetadataVersion::Number(1),
+                    &config.max_root_size,
+                    config.min_bytes_per_second,
+                    None,
+                )
+            })?;
+
+        let tuf = Tuf::from_root(root)?;
+
+        Ok(Client {
+            tuf: tuf,
+            config: config,
+            local: local,
+            remote: remote,
+        })
+    }
+
+    /// Create a new TUF client. It will attempt to load initial root metadata from the local and
+    /// remote repositories, using the provided key IDs to pin the verification.
+    ///
+    /// This is the preferred method of creating a client.
+    pub fn with_root_pinned<'a, I>(
+        trusted_root_keys: I,
+        config: Config,
+        mut local: L,
+        mut remote: R,
+    ) -> Result<Self>
+    where
+        I: IntoIterator<Item = &'a KeyId>,
+    {
+        local.initialize()?;
+        remote.initialize()?;
+
+        let root = local
+            .fetch_metadata(
+                &Role::Root,
+                &MetadataPath::from_role(&Role::Root),
+                &MetadataVersion::Number(1),
+                &config.max_root_size,
+                config.min_bytes_per_second,
+                None,
+            )
+            .or_else(|_| {
+                remote.fetch_metadata(
+                    &Role::Root,
+                    &MetadataPath::from_role(&Role::Root),
+                    &MetadataVersion::Number(1),
+                    &config.max_root_size,
+                    config.min_bytes_per_second,
+                    None,
+                )
+            })?;
+
+        let tuf = Tuf::from_root_pinned(root, trusted_root_keys)?;
+
         Ok(Client {
             tuf: tuf,
             config: config,
@@ -480,7 +596,7 @@
     }
 }
 
-/// Helper for building and validating a TUF `Config`.
+/// Helper for building and validating a TUF client `Config`.
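+///
+/// A minimal sketch of the builder in use, relying only on the defaults:
+///
+/// ```
+/// use tuf::client::Config;
+///
+/// let config = Config::build().finish().unwrap();
+/// ```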
 #[derive(Debug, PartialEq)]
 pub struct ConfigBuilder {
     max_root_size: Option<usize>,
@@ -594,8 +710,6 @@
             &root,
         ).unwrap();
 
-        let tuf = Tuf::from_root(root).unwrap();
-
         let root = RootMetadata::new(
             2,
             Utc.ymd(2038, 1, 1).and_hms(0, 0, 0),
@@ -649,7 +763,6 @@
         ).unwrap();
 
         let mut client = Client::new(
-            tuf,
             Config::build().finish().unwrap(),
             repo,
             EphemeralRepository::new(),
diff --git a/src/crypto.rs b/src/crypto.rs
index 634ba7c..ab21637 100644
--- a/src/crypto.rs
+++ b/src/crypto.rs
@@ -106,11 +106,6 @@
     Ok((size, hashes))
 }
 
-/// Calculate the given key's ID.
-///
-/// A `KeyId` is calculated as `sha256(public_key_bytes)`. The TUF spec says that it should be
-/// `sha256(cjson(encoded(public_key_bytes)))`, but this is meaningless once the spec moves away
-/// from using only JSON as the data interchange format.
 fn calculate_key_id(public_key: &[u8]) -> KeyId {
     let mut context = digest::Context::new(&SHA256);
     context.update(&public_key);
@@ -120,27 +115,41 @@
 /// Wrapper type for public key's ID.
 ///
 /// # Calculating
-/// In order to future proof the calculation of key IDs and preserver them across encoding types,
-/// a key's ID is calculated as the Sha-256 hash of the DER bytes of a key in Subject Public Key
-/// Info (SPKI) format.
+/// A `KeyId` is calculated as `sha256(spki(pub_key_bytes))` where `spki` is a function that takes
+/// any encoding of a public key and converts it into the `SubjectPublicKeyInfo` (SPKI) DER
+/// encoding.
 ///
+/// Note: Historically, the TUF spec said that a key's ID should be calculated with
+/// `sha256(cjson(encoded(pub_key_bytes)))`, but since there could be multiple supported data
+/// interchange formats, relying on an encoding that uses JSON does not make sense.
+///
+/// # ASN.1
 /// ```bash
-/// SEQUENCE {
-///   SEQUENCE {
-///     OBJECT IDENTIFIER
-///     NULL
-///   }
-///   BIT STTRING
+/// PublicKey ::= SEQUENCE {
+///     -- This field is checked for consistency against `subjectPublicKey`.
+///     -- The OID determines how we attempt to parse the `BIT STRING`.
+///     algorithm        AlgorithmIdentifier,
+///     -- Either:
+///     --   1. Encapsulates an `RsaPublicKey`
+///     --   2. Equals an `Ed25519PublicKey`
+///     subjectPublicKey BIT STRING
 /// }
-/// ```
 ///
-/// Where `BIT STRING` encapsulates the actual public key. In the case of RSA this is:
-///
-/// ```bash
-/// SEQUENCE {
-///   INTEGER (n, modulus)
-///   INTEGER (e, exponent)
+/// AlgorithmIdentifier ::= SEQUENCE {
+///     -- Either:
+///     --   1. 1.2.840.113549.1.1.1 rsaEncryption(PKCS #1)
+///     --   2. 1.3.101.112 curveEd25519(EdDSA 25519 signature algorithm)
+///     algorithm  OBJECT IDENTIFIER,
+///     -- In our case, this is always `NULL`.
+///     parameters ANY DEFINED BY algorithm OPTIONAL
 /// }
+///
+/// RsaPublicKey ::= SEQUENCE {
+///     modulus  INTEGER (1..MAX),
+///     exponent INTEGER (1..MAX)
+/// }
+///
+/// Ed25519PublicKey ::= BIT STRING
 /// ```
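+///
+/// # Example
+///
+/// Parsing a `KeyId` from its base64url string form (the value here is one of the example IDs
+/// used elsewhere in these docs):
+///
+/// ```
+/// use tuf::crypto::KeyId;
+///
+/// let key_id = KeyId::from_string("diNfThTFm0PI8R-Bq7NztUIvZbZiaC_weJBgcqaHlWw=").unwrap();
+/// ```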
 #[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
 pub struct KeyId(Vec<u8>);
@@ -237,11 +246,15 @@
 
 impl SignatureValue {
     /// Create a new `SignatureValue` from the given bytes.
+    ///
+    /// Note: It is unlikely that you ever want to do this manually.
     pub fn new(bytes: Vec<u8>) -> Self {
         SignatureValue(bytes)
     }
 
     /// Create a new `SignatureValue` from the given base64url string.
+    ///
+    /// Note: It is unlikely that you ever want to do this manually.
     pub fn from_string(string: &str) -> Result<Self> {
         Ok(SignatureValue(BASE64URL.decode(string.as_bytes())?))
     }
@@ -286,7 +299,7 @@
             x if x == RSA_SPKI_OID => Ok(KeyType::Rsa),
             x if x == ED25519_SPKI_OID => Ok(KeyType::Ed25519),
             x => Err(Error::Encoding(format!(
-                "Unknown OID:{}",
+                "Unknown OID: {}",
                 x.iter().map(|b| format!("{:x}", b)).collect::<String>()
             ))),
         }
@@ -365,6 +378,9 @@
     ///
     /// # Generating Keys
     ///
+    /// If you use `cargo install tuf`, you will have access to the TUF CLI tool that will allow
+    /// you to generate keys. If you do not want to do this, the following can be used instead.
+    ///
     /// ## Ed25519
     ///
     /// ```bash
@@ -532,6 +548,8 @@
 
 impl PublicKey {
     /// Parse DER bytes as an SPKI key.
+    ///
+    /// See the documentation on `KeyValue` for more information on SPKI.
     pub fn from_spki(der_bytes: &[u8]) -> Result<Self> {
         let input = Input::from(der_bytes);
         let (typ, value) = input.read_all(derp::Error::Read, |input| {
@@ -541,6 +559,7 @@
                     let typ = KeyType::from_oid(typ.as_slice_less_safe()).map_err(|_| {
                         derp::Error::WrongValue
                     })?;
+                    // for RSA / ed25519 this is null, so don't bother parsing it
                     let _ = derp::read_null(input)?;
                     Ok(typ)
                 })?;
@@ -557,6 +576,8 @@
     }
 
     /// Write the public key as SPKI DER bytes.
+    ///
+    /// See the documentation on `KeyValue` for more information on SPKI.
     pub fn as_spki(&self) -> Result<Vec<u8>> {
         Ok(write_spki(&self.value.0, &self.typ)?)
     }
@@ -643,7 +664,7 @@
 }
 
 impl Signature {
-    /// An immutable reference to the `KeyId` that produced the signature.
+    /// An immutable reference to the `KeyId` of the key that produced the signature.
     pub fn key_id(&self) -> &KeyId {
         &self.key_id
     }
diff --git a/src/interchange/mod.rs b/src/interchange/mod.rs
index 7635dba..470ad25 100644
--- a/src/interchange/mod.rs
+++ b/src/interchange/mod.rs
@@ -34,7 +34,7 @@
 
     /// Write a struct to a stream.
     ///
-    /// Note: This *MUST* writer the bytes canonically for hashes to line up correctly in other
+    /// Note: This *MUST* write the bytes canonically for hashes to line up correctly in other
     /// areas of the library.
     fn to_writer<W, T: Sized>(writer: W, value: &T) -> Result<()>
     where
@@ -49,6 +49,165 @@
 }
 
 /// JSON data interchange.
+///
+/// # Schema
+///
+/// This doesn't use JSON Schema because that specification language is rage-inducing. Here's
+/// something else instead.
+///
+/// ## Common Entities
+///
+/// `NATURAL_NUMBER` is an integer in the range `[1, 2**32)`.
+///
+/// `EXPIRES` is an ISO-8601 date time in format `YYYY-MM-DD'T'hh:mm:ss'Z'`.
+///
+/// `KEY_ID` is the base64url encoded value of `sha256(spki(pub_key))`.
+///
+/// `PUB_KEY` is the following:
+///
+/// ```bash
+/// {
+///   "type": KEY_TYPE,
+///   "value": PUBLIC
+/// }
+/// ```
+///
+/// `PUBLIC` is a base64url encoded `SubjectPublicKeyInfo` DER public key.
+///
+/// `KEY_TYPE` is a string (either `rsa` or `ed25519`).
+///
+/// `HASH_VALUE` is a base64url encoded hash value.
+///
+/// `METADATA_DESCRIPTION` is the following:
+///
+/// ```bash
+/// {
+///   "version": NATURAL_NUMBER,
+///   "size": NATURAL_NUMBER,
+///   "hashes": {
+///     HASH_ALGORITHM: HASH_VALUE
+///     ...
+///   }
+/// }
+/// ```
+///
+/// ## `SignedMetadata`
+///
+/// ```bash
+/// {
+///   "signatures": [SIGNATURE],
+///   "signed": SIGNED
+/// }
+/// ```
+///
+/// `SIGNED` is one of:
+///
+/// - `RootMetadata`
+/// - `SnapshotMetadata`
+/// - `TargetsMetadata`
+/// - `TimestampMetadata`
+///
+/// The elements of `signatures` must have unique `key_id`s.
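+///
+/// `SIGNATURE` is an object describing a single signature. Based on this crate's
+/// `crypto::Signature` type, it presumably takes the following shape (field names here are
+/// assumed, not normative):
+///
+/// ```bash
+/// {
+///   "key_id": KEY_ID,
+///   "scheme": SCHEME,
+///   "value": SIG_VALUE
+/// }
+/// ```
+///
+/// `SCHEME` is a string naming the signature scheme, and `SIG_VALUE` is a base64url encoded
+/// signature value.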
+///
+/// ## `RootMetadata`
+///
+/// ```bash
+/// {
+///   "type": "root",
+///   "version": NATURAL_NUMBER,
+///   "expires": EXPIRES,
+///   "keys": {
+///     KEY_ID: PUB_KEY,
+///     ...
+///   },
+///   "root": ROLE_DESCRIPTION,
+///   "snapshot": ROLE_DESCRIPTION,
+///   "targets": ROLE_DESCRIPTION,
+///   "timestamp": ROLE_DESCRIPTION
+/// }
+/// ```
+///
+/// `ROLE_DESCRIPTION` is the following:
+///
+/// ```bash
+/// {
+///   "threshold": NATURAL_NUMBER,
+///   "key_ids": [KEY_ID]
+/// }
+/// ```
+///
+/// ## `SnapshotMetadata`
+///
+/// ```bash
+/// {
+///   "type": "snapshot",
+///   "version": NATURAL_NUMBER,
+///   "expires": EXPIRES,
+///   "meta": {
+///     META_PATH: METADATA_DESCRIPTION
+///   }
+/// }
+/// ```
+///
+/// `META_PATH` is a string.
+///
+/// ## `TargetsMetadata`
+///
+/// ```bash
+/// {
+///   "type": "timestamp",
+///   "version": NATURAL_NUMBER,
+///   "expires": EXPIRES,
+///   "targets": {
+///     TARGET_PATH: TARGET_DESCRIPTION
+///     ...
+///   },
+///   "delegations": DELEGATIONS
+/// }
+/// ```
+///
+/// `DELEGATIONS` is optional and is described by the following:
+///
+/// ```bash
+/// {
+///   "keys": {
+///     KEY_ID: PUB_KEY,
+///     ...
+///   },
+///   "roles": {
+///     ROLE: DELEGATION,
+///     ...
+///   }
+/// }
+/// ```
+///
+/// `DELEGATION` is:
+///
+/// ```bash
+/// {
+///   "name": ROLE,
+///   "threshold": NATURAL_NUMBER,
+///   "terminating": BOOLEAN,
+///   "key_ids": [KEY_ID],
+///   "paths": [PATH]
+/// }
+/// ```
+///
+/// `ROLE` is a string.
+///
+/// `PATH` is a string.
+///
+/// ## `TimestampMetadata`
+///
+/// ```bash
+/// {
+///   "type": "timestamp",
+///   "version": NATURAL_NUMBER,
+///   "expires": EXPIRES,
+///   "snapshot": METADATA_DESCRIPTION
+/// }
+/// ```
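+///
+/// ## Example
+///
+/// Putting the pieces together, a complete signed timestamp document would look roughly like the
+/// following (the signature fields follow the assumed `SIGNATURE` shape above, and `...` values
+/// are illustrative placeholders):
+///
+/// ```bash
+/// {
+///   "signatures": [{
+///     "key_id": "diNfThTFm0PI8R-Bq7NztUIvZbZiaC_weJBgcqaHlWw=",
+///     "scheme": "ed25519",
+///     "value": "..."
+///   }],
+///   "signed": {
+///     "type": "timestamp",
+///     "version": 1,
+///     "expires": "2038-01-01T00:00:00Z",
+///     "snapshot": {
+///       "version": 1,
+///       "size": 1024,
+///       "hashes": {
+///         "sha256": "..."
+///       }
+///     }
+///   }
+/// }
+/// ```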
 #[derive(Debug, Clone, PartialEq)]
 pub struct JsonDataInterchange {}
 impl DataInterchange for JsonDataInterchange {
diff --git a/src/lib.rs b/src/lib.rs
index cd23a47..ca94c60 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,67 +1,105 @@
 //! This crate provides an API for talking to repositories that implement The Update Framework
 //! (TUF).
 //!
-//! If you are unfamiliar with TUF, you should read up on via the [official
+//! If you are unfamiliar with TUF, you should read up on it via the [official
 //! website](http://theupdateframework.github.io/). This crate aims to implement the entirety of
 //! the specification as defined at the [head of the `develop`
 //! branch](https://github.com/theupdateframework/tuf/blob/develop/docs/tuf-spec.txt) in the
 //! official TUF git repository.
 //!
-//! # Example
+//! Additionally, the following two papers are valuable supplements in understanding how to
+//! actually implement TUF for a community repository.
 //!
-//! ```no_run
-//! extern crate hyper;
-//! extern crate tuf;
-//! extern crate url;
+//! - [The Diplomat paper
+//! (2016)](https://www.usenix.org/conference/nsdi16/technical-sessions/presentation/kuppusamy)
+//! - [The Mercury paper
+//! (2017)](https://www.usenix.org/conference/atc17/technical-sessions/presentation/kuppusamy)
 //!
-//! use hyper::client::Client as HttpClient;
-//! use std::path::PathBuf;
-//! use tuf::Tuf;
-//! use tuf::crypto::KeyId;
-//! use tuf::client::{Client, Config};
-//! use tuf::metadata::{RootMetadata, SignedMetadata, Role, MetadataPath,
-//!     MetadataVersion};
-//! use tuf::interchange::JsonDataInterchange;
-//! use tuf::repository::{Repository, FileSystemRepository, HttpRepository};
-//! use url::Url;
+//! Failure to read the spec and the above papers will likely lead to an implementation that does
+//! not take advantage of all the security guarantees that TUF offers.
 //!
-//! static TRUSTED_ROOT_KEY_IDS: &'static [&str] = &[
-//!     "diNfThTFm0PI8R-Bq7NztUIvZbZiaC_weJBgcqaHlWw=",
-//!     "ar9AgoRsmeEcf6Ponta_1TZu1ds5uXbDemBig30O7ck=",
-//!     "T5vfRrM1iHpgzGwAHe7MbJH_7r4chkOAphV3OPCCv0I=",
-//! ];
+//! # Interoperability
 //!
-//! fn main() {
-//!     let key_ids: Vec<KeyId> = TRUSTED_ROOT_KEY_IDS.iter()
-//!         .map(|k| KeyId::from_string(k).unwrap())
-//!         .collect();
+//! It should be noted that historically the TUF spec defined exactly one metadata format and one
+//! way of organizing metadata within a repository. Thus, all TUF implementations could perfectly
+//! interoperate. The TUF spec has moved to describing *how a framework should behave*, leaving many
+//! of the details up to the implementor. Therefore, there are **zero** guarantees that this library
+//! will work with any other TUF implementation. Should you want to access a TUF repository that
+//! uses `rust-tuf` as its backend from another language, ASN.1 modules and metadata schemas are
+//! provided that will allow you to interoperate with this library.
 //!
-//!     let mut local = FileSystemRepository::new(PathBuf::from("~/.rustup"));
+//! # Implementation Considerations
 //!
-//!     let mut remote = HttpRepository::new(
-//!         Url::parse("https://static.rust-lang.org/").unwrap(),
-//!         HttpClient::new(),
-//!         Some("rustup/1.4.0".into()),
-//!         None);
+//! ## Key Management
 //!
-//!     let config = Config::build().finish().unwrap();
+//! Part of TUF is that it acts as its own PKI, so no integration with an external PKI is needed
+//! for managing keys.
 //!
-//!     // fetching this original root from the network is safe because
-//!     // we are using trusted, pinned keys to verify it
-//!     let root = remote.fetch_metadata(&Role::Root,
-//!                                      &MetadataPath::from_role(&Role::Root),
-//!                                      &MetadataVersion::None,
-//!                                      config.max_root_size(),
-//!                                      config.min_bytes_per_second(),
-//!                                      None).unwrap();
+//! Note: No two private keys that are generated should ever exist on the same hardware. When a
+//! step says "generate `N` keys," the implication is that these `N` keys are generated on `N`
+//! devices.
 //!
-//!     let tuf = Tuf::<JsonDataInterchange>::from_root_pinned(root, &key_ids).unwrap();
+//! The first set of keys that need to be generated are the root keys that are used to sign the
+//! root metadata. The root role should be defined with the following properties:
 //!
-//!     let mut client = Client::new(tuf, config, local, remote).unwrap();
-//!     let _ = client.update_local().unwrap();
-//!     let _ = client.update_remote().unwrap();
-//! }
-//! ```
+//! - Minimum:
+//!   - 3 keys
+//!   - threshold of 2
+//! - Recommended:
+//!   - 5 keys
+//!   - threshold of 3
+//!
+//! If a threshold of root keys are compromised, then the entire system is compromised and TUF
+//! clients will need to be manually updated. Similarly, if some `X` keys are lost such that the
+//! threshold `N` cannot be reached, then clients will also need to be manually updated. Both of
+//! these situations are considered critically unsafe. Whatever number of keys is used, it should be
+//! assumed that some small number may be lost or compromised.
+//!
+//! These root keys should be kept offline on secure media.
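+//!
+//! For illustration, a private key in the PKCS#8 DER format this crate expects (see
+//! `PrivateKey::from_pkcs8`) could be generated on an offline machine with stock OpenSSL; the
+//! command below is plain OpenSSL (RSA shown here) and is not part of this crate:
+//!
+//! ```bash
+//! $ openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:4096 \
+//!     -outform der -out root-1.pk8.der
+//! ```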
+//!
+//! ## Delegations
+//!
+//! TUF's most useful feature is the ability to delegate certain roles to sign certain targets.
+//! This is discussed in extensive detail in the aforementioned Diplomat paper. There are three
+//! problems faced when delegating trust in TUF:
+//!
+//! 1. What to do for existing accounts that have not yet created and signed TUF metadata
+//! 2. What to do when a new account registers
+//! 3. What to do when an account uploads a new target and new metadata
+//!
+//! There are several approaches for dealing with the above scenarios. We are only going to discuss
+//! one here as it is the recommended approach. This approach is taken directly from Section 6.1 of
+//! the Diplomat paper.
+//!
+//! ### Maximum Security Model
+//!
+//! The top-level targets role delegates to three other roles, which are listed in the following
+//! order (an illustrative encoding of a delegation follows the list):
+//!
+//! 1. `claimed-projects`
+//!   - `terminating: true`
+//!   - Delegates to project-specific roles that have registered keys with TUF
+//! 2. `rarely-updated-projects`
+//!   - `terminating: true`
+//!   - Signs all packages for all projects that have been "abandoned" or left unupdated for a long
+//!   time AND have not yet registered keys with TUF
+//! 3. `new-projects`
+//!   - `terminating: false`
+//!   - Signs all packages for all new projects as well as projects that were relegated to
+//!   `rarely-updated-projects`
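+//!
+//! For illustration, the first delegation above might be encoded as a `DELEGATION` (in the JSON
+//! schema described in the `interchange` module) roughly as follows; the threshold and paths here
+//! are hypothetical:
+//!
+//! ```bash
+//! {
+//!   "name": "claimed-projects",
+//!   "threshold": 1,
+//!   "terminating": true,
+//!   "key_ids": [KEY_ID],
+//!   "paths": ["claimed-projects/"]
+//! }
+//! ```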
+//!
+//! The top-level `targets` role as well as `claimed-projects` and `rarely-updated-projects`
+//! **MUST** all use offline keys.
+//!
+//! The critical, manual step is to register new projects with TUF keys and move them into the
+//! `claimed-projects` role. Projects that refuse to register keys should have their packages
+//! periodically moved into the `rarely-updated-projects` role. Projects in either of these two
+//! roles are safe from compromise as their keys are offline. Since the keys used for the above
+//! operations are kept offline, this is a periodic, manual process.
+//!
+//! ## Snapshot & Timestamp
+//!
+//! In a community repository, these two keys need to be kept online and will be used to sign new
+//! metadata on every update.
 
 #![deny(missing_docs)]
 
diff --git a/src/metadata.rs b/src/metadata.rs
index fef50a6..eda6908 100644
--- a/src/metadata.rs
+++ b/src/metadata.rs
@@ -1,4 +1,4 @@
-//! Structures used to represent TUF metadata
+//! TUF metadata.
 
 use chrono::DateTime;
 use chrono::offset::Utc;
@@ -194,7 +194,7 @@
 /// Enum used for addressing versioned TUF metadata.
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub enum MetadataVersion {
-    /// The metadata is unversioned.
+    /// The metadata is unversioned. This is the latest version of the metadata.
     None,
     /// The metadata is addressed by a specific version number.
     Number(u32),
@@ -239,7 +239,33 @@
     D: DataInterchange,
     M: Metadata,
 {
-    /// Create a new `SignedMetadata`.
+    /// Create a new `SignedMetadata`. The supplied private key is used to sign the canonicalized
+    /// bytes of the provided metadata with the provided scheme.
+    ///
+    /// ```
+    /// extern crate chrono;
+    /// extern crate tuf;
+    ///
+    /// use chrono::prelude::*;
+    /// use tuf::crypto::{PrivateKey, SignatureScheme, HashAlgorithm};
+    /// use tuf::interchange::JsonDataInterchange;
+    /// use tuf::metadata::{MetadataDescription, TimestampMetadata, SignedMetadata};
+    ///
+    /// fn main() {
+    ///     let key: &[u8] = include_bytes!("./tests/ed25519/ed25519-1.pk8.der");
+    ///     let key = PrivateKey::from_pkcs8(&key).unwrap();
+    ///
+    ///     let timestamp = TimestampMetadata::new(
+    ///         1,
+    ///         Utc.ymd(2017, 1, 1).and_hms(0, 0, 0),
+    ///         MetadataDescription::from_reader(&*vec![0x01, 0x02, 0x03], 1,
+    ///             &[HashAlgorithm::Sha256]).unwrap()
+    ///     ).unwrap();
+    ///
+    ///     SignedMetadata::<JsonDataInterchange, TimestampMetadata>::new(
+    ///         &timestamp, &key, SignatureScheme::Ed25519).unwrap();
+    /// }
+    /// ```
     pub fn new(
         metadata: &M,
         private_key: &PrivateKey,
@@ -263,6 +289,41 @@
 /// you're using this to append several signatures at once, you are doing something wrong. The
     /// preferred method is to generate your copy of the metadata locally and use `merge_signatures`
     /// to perform the "append" operations.
+    ///
+    /// ```
+    /// extern crate chrono;
+    /// extern crate tuf;
+    ///
+    /// use chrono::prelude::*;
+    /// use tuf::crypto::{PrivateKey, SignatureScheme, HashAlgorithm};
+    /// use tuf::interchange::JsonDataInterchange;
+    /// use tuf::metadata::{MetadataDescription, TimestampMetadata, SignedMetadata};
+    ///
+    /// fn main() {
+    ///     let key_1: &[u8] = include_bytes!("./tests/ed25519/ed25519-1.pk8.der");
+    ///     let key_1 = PrivateKey::from_pkcs8(&key_1).unwrap();
+    ///
+    ///     // Note: This is for demonstration purposes only.
+    ///     // You should never have multiple private keys on the same device.
+    ///     let key_2: &[u8] = include_bytes!("./tests/ed25519/ed25519-2.pk8.der");
+    ///     let key_2 = PrivateKey::from_pkcs8(&key_2).unwrap();
+    ///
+    ///     let timestamp = TimestampMetadata::new(
+    ///         1,
+    ///         Utc.ymd(2017, 1, 1).and_hms(0, 0, 0),
+    ///         MetadataDescription::from_reader(&*vec![0x01, 0x02, 0x03], 1,
+    ///             &[HashAlgorithm::Sha256]).unwrap()
+    ///     ).unwrap();
+    ///     let mut timestamp = SignedMetadata::<JsonDataInterchange, TimestampMetadata>::new(
+    ///         &timestamp, &key_1, SignatureScheme::Ed25519).unwrap();
+    ///
+    ///     timestamp.add_signature(&key_2, SignatureScheme::Ed25519).unwrap();
+    ///     assert_eq!(timestamp.signatures().len(), 2);
+    ///
+    ///     timestamp.add_signature(&key_2, SignatureScheme::Ed25519).unwrap();
+    ///     assert_eq!(timestamp.signatures().len(), 2);
+    /// }
+    /// ```
     pub fn add_signature(
         &mut self,
         private_key: &PrivateKey,
@@ -320,12 +381,61 @@
     }
 
     /// Verify this metadata.
-    pub fn verify(
-        &self,
-        threshold: u32,
-        authorized_key_ids: &HashSet<KeyId>,
-        available_keys: &HashMap<KeyId, PublicKey>,
-    ) -> Result<()> {
+    ///
+    /// ```
+    /// extern crate chrono;
+    /// extern crate tuf;
+    ///
+    /// use chrono::prelude::*;
+    /// use tuf::crypto::{PrivateKey, SignatureScheme, HashAlgorithm};
+    /// use tuf::interchange::JsonDataInterchange;
+    /// use tuf::metadata::{MetadataDescription, TimestampMetadata, SignedMetadata};
+    ///
+    /// fn main() {
+    ///     let key_1: &[u8] = include_bytes!("./tests/ed25519/ed25519-1.pk8.der");
+    ///     let key_1 = PrivateKey::from_pkcs8(&key_1).unwrap();
+    ///
+    ///     let key_2: &[u8] = include_bytes!("./tests/ed25519/ed25519-2.pk8.der");
+    ///     let key_2 = PrivateKey::from_pkcs8(&key_2).unwrap();
+    ///
+    ///     let timestamp = TimestampMetadata::new(
+    ///         1,
+    ///         Utc.ymd(2017, 1, 1).and_hms(0, 0, 0),
+    ///         MetadataDescription::from_reader(&*vec![0x01, 0x02, 0x03], 1,
+    ///             &[HashAlgorithm::Sha256]).unwrap()
+    ///     ).unwrap();
+    ///     let timestamp = SignedMetadata::<JsonDataInterchange, TimestampMetadata>::new(
+    ///         &timestamp, &key_1, SignatureScheme::Ed25519).unwrap();
+    ///
+    ///     assert!(timestamp.verify(
+    ///         1,
+    ///         vec![key_1.public()],
+    ///     ).is_ok());
+    ///
+    ///     // fail with increased threshold
+    ///     assert!(timestamp.verify(
+    ///         2,
+    ///         vec![key_1.public()],
+    ///     ).is_err());
+    ///
+    ///     // fail when the keys aren't authorized
+    ///     assert!(timestamp.verify(
+    ///         1,
+    ///         vec![key_2.public()],
+    ///     ).is_err());
+    ///
+    ///     // fail when the keys don't exist
+    ///     assert!(timestamp.verify(
+    ///         1,
+    ///         &[],
+    ///     ).is_err());
+    /// }
+    /// ```
+    pub fn verify<'a, I>(&self, threshold: u32, authorized_keys: I) -> Result<()>
+    where
+        I: IntoIterator<Item = &'a PublicKey>,
+    {
         if self.signatures.len() < 1 {
             return Err(Error::VerificationFailure(
                 "The metadata was not signed with any authorized keys."
@@ -339,19 +449,16 @@
             ));
         }
 
+        let authorized_keys = authorized_keys
+            .into_iter()
+            .map(|k| (k.key_id(), k))
+            .collect::<HashMap<&KeyId, &PublicKey>>();
+
         let canonical_bytes = D::canonicalize(&self.signed)?;
 
         let mut signatures_needed = threshold;
         for sig in self.signatures.iter() {
-            if !authorized_key_ids.contains(sig.key_id()) {
-                warn!(
-                    "Key ID {:?} is not authorized to sign metadata.",
-                    sig.key_id()
-                );
-                continue;
-            }
-
-            match available_keys.get(sig.key_id()) {
+            match authorized_keys.get(sig.key_id()) {
                 Some(ref pub_key) => {
                     match pub_key.verify(&canonical_bytes, &sig) {
                         Ok(()) => {
@@ -365,7 +472,7 @@
                 }
                 None => {
                     warn!(
-                        "Key ID {:?} was not found in the set of available keys.",
+                        "Key ID {:?} was not found in the set of authorized keys.",
                         sig.key_id()
                     );
                 }
diff --git a/src/repository.rs b/src/repository.rs
index 10b1e24..cc1d879 100644
--- a/src/repository.rs
+++ b/src/repository.rs
@@ -33,7 +33,7 @@
 
     /// Store signed metadata.
     ///
-    /// Note: This *MUST* canonicalize the bytes before storing them as a read will expect the
+    /// Note: This **MUST** canonicalize the bytes before storing them as a read will expect the
     /// hashes of the metadata to match.
     fn store_metadata<M>(
         &mut self,
diff --git a/src/tuf.rs b/src/tuf.rs
index f3af563..1db839a 100644
--- a/src/tuf.rs
+++ b/src/tuf.rs
@@ -25,10 +25,15 @@
 impl<D: DataInterchange> Tuf<D> {
     /// Create a new `TUF` struct from a known set of pinned root keys that are used to verify the
     /// signed metadata.
-    pub fn from_root_pinned(
+    pub fn from_root_pinned<'a, I>(
         mut signed_root: SignedMetadata<D, RootMetadata>,
-        root_key_ids: &[KeyId],
-    ) -> Result<Self> {
+        root_key_ids: I,
+    ) -> Result<Self>
+    where
+        I: IntoIterator<Item = &'a KeyId>,
+    {
+        let root_key_ids = root_key_ids.into_iter().collect::<HashSet<&KeyId>>();
+
         signed_root.signatures_mut().retain(|s| {
             root_key_ids.contains(s.key_id())
         });
@@ -43,8 +48,16 @@
         let root = D::deserialize::<RootMetadata>(signed_root.signed())?;
         let _ = signed_root.verify(
             root.root().threshold(),
-            root.root().key_ids(),
-            root.keys(),
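+            // keep only the keys that the root role authorizes to sign the root metadata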
+            root.keys().iter().filter_map(
+                |(k, v)| if root.root()
+                    .key_ids()
+                    .contains(k)
+                {
+                    Some(v)
+                } else {
+                    None
+                },
+            ),
         )?;
         Ok(Tuf {
             root: root,
@@ -85,8 +98,13 @@
     pub fn update_root(&mut self, signed_root: SignedMetadata<D, RootMetadata>) -> Result<bool> {
         signed_root.verify(
             self.root.root().threshold(),
-            self.root.root().key_ids(),
-            self.root.keys(),
+            self.root.keys().iter().filter_map(|(k, v)| {
+                if self.root.root().key_ids().contains(k) {
+                    Some(v)
+                } else {
+                    None
+                }
+            }),
         )?;
 
         let root = D::deserialize::<RootMetadata>(signed_root.signed())?;
@@ -111,8 +129,16 @@
 
         let _ = signed_root.verify(
             root.root().threshold(),
-            root.root().key_ids(),
-            root.keys(),
+            root.keys().iter().filter_map(
+                |(k, v)| if root.root()
+                    .key_ids()
+                    .contains(k)
+                {
+                    Some(v)
+                } else {
+                    None
+                },
+            ),
         )?;
 
         self.purge_metadata();
@@ -128,8 +154,15 @@
     ) -> Result<bool> {
         signed_timestamp.verify(
             self.root.timestamp().threshold(),
-            self.root.timestamp().key_ids(),
-            self.root.keys(),
+            self.root.keys().iter().filter_map(
+                |(k, v)| {
+                    if self.root.timestamp().key_ids().contains(k) {
+                        Some(v)
+                    } else {
+                        None
+                    }
+                },
+            ),
         )?;
 
         let current_version = self.timestamp.as_ref().map(|t| t.version()).unwrap_or(0);
@@ -175,8 +208,15 @@
 
             signed_snapshot.verify(
                 root.snapshot().threshold(),
-                root.snapshot().key_ids(),
-                root.keys(),
+                self.root.keys().iter().filter_map(
+                    |(k, v)| {
+                        if root.snapshot().key_ids().contains(k) {
+                            Some(v)
+                        } else {
+                            None
+                        }
+                    },
+                ),
             )?;
 
             let snapshot: SnapshotMetadata = D::deserialize(&signed_snapshot.signed())?;
@@ -260,8 +300,13 @@
 
             signed_targets.verify(
                 root.targets().threshold(),
-                root.targets().key_ids(),
-                root.keys(),
+                root.keys().iter().filter_map(|(k, v)| {
+                    if root.targets().key_ids().contains(k) {
+                        Some(v)
+                    } else {
+                        None
+                    }
+                }),
             )?;
 
             let targets: TargetsMetadata = D::deserialize(&signed_targets.signed())?;
@@ -340,8 +385,16 @@
 
                 signed.verify(
                     delegation.threshold(),
-                    delegation.key_ids(),
-                    parent.keys(),
+                    parent.keys().iter().filter_map(
+                        |(k, v)| if delegation
+                            .key_ids()
+                            .contains(k)
+                        {
+                            Some(v)
+                        } else {
+                            None
+                        },
+                    ),
                 )?;
             }
 
@@ -852,7 +905,8 @@
 
         tuf.update_timestamp(timestamp).unwrap();
 
-        let meta_map = hashmap!(
+        let meta_map =
+            hashmap!(
             MetadataPath::from_role(&Role::Targets) =>
                 MetadataDescription::from_reader(&*vec![], 1, &[HashAlgorithm::Sha256]).unwrap(),
         );
@@ -863,12 +917,9 @@
 
         tuf.update_snapshot(snapshot).unwrap();
 
-        let targets = TargetsMetadata::new(
-            1,
-            Utc.ymd(2038, 1, 1).and_hms(0, 0, 0),
-            hashmap!(),
-            None,
-        ).unwrap();
+        let targets =
+            TargetsMetadata::new(1, Utc.ymd(2038, 1, 1).and_hms(0, 0, 0), hashmap!(), None)
+                .unwrap();
         let targets: SignedMetadata<JsonDataInterchange, TargetsMetadata> =
             SignedMetadata::new(&targets, &KEYS[2], SignatureScheme::Ed25519).unwrap();
 
@@ -911,7 +962,8 @@
 
         tuf.update_timestamp(timestamp).unwrap();
 
-        let meta_map = hashmap!(
+        let meta_map =
+            hashmap!(
             MetadataPath::from_role(&Role::Targets) =>
                 MetadataDescription::from_reader(&*vec![], 1, &[HashAlgorithm::Sha256]).unwrap(),
         );
@@ -922,12 +974,9 @@
 
         tuf.update_snapshot(snapshot).unwrap();
 
-        let targets = TargetsMetadata::new(
-            1,
-            Utc.ymd(2038, 1, 1).and_hms(0, 0, 0),
-            hashmap!(),
-            None,
-        ).unwrap();
+        let targets =
+            TargetsMetadata::new(1, Utc.ymd(2038, 1, 1).and_hms(0, 0, 0), hashmap!(), None)
+                .unwrap();
         let targets: SignedMetadata<JsonDataInterchange, TargetsMetadata> =
             SignedMetadata::new(&targets, &KEYS[3], SignatureScheme::Ed25519).unwrap();
 
@@ -967,7 +1016,8 @@
 
         tuf.update_timestamp(timestamp).unwrap();
 
-        let meta_map = hashmap!(
+        let meta_map =
+            hashmap!(
             MetadataPath::from_role(&Role::Targets) =>
                 MetadataDescription::from_reader(&*vec![], 2, &[HashAlgorithm::Sha256]).unwrap(),
         );
@@ -978,12 +1028,9 @@
 
         tuf.update_snapshot(snapshot).unwrap();
 
-        let targets = TargetsMetadata::new(
-            1,
-            Utc.ymd(2038, 1, 1).and_hms(0, 0, 0),
-            hashmap!(),
-            None,
-        ).unwrap();
+        let targets =
+            TargetsMetadata::new(1, Utc.ymd(2038, 1, 1).and_hms(0, 0, 0), hashmap!(), None)
+                .unwrap();
         let targets: SignedMetadata<JsonDataInterchange, TargetsMetadata> =
             SignedMetadata::new(&targets, &KEYS[2], SignatureScheme::Ed25519).unwrap();
 
diff --git a/src/util.rs b/src/util.rs
index 11bfc89..6724d53 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -5,11 +5,16 @@
 
 use crypto::{HashAlgorithm, HashValue};
 
+/// Wrapper to verify a byte stream as it is read.
+///
 /// Wraps a `Read` to ensure that the consumer can't read more than a capped maximum number of
 /// bytes. Also, this enforces a minimum bitrate, returning an `Err` if it is not met. Finally,
-/// when the underlying `Read` is fully consumed, the hash of the data is optional calculated. If
+/// when the underlying `Read` is fully consumed, the hash of the data is optionally calculated. If
 /// the calculated hash does not match the given hash, it will return an `Err`. Consumers of a
 /// `SafeReader` should purge and untrust all read bytes if this ever returns an `Err`.
+///
+/// It is **critical** that none of the bytes from this struct are used until it has been fully
+/// consumed as the data is untrusted.
 pub struct SafeReader<R: Read> {
     inner: R,
     max_size: u64,
@@ -21,6 +26,11 @@
 
 impl<R: Read> SafeReader<R> {
     /// Create a new `SafeReader`.
+    ///
+    /// The argument `hash_data` takes a `HashAlgorithm` and expected `HashValue`. The given
+    /// algorithm is used to hash the data as it is read. At the end of the stream, the digest is
+    /// calculated and compared against `HashValue`. If the two are not equal, it means the data
+    /// stream has been tampered with in some way.
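+    ///
+    /// A sketch of the hash-check path described above (marked `ignore`; `sha256_of` is a
+    /// hypothetical helper standing in for a real SHA-256 computation):
+    ///
+    /// ```ignore
+    /// let bytes: &[u8] = &[0x00, 0x01, 0x02, 0x03];
+    /// let expected = HashValue::new(sha256_of(bytes));
+    /// let mut reader = SafeReader::new(
+    ///     bytes,
+    ///     bytes.len() as u64,                       // max_size cap
+    ///     0,                                        // min_bytes_per_second
+    ///     Some((&HashAlgorithm::Sha256, expected)), // verified once the stream is consumed
+    /// );
+    /// let mut buf = Vec::new();
+    /// reader.read_to_end(&mut buf).unwrap(); // fails on oversize, slow, or tampered data
+    /// ```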
     pub fn new(
         read: R,
         max_size: u64,
@@ -103,3 +113,119 @@
         }
     }
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn valid_read() {
+        let bytes: &[u8] = &[0x00, 0x01, 0x02, 0x03];
+        let mut reader = SafeReader::new(bytes, bytes.len() as u64, 0, None);
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_ok());
+        assert_eq!(buf, bytes);
+    }
+
+    #[test]
+    fn valid_read_large_data() {
+        let bytes: &[u8] = &[0x00; 64 * 1024];
+        let mut reader = SafeReader::new(bytes, bytes.len() as u64, 0, None);
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_ok());
+        assert_eq!(buf, bytes);
+    }
+
+    #[test]
+    fn valid_read_below_max_size() {
+        let bytes: &[u8] = &[0x00, 0x01, 0x02, 0x03];
+        let mut reader = SafeReader::new(bytes, (bytes.len() as u64) + 1, 0, None);
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_ok());
+        assert_eq!(buf, bytes);
+    }
+
+    #[test]
+    fn invalid_read_above_max_size() {
+        let bytes: &[u8] = &[0x00, 0x01, 0x02, 0x03];
+        let mut reader = SafeReader::new(bytes, (bytes.len() as u64) - 1, 0, None);
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_err());
+    }
+
+    #[test]
+    fn invalid_read_above_max_size_large_data() {
+        let bytes: &[u8] = &[0x00; 64 * 1024];
+        let mut reader = SafeReader::new(bytes, (bytes.len() as u64) - 1, 0, None);
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_err());
+    }
+
+    #[test]
+    fn valid_read_good_hash() {
+        let bytes: &[u8] = &[0x00, 0x01, 0x02, 0x03];
+        let mut context = digest::Context::new(&SHA256);
+        context.update(&bytes);
+        let hash_value = HashValue::new(context.finish().as_ref().to_vec());
+        let mut reader = SafeReader::new(
+            bytes,
+            bytes.len() as u64,
+            0,
+            Some((&HashAlgorithm::Sha256, hash_value)),
+        );
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_ok());
+        assert_eq!(buf, bytes);
+    }
+
+    #[test]
+    fn invalid_read_bad_hash() {
+        let bytes: &[u8] = &[0x00, 0x01, 0x02, 0x03];
+        let mut context = digest::Context::new(&SHA256);
+        context.update(&bytes);
+        context.update(&[0xFF]); // evil bytes
+        let hash_value = HashValue::new(context.finish().as_ref().to_vec());
+        let mut reader = SafeReader::new(
+            bytes,
+            bytes.len() as u64,
+            0,
+            Some((&HashAlgorithm::Sha256, hash_value)),
+        );
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_err());
+    }
+
+    #[test]
+    fn valid_read_good_hash_large_data() {
+        let bytes: &[u8] = &[0x00; 64 * 1024];
+        let mut context = digest::Context::new(&SHA256);
+        context.update(&bytes);
+        let hash_value = HashValue::new(context.finish().as_ref().to_vec());
+        let mut reader = SafeReader::new(
+            bytes,
+            bytes.len() as u64,
+            0,
+            Some((&HashAlgorithm::Sha256, hash_value)),
+        );
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_ok());
+        assert_eq!(buf, bytes);
+    }
+
+    #[test]
+    fn invalid_read_bad_hash_large_data() {
+        let bytes: &[u8] = &[0x00; 64 * 1024];
+        let mut context = digest::Context::new(&SHA256);
+        context.update(&bytes);
+        context.update(&[0xFF]); // evil bytes
+        let hash_value = HashValue::new(context.finish().as_ref().to_vec());
+        let mut reader = SafeReader::new(
+            bytes,
+            bytes.len() as u64,
+            0,
+            Some((&HashAlgorithm::Sha256, hash_value)),
+        );
+        let mut buf = Vec::new();
+        assert!(reader.read_to_end(&mut buf).is_err());
+    }
+}
diff --git a/tests/simple_example.rs b/tests/simple_example.rs
index 0dd3172..dfffed9 100644
--- a/tests/simple_example.rs
+++ b/tests/simple_example.rs
@@ -5,7 +5,7 @@
 
 use chrono::prelude::*;
 use chrono::offset::Utc;
-use tuf::{Tuf, Error};
+use tuf::Error;
 use tuf::client::{Client, Config};
 use tuf::crypto::{PrivateKey, SignatureScheme, KeyId, HashAlgorithm};
 use tuf::interchange::{DataInterchange, JsonDataInterchange};
@@ -25,26 +25,17 @@
 fn main() {
     let mut remote = EphemeralRepository::<JsonDataInterchange>::new();
     let root_key_ids = init_server(&mut remote).unwrap();
-    init_client(root_key_ids, remote).unwrap();
+    init_client(&root_key_ids, remote).unwrap();
 }
 
 fn init_client(
-    root_key_ids: Vec<KeyId>,
-    mut remote: EphemeralRepository<JsonDataInterchange>,
+    root_key_ids: &[KeyId],
+    remote: EphemeralRepository<JsonDataInterchange>,
 ) -> Result<(), Error> {
     let local = EphemeralRepository::<JsonDataInterchange>::new();
     let config = Config::build().finish()?;
-    let root = remote.fetch_metadata(
-        &Role::Root,
-        &MetadataPath::from_role(&Role::Root),
-        &MetadataVersion::None,
-        config.max_root_size(),
-        config.min_bytes_per_second(),
-        None,
-    )?;
 
-    let tuf = Tuf::<JsonDataInterchange>::from_root_pinned(root, &root_key_ids)?;
-    let mut client = Client::new(tuf, config, local, remote)?;
+    let mut client = Client::with_root_pinned(root_key_ids, config, local, remote)?;
     match client.update_local() {
         Ok(_) => (),
         Err(e) => println!("{:?}", e),