Change metadata verification API to be less error-prone
fixes #120
diff --git a/src/crypto.rs b/src/crypto.rs
index 18546e0..ab21637 100644
--- a/src/crypto.rs
+++ b/src/crypto.rs
@@ -116,7 +116,8 @@
///
/// # Calculating
/// A `KeyId` is calculated as `sha256(spki(pub_key_bytes))` where `spki` is a function that takes
-/// any encoding for a public key an converts it into the `SubjectPublicKeyInfo` (SPKI) encoding.
+/// any encoding for a public key and converts it into the `SubjectPublicKeyInfo` (SPKI) DER
+/// encoding.
///
/// Note: Historically the TUF spec says that a key's ID should be calculated with
/// `sha256(cjson(encoded(pub_key_bytes)))`, but since there could be multiple supported data
@@ -125,24 +126,30 @@
/// # ASN.1
/// ```bash
/// PublicKey ::= CHOICE {
+/// -- This field is checked for consistency against `subjectPublicKey`.
+/// -- The OID determines how we attempt to parse the `BIT STRING`.
/// algorithm AlgorithmIdentifier,
-/// subjectPublicKey BIT STRING (CONTAINING PublicKeyChoice)
+/// -- Either:
+/// -- 1. Encapsulates an `RsaPublicKey`
+/// -- 2. Equals an `Ed25519PublicKey`
+/// subjectPublicKey BIT STRING
/// }
-///
+///
/// AlgorithmIdentifier ::= SEQUENCE {
-/// -- 1.2.840.113549.1.1.1 rsaEncryption(PKCS #1)
-/// -- 1.3.101.112 curveEd25519(EdDSA 25519 signature algorithm)
+/// -- Either:
+/// -- 1. 1.2.840.113549.1.1.1 rsaEncryption(PKCS #1)
+/// -- 2. 1.3.101.112 curveEd25519(EdDSA 25519 signature algorithm)
/// algorithm OBJECT IDENTIFIER,
+/// -- In our cases, this is always `NULL`.
/// parameters ANY DEFINED BY algorithm OPTIONAL
/// }
///
-/// PublicKeyChoice ::= CHOICE {
-/// rsa SEQUENCE {
-/// modulus INTEGER (1..MAX),
-/// exponent INTEGER (1..MAX)
-/// },
-/// ed25519 BIT STRING
+/// RsaPublicKey ::= SEQUENCE {
+/// modulus INTEGER (1..MAX),
+/// exponent INTEGER (1..MAX)
/// }
+///
+/// Ed25519PublicKey ::= BIT STRING
/// ```
#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct KeyId(Vec<u8>);
@@ -370,7 +377,7 @@
/// Create a private key from PKCS#8v2 DER bytes.
///
/// # Generating Keys
- ///
+ ///
/// If you use `cargo install tuf`, you will have access to the TUF CLI tool that will allow
/// you to generate keys. If you do not want to do this, the following can be used instead.
///
diff --git a/src/metadata.rs b/src/metadata.rs
index a0c44d4..e0816c6 100644
--- a/src/metadata.rs
+++ b/src/metadata.rs
@@ -302,7 +302,7 @@
/// fn main() {
/// let key_1: &[u8] = include_bytes!("./tests/ed25519/ed25519-1.pk8.der");
/// let key_1 = PrivateKey::from_pkcs8(&key_1).unwrap();
- ///
+ ///
/// // Note: This is for demonstration purposes only.
/// // You should never have multiple private keys on the same device.
/// let key_2: &[u8] = include_bytes!("./tests/ed25519/ed25519-2.pk8.der");
@@ -332,7 +332,9 @@
let raw = D::serialize(&self.signed)?;
let bytes = D::canonicalize(&raw)?;
let sig = private_key.sign(&bytes, scheme)?;
- self.signatures.retain(|s| s.key_id() != private_key.key_id());
+ self.signatures.retain(
+ |s| s.key_id() != private_key.key_id(),
+ );
self.signatures.push(sig);
Ok(())
}
@@ -409,37 +411,35 @@
///
/// assert!(timestamp.verify(
/// 1,
- /// &hashset!(key_1.key_id().clone()),
- /// &hashmap!(key_1.key_id().clone() => key_1.public().clone()),
+ /// vec![key_1.public()],
/// ).is_ok());
///
/// // fail with increased threshold
/// assert!(timestamp.verify(
/// 2,
- /// &hashset!(key_1.key_id().clone()),
- /// &hashmap!(key_1.key_id().clone() => key_1.public().clone()),
+ /// vec![key_1.public()],
/// ).is_err());
///
/// // fail when the keys aren't authorized
/// assert!(timestamp.verify(
/// 1,
- /// &hashset!(key_2.key_id().clone()),
- /// &hashmap!(key_1.key_id().clone() => key_1.public().clone()),
+ /// vec![key_2.public()],
/// ).is_err());
///
/// // fail when the keys don't exist
/// assert!(timestamp.verify(
/// 1,
- /// &hashset!(key_1.key_id().clone()),
- /// &hashmap!(key_2.key_id().clone() => key_2.public().clone()),
+ /// &[],
/// ).is_err());
/// }
- pub fn verify(
+ pub fn verify<'a, I>(
&self,
threshold: u32,
- authorized_key_ids: &HashSet<KeyId>,
- available_keys: &HashMap<KeyId, PublicKey>,
- ) -> Result<()> {
+ authorized_keys: I,
+ ) -> Result<()>
+ where
+ I: IntoIterator<Item = &'a PublicKey>
+ {
if self.signatures.len() < 1 {
return Err(Error::VerificationFailure(
"The metadata was not signed with any authorized keys."
@@ -453,19 +453,13 @@
));
}
+ let authorized_keys = authorized_keys.into_iter().map(|k| (k.key_id(), k)).collect::<HashMap<&KeyId, &PublicKey>>();
+
let canonical_bytes = D::canonicalize(&self.signed)?;
let mut signatures_needed = threshold;
for sig in self.signatures.iter() {
- if !authorized_key_ids.contains(sig.key_id()) {
- warn!(
- "Key ID {:?} is not authorized to sign metadata.",
- sig.key_id()
- );
- continue;
- }
-
- match available_keys.get(sig.key_id()) {
+ match authorized_keys.get(sig.key_id()) {
Some(ref pub_key) => {
match pub_key.verify(&canonical_bytes, &sig) {
Ok(()) => {
@@ -479,7 +473,7 @@
}
None => {
warn!(
- "Key ID {:?} was not found in the set of available keys.",
+ "Key ID {:?} was not found in the set of authorized keys.",
sig.key_id()
);
}
diff --git a/src/tuf.rs b/src/tuf.rs
index acbe74e..3c30336 100644
--- a/src/tuf.rs
+++ b/src/tuf.rs
@@ -43,8 +43,7 @@
let root = D::deserialize::<RootMetadata>(signed_root.signed())?;
let _ = signed_root.verify(
root.root().threshold(),
- root.root().key_ids(),
- root.keys(),
+ root.keys().iter().filter_map(|(k, v)| if root.root().key_ids().contains(k) { Some(v) } else { None }),
)?;
Ok(Tuf {
root: root,
@@ -85,8 +84,7 @@
pub fn update_root(&mut self, signed_root: SignedMetadata<D, RootMetadata>) -> Result<bool> {
signed_root.verify(
self.root.root().threshold(),
- self.root.root().key_ids(),
- self.root.keys(),
+ self.root.keys().iter().filter_map(|(k, v)| if self.root.root().key_ids().contains(k) { Some(v) } else { None }),
)?;
let root = D::deserialize::<RootMetadata>(signed_root.signed())?;
@@ -111,8 +109,7 @@
let _ = signed_root.verify(
root.root().threshold(),
- root.root().key_ids(),
- root.keys(),
+ root.keys().iter().filter_map(|(k, v)| if root.root().key_ids().contains(k) { Some(v) } else { None }),
)?;
self.purge_metadata();
@@ -128,8 +125,7 @@
) -> Result<bool> {
signed_timestamp.verify(
self.root.timestamp().threshold(),
- self.root.timestamp().key_ids(),
- self.root.keys(),
+ self.root.keys().iter().filter_map(|(k, v)| if self.root.timestamp().key_ids().contains(k) { Some(v) } else { None }),
)?;
let current_version = self.timestamp.as_ref().map(|t| t.version()).unwrap_or(0);
@@ -175,8 +171,7 @@
signed_snapshot.verify(
root.snapshot().threshold(),
- root.snapshot().key_ids(),
- root.keys(),
+ root.keys().iter().filter_map(|(k, v)| if root.snapshot().key_ids().contains(k) { Some(v) } else { None }),
)?;
let snapshot: SnapshotMetadata = D::deserialize(&signed_snapshot.signed())?;
@@ -260,8 +255,7 @@
signed_targets.verify(
root.targets().threshold(),
- root.targets().key_ids(),
- root.keys(),
+ root.keys().iter().filter_map(|(k, v)| if root.targets().key_ids().contains(k) { Some(v) } else { None }),
)?;
let targets: TargetsMetadata = D::deserialize(&signed_targets.signed())?;
@@ -340,8 +334,7 @@
signed.verify(
delegation.threshold(),
- delegation.key_ids(),
- parent.keys(),
+ parent.keys().iter().filter_map(|(k, v)| if delegation.key_ids().contains(k) { Some(v) } else { None }),
)?;
}