[archivist] Delete unused archive code

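The Archivist's on-disk event archive is unused, so delete it end to
end: remove the archive module (ArchiveWriter, EventFileGroup, Event)
and its chrono dependency, drop the archive_path,
max_archive_size_bytes, and max_event_group_size_bytes fields from
Config and all config JSON, delete the archived_events Inspect node and
group-stats plumbing, stop logging archivist events via
BoundedListNode, and remove the "data" storage routing in the test
manifests and topology that only the archive needed.
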
Tested: existing tests

Change-Id: If578621ac8d89820776dfa03336425c963fda466
Reviewed-on: https://fuchsia-review.googlesource.com/c/fuchsia/+/558966
Commit-Queue: Miguel Flores <miguelfrde@google.com>
Fuchsia-Auto-Submit: Miguel Flores <miguelfrde@google.com>
Reviewed-by: Christopher Johnson <crjohns@google.com>
diff --git a/src/diagnostics/archivist/BUILD.gn b/src/diagnostics/archivist/BUILD.gn
index 989ce3d..fea45a3 100644
--- a/src/diagnostics/archivist/BUILD.gn
+++ b/src/diagnostics/archivist/BUILD.gn
@@ -67,7 +67,6 @@
     "//third_party/rust_crates:anyhow",
     "//third_party/rust_crates:async-trait",
     "//third_party/rust_crates:byteorder",
-    "//third_party/rust_crates:chrono",
     "//third_party/rust_crates:futures",
     "//third_party/rust_crates:itertools",
     "//third_party/rust_crates:lazy_static",
@@ -90,7 +89,6 @@
 
   sources = [
     "src/accessor.rs",
-    "src/archive.rs",
     "src/archivist.rs",
     "src/configs.rs",
     "src/constants.rs",
diff --git a/src/diagnostics/archivist/configs/archivist_config.json b/src/diagnostics/archivist/configs/archivist_config.json
index 97c9436..2843ff2d 100644
--- a/src/diagnostics/archivist/configs/archivist_config.json
+++ b/src/diagnostics/archivist/configs/archivist_config.json
@@ -1,9 +1,6 @@
 {
-    "archive_path": "/data/archive",
     "logs": {
         "max_cached_original_bytes": 4194304
     },
-    "max_archive_size_bytes": 10485760,
-    "max_event_group_size_bytes": 262144,
     "num_threads": 4
 }
diff --git a/src/diagnostics/archivist/configs/embedding-config.json b/src/diagnostics/archivist/configs/embedding-config.json
index f56098b..51a3a77 100644
--- a/src/diagnostics/archivist/configs/embedding-config.json
+++ b/src/diagnostics/archivist/configs/embedding-config.json
@@ -2,7 +2,5 @@
     "logs": {
         "max_cached_original_bytes": 4194304
     },
-    "max_archive_size_bytes": 10485760,
-    "max_event_group_size_bytes": 262144,
     "num_threads": 1
 }
diff --git a/src/diagnostics/archivist/configs/root-config.json b/src/diagnostics/archivist/configs/root-config.json
index 0c2a1b2a..2843ff2d 100644
--- a/src/diagnostics/archivist/configs/root-config.json
+++ b/src/diagnostics/archivist/configs/root-config.json
@@ -2,7 +2,5 @@
     "logs": {
         "max_cached_original_bytes": 4194304
     },
-    "max_archive_size_bytes": 10485760,
-    "max_event_group_size_bytes": 262144,
     "num_threads": 4
 }
diff --git a/src/diagnostics/archivist/src/archive.rs b/src/diagnostics/archivist/src/archive.rs
deleted file mode 100644
index 068c1c7..0000000
--- a/src/diagnostics/archivist/src/archive.rs
+++ /dev/null
@@ -1,823 +0,0 @@
-// Copyright 2019 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-use {
-    crate::diagnostics,
-    anyhow::{format_err, Error},
-    chrono::prelude::*,
-    itertools::Itertools,
-    lazy_static::lazy_static,
-    regex::Regex,
-    serde::{Deserialize, Serialize},
-    serde_json::Deserializer,
-    std::collections::BTreeMap,
-    std::ffi::{OsStr, OsString},
-    std::fs,
-    std::io::Write,
-    std::path::{Path, PathBuf},
-};
-
-/// Archive represents the top-level directory tree for the Archivist's storage.
-pub struct Archive {
-    /// The path to the Archive on disk.
-    path: PathBuf,
-}
-
-/// Stats for a particular event group of files.
-#[derive(Debug, Eq, PartialEq)]
-pub struct EventFileGroupStats {
-    /// The number of files associated with this group.
-    pub file_count: usize,
-    /// The size of those files on disk.
-    pub size: u64,
-}
-
-const DATED_DIRECTORY_REGEX: &str = r"^(\d{4}-\d{2}-\d{2})$";
-const EVENT_PREFIX_REGEX: &str = r"^(\d{2}:\d{2}:\d{2}.\d{3})-";
-const EVENT_LOG_SUFFIX_REGEX: &str = r"event.log$";
-
-pub type EventFileGroupStatsMap = BTreeMap<String, EventFileGroupStats>;
-
-impl Archive {
-    /// Opens an Archive at the given path, returning an error if it does not exist.
-    pub fn open(path: impl Into<PathBuf>) -> Result<Self, Error> {
-        let path: PathBuf = path.into();
-        if path.is_dir() {
-            Ok(Archive { path })
-        } else {
-            Err(format_err!("{} is not a directory", path.display()))
-        }
-    }
-
-    /// Returns a vector of EventFileGroups and their associated stats from all dates covered by
-    /// this archive.
-    pub fn get_event_group_stats(&self) -> Result<EventFileGroupStatsMap, Error> {
-        let mut output = EventFileGroupStatsMap::new();
-        for date in self.get_dates()? {
-            for group in self.get_event_file_groups(&date)? {
-                let file_count = 1 + group.event_files.len();
-                let size = group.size()?;
-                output.insert(group.log_file_path(), EventFileGroupStats { file_count, size });
-            }
-        }
-        Ok(output)
-    }
-
-    /// Returns a vector of the dated directory names in the archive, in sorted order.
-    pub fn get_dates(&self) -> Result<Vec<String>, Error> {
-        lazy_static! {
-            static ref RE: Regex = Regex::new(DATED_DIRECTORY_REGEX).unwrap();
-        }
-
-        Ok(self
-            .path
-            .read_dir()?
-            .filter_map(|file| file.ok())
-            .filter_map(|entry| {
-                let name = entry.file_name().into_string().unwrap_or_default();
-                let is_dir = entry.file_type().and_then(|t| Ok(t.is_dir())).unwrap_or(false);
-                if RE.is_match(&name) && is_dir {
-                    Some(name)
-                } else {
-                    None
-                }
-            })
-            .sorted()
-            .collect())
-    }
-
-    /// Returns a vector of file groups in the given dated directory, in sorted order.
-    pub fn get_event_file_groups(&self, date: &str) -> Result<Vec<EventFileGroup>, Error> {
-        lazy_static! {
-            static ref GROUP_RE: Regex = Regex::new(EVENT_PREFIX_REGEX).unwrap();
-            static ref LOG_FILE_RE: Regex =
-                Regex::new(&(EVENT_PREFIX_REGEX.to_owned() + EVENT_LOG_SUFFIX_REGEX)).unwrap();
-        }
-
-        Ok(self
-            .path
-            .join(date)
-            .read_dir()?
-            .filter_map(|dir_entry| dir_entry.ok())
-            .filter_map(|entry| {
-                let is_file = entry.metadata().and_then(|meta| Ok(meta.is_file())).unwrap_or(false);
-                if !is_file {
-                    return None;
-                }
-                let name = entry.file_name().into_string().unwrap_or_default();
-                let captures = if let Some(captures) = GROUP_RE.captures(&name) {
-                    captures
-                } else {
-                    return None;
-                };
-                if LOG_FILE_RE.is_match(&name) {
-                    Some((captures[1].to_owned(), EventFileGroup::new(Some(entry.path()), vec![])))
-                } else {
-                    Some((captures[1].to_owned(), EventFileGroup::new(None, vec![entry.path()])))
-                }
-            })
-            .sorted_by(|a, b| Ord::cmp(&a.0, &b.0))
-            .group_by(|x| x.0.clone())
-            .into_iter()
-            .filter_map(|(_, entries)| {
-                let ret = entries.map(|(_, entry)| entry).fold(
-                    EventFileGroup::new(None, vec![]),
-                    |mut acc, next| {
-                        acc.accumulate(next);
-                        acc
-                    },
-                );
-
-                match ret.log_file {
-                    Some(_) => Some(ret),
-                    _ => None,
-                }
-            })
-            .collect())
-    }
-
-    /// Get the path to the Archive directory.
-    pub fn get_path(&self) -> &Path {
-        return &self.path;
-    }
-}
-
-/// Represents information about a group of event files.
-#[derive(Debug, PartialEq, Eq)]
-pub struct EventFileGroup {
-    /// The file containing a log of events for the group.
-    log_file: Option<PathBuf>,
-
-    /// The event files referenced in the log.
-    event_files: Vec<PathBuf>,
-}
-
-pub type EventError = serde_json::error::Error;
-
-impl EventFileGroup {
-    /// Constructs a new wrapper for a group of event files.
-    fn new(log_file: Option<PathBuf>, event_files: Vec<PathBuf>) -> Self {
-        EventFileGroup { log_file, event_files }
-    }
-
-    /// Supports folding multiple partially filled out groups together.
-    fn accumulate(&mut self, other: EventFileGroup) {
-        match self.log_file {
-            None => {
-                self.log_file = other.log_file;
-            }
-            _ => (),
-        };
-
-        self.event_files.extend(other.event_files.into_iter());
-    }
-
-    /// Deletes this group from disk.
-    ///
-    /// Returns stats on the files removed on success.
-    pub fn delete(self) -> Result<EventFileGroupStats, Error> {
-        let size = self.size()?;
-        // There is 1 log file + each event file removed by this operation.
-        let file_count = 1 + self.event_files.len();
-
-        vec![self.log_file.unwrap()]
-            .into_iter()
-            .chain(self.event_files.into_iter())
-            .map(|path| -> Result<(), Error> {
-                fs::remove_file(&path)?;
-                Ok(())
-            })
-            .collect::<Result<(), Error>>()?;
-
-        Ok(EventFileGroupStats { file_count, size })
-    }
-
-    /// Gets the path to the log file for this group.
-    pub fn log_file_path(&self) -> String {
-        self.log_file.as_ref().expect("missing log file path").to_string_lossy().to_string()
-    }
-
-    /// Returns the size of all event files from this group on disk.
-    pub fn size(&self) -> Result<u64, Error> {
-        let log_file = match &self.log_file {
-            None => {
-                return Err(format_err!("Log file is not specified"));
-            }
-            Some(log_file) => log_file.clone(),
-        };
-
-        itertools::chain(&[log_file], self.event_files.iter())
-            .map(|path| {
-                fs::metadata(&path)
-                    .or_else(|_| Err(format_err!("Failed to get size for {:?}", path)))
-            })
-            .map(|meta| {
-                meta.and_then(|value| {
-                    if value.is_file() {
-                        Ok(value.len())
-                    } else {
-                        Err(format_err!("Path is not a file"))
-                    }
-                })
-            })
-            .fold_results(0, std::ops::Add::add)
-    }
-
-    /// Returns an iterator over the events stored in the log file.
-    pub fn events(&self) -> Result<impl Iterator<Item = Result<Event, EventError>>, Error> {
-        let file =
-            fs::File::open(&self.log_file.as_ref().ok_or(format_err!("Log file not specified"))?)?;
-        Ok(Deserializer::from_reader(file).into_iter::<Event>())
-    }
-
-    /// Returns the path to the parent directory containing this group.
-    pub fn parent_directory(&self) -> Result<&Path, Error> {
-        self.log_file
-            .as_ref()
-            .ok_or(format_err!("Log file not specified"))?
-            .parent()
-            .ok_or(format_err!("Log file has no parent directory"))
-    }
-}
-
-/// Represents a single event in the log.
-#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
-pub struct Event {
-    timestamp_nanos: u64,
-    event_type: String,
-    relative_moniker: String,
-    event_files: Vec<String>,
-}
-
-fn datetime_to_timestamp<T: TimeZone>(t: &DateTime<T>) -> u64 {
-    (t.timestamp() * 1_000_000_000 + t.timestamp_subsec_nanos() as i64) as u64
-}
-
-impl Event {
-    /// Create a new Event at the current time.
-    pub fn new(event_type: impl ToString, relative_moniker: impl ToString) -> Self {
-        Self::new_with_time(Utc::now(), event_type, relative_moniker)
-    }
-
-    /// Create a new Event at the given time.
-    pub fn new_with_time<T: TimeZone>(
-        time: DateTime<T>,
-        event_type: impl ToString,
-        relative_moniker: impl ToString,
-    ) -> Self {
-        Event {
-            timestamp_nanos: datetime_to_timestamp(&time),
-            event_type: event_type.to_string(),
-            relative_moniker: relative_moniker.to_string(),
-            event_files: vec![],
-        }
-    }
-
-    /// Get the timestamp of this event in the given timezone.
-    pub fn get_timestamp<T: TimeZone>(&self, time_zone: T) -> DateTime<T> {
-        let seconds = self.timestamp_nanos / 1_000_000_000;
-        let nanos = self.timestamp_nanos % 1_000_000_000;
-        time_zone.timestamp(seconds as i64, nanos as u32)
-    }
-
-    /// Get the vector of event files for this event.
-    pub fn get_event_files(&self) -> &Vec<String> {
-        &self.event_files
-    }
-
-    /// Add an event file to this event.
-    pub fn add_event_file(&mut self, file: impl AsRef<OsStr>) {
-        self.event_files.push(file.as_ref().to_string_lossy().to_string());
-    }
-}
-
-/// Structure that wraps an Archive and supports writing to it.
-pub struct ArchiveWriter {
-    /// The opened Archive to write to.
-    archive: Archive,
-
-    /// A writer for the currently opened event file group.
-    open_log: EventFileGroupWriter,
-
-    group_stats: EventFileGroupStatsMap,
-}
-
-impl ArchiveWriter {
-    /// Open a directory as an archive.
-    ///
-    /// If the directory does not exist, it will be created.
-    pub fn open(path: impl Into<PathBuf>) -> Result<Self, Error> {
-        let path: PathBuf = path.into();
-        if !path.exists() {
-            fs::create_dir_all(&path)?;
-        }
-
-        let archive = Archive::open(&path)?;
-        let open_log = EventFileGroupWriter::new(path)?;
-
-        let mut group_stats = archive.get_event_group_stats()?;
-
-        group_stats.remove(&open_log.get_log_file_path().to_string_lossy().to_string());
-        diagnostics::set_group_stats(&group_stats);
-
-        Ok(ArchiveWriter { archive, open_log, group_stats })
-    }
-
-    /// Get the readable Archive from this writer.
-    pub fn get_archive(&self) -> &Archive {
-        &self.archive
-    }
-
-    /// Get the currently opened log writer for this Archive.
-    pub fn get_log(&mut self) -> &mut EventFileGroupWriter {
-        &mut self.open_log
-    }
-
-    /// Rotates the log by closing the current EventFileGroup and opening a new one.
-    ///
-    /// Returns the name and stats for the just-closed EventFileGroup.
-    pub fn rotate_log(&mut self) -> Result<(PathBuf, EventFileGroupStats), Error> {
-        let mut temp_log = EventFileGroupWriter::new(self.archive.get_path())?;
-        std::mem::swap(&mut self.open_log, &mut temp_log);
-        temp_log.close()
-    }
-
-    pub fn add_group_stat(&mut self, log_file_path: &Path, stat: EventFileGroupStats) {
-        self.group_stats.insert(log_file_path.to_string_lossy().to_string(), stat);
-        diagnostics::set_group_stats(&self.group_stats);
-    }
-
-    pub fn remove_group_stat(&mut self, log_file_path: &Path) {
-        self.group_stats.remove(&log_file_path.to_string_lossy().to_string());
-        diagnostics::set_group_stats(&self.group_stats);
-    }
-
-    pub fn archived_size(&self) -> u64 {
-        let mut ret = 0;
-        for (_, v) in &self.group_stats {
-            ret += v.size;
-        }
-        ret
-    }
-}
-
-/// A writer that wraps a particular group of event files.
-///
-/// This struct supports writing events to the log file with associated event files through
-/// |EventBuilder|.
-pub struct EventFileGroupWriter {
-    /// An opened file to write log events to.
-    log_file: fs::File,
-
-    /// The path to the log file.
-    log_file_path: PathBuf,
-
-    /// The path to the directory containing this file group.
-    directory_path: PathBuf,
-
-    /// The prefix for files related to this file group.
-    file_prefix: String,
-
-    /// The number of records written to the log file.
-    records_written: usize,
-
-    /// The number of bytes written for this group, including event files.
-    bytes_stored: usize,
-
-    /// The number of files written for this group, including event files.
-    files_stored: usize,
-}
-
-impl EventFileGroupWriter {
-    /// Create a new writable event file group.
-    ///
-    /// This opens or creates a dated directory in the archive and initializes a log file for the
-    /// event file group.
-    pub fn new(archive_path: impl AsRef<Path>) -> Result<Self, Error> {
-        EventFileGroupWriter::new_with_time(Utc::now(), archive_path)
-    }
-
-    /// Creates a new writable event file group using the given time for dating the directories and
-    /// files.
-    fn new_with_time(time: DateTime<Utc>, archive_path: impl AsRef<Path>) -> Result<Self, Error> {
-        let directory_path = archive_path.as_ref().join(time.format("%Y-%m-%d").to_string());
-
-        fs::create_dir_all(&directory_path)?;
-
-        let file_prefix = time.format("%H:%M:%S%.3f-").to_string();
-        let log_file_path = directory_path.join(file_prefix.clone() + "event.log");
-        let log_file = fs::File::create(&log_file_path)?;
-
-        Ok(EventFileGroupWriter {
-            log_file,
-            log_file_path,
-            directory_path,
-            file_prefix,
-            records_written: 0,
-            bytes_stored: 0,
-            files_stored: 1,
-        })
-    }
-
-    /// Create a new event builder for adding an event to this group.
-    pub fn new_event(
-        &mut self,
-        event_type: impl ToString,
-        relative_moniker: impl ToString,
-    ) -> EventBuilder<'_> {
-        EventBuilder {
-            writer: self,
-            event: Event::new(event_type, relative_moniker),
-            event_files: Ok(vec![]),
-            event_file_size: 0,
-        }
-    }
-
-    /// Gets the path to the log file for this group.
-    pub fn get_log_file_path(&self) -> &Path {
-        &self.log_file_path
-    }
-
-    /// Gets the stats for the event file group.
-    pub fn get_stats(&self) -> EventFileGroupStats {
-        EventFileGroupStats { file_count: self.files_stored, size: self.bytes_stored as u64 }
-    }
-
-    /// Write an event to the log.
-    ///
-    /// Returns the number of bytes written on success.
-    fn write_event(&mut self, event: &Event, extra_files_size: usize) -> Result<usize, Error> {
-        let value = serde_json::to_string(&event)? + "\n";
-        self.log_file.write_all(value.as_ref())?;
-        self.bytes_stored += value.len() + extra_files_size;
-        self.records_written += 1;
-        self.files_stored += event.event_files.len();
-        Ok(value.len())
-    }
-
-    /// Synchronize the log with underlying storage.
-    fn sync(&mut self) -> Result<(), Error> {
-        Ok(self.log_file.sync_all()?)
-    }
-
-    /// Close this EventFileGroup, returning stats of what was written.
-    fn close(mut self) -> Result<(PathBuf, EventFileGroupStats), Error> {
-        self.sync()?;
-        Ok((
-            self.log_file_path,
-            EventFileGroupStats { file_count: self.files_stored, size: self.bytes_stored as u64 },
-        ))
-    }
-}
-
-/// This struct provides a builder interface for adding event information to an individual log
-/// entry before adding it to the log.
-pub struct EventBuilder<'a> {
-    /// The writer this is building an event for.
-    writer: &'a mut EventFileGroupWriter,
-
-    /// The partial event being built.
-    event: Event,
-
-    /// The list of event files that were created so far. If this contains Error, writing
-    /// event files failed. Building will return the error.
-    event_files: Result<Vec<PathBuf>, Error>,
-
-    /// The total number of bytes written into event files.
-    event_file_size: usize,
-}
-
-fn delete_files(files: &Vec<PathBuf>) -> Result<(), Error> {
-    files
-        .iter()
-        .map(|file| -> Result<(), Error> { Ok(fs::remove_file(&file)?) })
-        .fold_results((), |_, _| ())
-}
-
-impl<'a> EventBuilder<'a> {
-    /// Build the event and write it to the log.
-    ///
-    /// Returns stats on success or the Error on failure.
-    /// If this method return an error, all event files on disk will be cleaned up.
-    pub fn build(mut self) -> Result<EventFileGroupStats, Error> {
-        let file_count;
-        if let Ok(event_files) = self.event_files.as_ref() {
-            file_count = event_files.len();
-            for path in event_files.iter() {
-                self.event.add_event_file(
-                    path.file_name().ok_or_else(|| format_err!("missing file name"))?,
-                );
-            }
-        } else {
-            return Err(self.event_files.unwrap_err());
-        }
-
-        match self.writer.write_event(&self.event, self.event_file_size) {
-            Ok(bytes) => {
-                Ok(EventFileGroupStats { file_count, size: (self.event_file_size + bytes) as u64 })
-            }
-            Err(e) => {
-                self.invalidate(e);
-                Err(self.event_files.unwrap_err())
-            }
-        }
-    }
-
-    /// Add an event file to the event.
-    ///
-    /// This method takes the name of a file and its contents and writes them into the archive.
-    pub fn add_event_file(mut self, name: impl AsRef<OsStr>, contents: &[u8]) -> Self {
-        if let Ok(file_vector) = self.event_files.as_mut() {
-            let mut file_name = OsString::from(format!(
-                "{}{}-",
-                self.writer.file_prefix, self.writer.records_written
-            ));
-            file_name.push(name);
-            let path = self.writer.directory_path.join(file_name);
-            if let Err(e) = fs::write(&path, contents) {
-                self.invalidate(Error::from(e));
-            } else {
-                self.event_file_size += contents.len();
-                file_vector.push(path);
-            }
-        }
-
-        self
-    }
-
-    /// Invalidates this EventBuilder, meaning the value will not be written to the log.
-    fn invalidate(&mut self, error: Error) {
-        delete_files(&self.event_files.as_ref().unwrap_or(&vec![]))
-            .expect("Failed to delete files");
-        self.event_files = Err(error)
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use {
-        super::*,
-        std::{io::Write, iter::FromIterator},
-    };
-
-    #[test]
-    fn archive_open() {
-        let path: PathBuf;
-        {
-            let dir = tempfile::tempdir().unwrap();
-            path = dir.path().to_path_buf();
-            assert_eq!(true, Archive::open(&path).is_ok());
-        }
-        assert_eq!(false, Archive::open(&path).is_ok());
-    }
-
-    #[test]
-    fn archive_get_dates() {
-        let dir = tempfile::tempdir().unwrap();
-        fs::create_dir(dir.path().join("2019-05-08")).unwrap();
-        fs::create_dir(dir.path().join("2019-04-10")).unwrap();
-        fs::create_dir(dir.path().join("2019-04-incorrect-format")).unwrap();
-        // Create a file with the correct format. It will not be included since it is not a
-        // directory.
-        fs::File::create(dir.path().join("2019-05-09")).unwrap();
-
-        let archive = Archive::open(dir.path()).unwrap();
-        assert_eq!(
-            vec!["2019-04-10".to_string(), "2019-05-08".to_string()],
-            archive.get_dates().unwrap()
-        );
-    }
-
-    fn write_test_to_file<T: AsRef<Path>>(path: T) {
-        let mut file = fs::File::create(path).expect("failed to create file");
-        write!(file, "test").expect("failed to write file");
-        file.sync_all().expect("failed to sync file");
-    }
-
-    #[test]
-    fn archive_get_file_groups() {
-        let dir = tempfile::tempdir().unwrap();
-        let dated_dir_path = dir.path().join("2019-05-08");
-        fs::create_dir(&dated_dir_path).unwrap();
-
-        // Event group with only log file.
-        let event_log_file_name = dated_dir_path.join("10:30:00.000-event.log");
-        write_test_to_file(&event_log_file_name);
-
-        // Event group with event files.
-        let aux_event_log_file_name = dated_dir_path.join("11:30:00.000-event.log");
-        let aux_file_1 = dated_dir_path.join("11:30:00.000-aux_file_1.info");
-        let aux_file_2 = dated_dir_path.join("11:30:00.000-aux_file_2.info");
-        write_test_to_file(&aux_event_log_file_name);
-        write_test_to_file(&aux_file_1);
-        write_test_to_file(&aux_file_2);
-
-        // Event group missing log file (invalid).
-        fs::File::create(dated_dir_path.join("12:30:00.000-aux_file_1.info")).unwrap();
-        fs::File::create(dated_dir_path.join("12:30:00.000-aux_file_2.info")).unwrap();
-
-        // Name does not match pattern (invalid).
-        fs::File::create(dated_dir_path.join("13:30:00-event.log")).unwrap();
-
-        // Directory rather than file (invalid).
-        fs::create_dir(dated_dir_path.join("14:30:00.000-event.log")).unwrap();
-
-        let archive = Archive::open(dir.path()).unwrap();
-        assert_eq!(
-            vec![
-                EventFileGroup::new(Some(event_log_file_name.clone()), vec![]),
-                EventFileGroup::new(
-                    Some(aux_event_log_file_name.clone()),
-                    vec![aux_file_1.clone(), aux_file_2.clone()]
-                )
-            ],
-            archive.get_event_file_groups("2019-05-08").unwrap()
-        );
-
-        assert_eq!(
-            BTreeMap::from_iter(
-                vec![
-                    (
-                        event_log_file_name.to_string_lossy().to_string(),
-                        EventFileGroupStats { file_count: 1, size: 4 }
-                    ),
-                    (
-                        aux_event_log_file_name.to_string_lossy().to_string(),
-                        EventFileGroupStats { file_count: 3, size: 4 * 3 }
-                    )
-                ]
-                .into_iter()
-            ),
-            archive.get_event_group_stats().unwrap()
-        );
-
-        for group in archive.get_event_file_groups("2019-05-08").unwrap() {
-            group.delete().unwrap();
-        }
-
-        assert_eq!(0, archive.get_event_file_groups("2019-05-08").unwrap().len());
-
-        // Open an empty directory.
-        fs::create_dir(dir.path().join("2019-05-07")).unwrap();
-        assert_eq!(0, archive.get_event_file_groups("2019-05-07").unwrap().len());
-
-        // Open a missing directory
-        assert_eq!(true, archive.get_event_file_groups("2019-05-06").is_err());
-    }
-
-    #[test]
-    fn event_file_group_size() {
-        let dir = tempfile::tempdir().unwrap();
-        write_test_to_file(dir.path().join("a"));
-        write_test_to_file(dir.path().join("b"));
-        write_test_to_file(dir.path().join("c"));
-
-        assert_eq!(
-            12,
-            EventFileGroup::new(
-                Some(dir.path().join("a")),
-                vec![dir.path().join("b"), dir.path().join("c")]
-            )
-            .size()
-            .expect("failed to get size")
-        );
-
-        assert_eq!(
-            4,
-            EventFileGroup::new(Some(dir.path().join("a")), vec![],)
-                .size()
-                .expect("failed to get size")
-        );
-
-        // Absent file "d" causes error.
-        assert_eq!(
-            true,
-            EventFileGroup::new(Some(dir.path().join("a")), vec![dir.path().join("d")],)
-                .size()
-                .is_err()
-        );
-
-        // Missing log file.
-        assert_eq!(true, EventFileGroup::new(None, vec![dir.path().join("b")],).size().is_err());
-
-        // Log file is actually a directory.
-        assert_eq!(
-            true,
-            EventFileGroup::new(Some(dir.path().to_path_buf()), vec![dir.path().join("b")],)
-                .size()
-                .is_err()
-        );
-    }
-
-    #[test]
-    fn event_creation() {
-        let time = Utc::now();
-        let event = Event::new_with_time(time, "START", "a/b/component.cmx:1234");
-        assert_eq!(time, event.get_timestamp(Utc));
-        assert_eq!(
-            Event {
-                timestamp_nanos: datetime_to_timestamp(&time),
-                relative_moniker: "a/b/component.cmx:1234".into(),
-                event_type: "START".to_string(),
-                event_files: vec![],
-            },
-            event
-        );
-    }
-
-    #[test]
-    fn event_ordering() {
-        let event1 = Event::new("START", "a/b/c.cmx:123");
-        let event2 = Event::new("END", "a/b/c.cmx:123");
-        let (time1, time2) = (event1.get_timestamp(Utc), event2.get_timestamp(Utc));
-        assert!(time1 <= time2, "Expected {:?} before {:?}", time1, time2);
-    }
-
-    #[test]
-    fn event_event_files() {
-        let mut event = Event::new("START", "a/b/c.cmx:123");
-        event.add_event_file("f1");
-        assert_eq!(&vec!["f1"], event.get_event_files());
-    }
-
-    #[test]
-    fn event_file_group_writer() {
-        let dir = tempfile::tempdir().unwrap();
-        let mut writer = EventFileGroupWriter::new_with_time(
-            Utc.ymd(2019, 05, 08).and_hms_milli(12, 30, 14, 31),
-            dir.path(),
-        )
-        .expect("failed to create writer");
-        assert!(writer.sync().is_ok());
-
-        let meta = fs::metadata(dir.path().join("2019-05-08"));
-        assert!(meta.is_ok());
-        assert!(meta.unwrap().is_dir());
-
-        let meta = fs::metadata(dir.path().join("2019-05-08").join("12:30:14.031-event.log"));
-        assert!(meta.is_ok());
-        assert!(meta.unwrap().is_file());
-
-        assert!(writer.new_event("START", "a/b/test.cmx:0").build().is_ok());
-        assert!(writer
-            .new_event("EXIT", "a/b/test.cmx:0")
-            .add_event_file("root.inspect", b"INSP TEST")
-            .build()
-            .is_ok());
-
-        let extra_file_path = dir.path().join("2019-05-08").join("12:30:14.031-1-root.inspect");
-        let meta = fs::metadata(&extra_file_path);
-        assert!(meta.is_ok());
-        assert!(meta.unwrap().is_file());
-        assert_eq!("INSP TEST", fs::read_to_string(&extra_file_path).unwrap());
-    }
-
-    #[test]
-    fn archive_writer() {
-        let dir = tempfile::tempdir().unwrap();
-        let mut archive =
-            ArchiveWriter::open(dir.path().join("archive")).expect("failed to create archive");
-
-        archive
-            .get_log()
-            .new_event("START", "a/b/test.cmx:0")
-            .build()
-            .expect("failed to write log");
-        archive
-            .get_log()
-            .new_event("STOP", "a/b/test.cmx:0")
-            .add_event_file("root.inspect", b"test")
-            .build()
-            .expect("failed to write log");
-
-        let mut events = vec![];
-        archive.get_archive().get_dates().unwrap().into_iter().for_each(|date| {
-            archive.get_archive().get_event_file_groups(&date).unwrap().into_iter().for_each(
-                |group| {
-                    group.events().unwrap().for_each(|event| {
-                        events.push(event.unwrap());
-                    })
-                },
-            );
-        });
-
-        assert_eq!(2, events.len());
-
-        let (_, stats) = archive.rotate_log().unwrap();
-        assert_eq!(2, stats.file_count);
-        assert_ne!(0, stats.size);
-
-        let mut stats = archive
-            .get_log()
-            .new_event("STOP", "a/b/test.cmx:0")
-            .add_event_file("root.inspect", b"test")
-            .build()
-            .expect("failed to write log");
-
-        // Check the stats returned by the log; we add one to the file count for the log file
-        // itself.
-        stats.file_count += 1;
-        assert_eq!(stats, archive.get_log().get_stats());
-    }
-}
diff --git a/src/diagnostics/archivist/src/archivist.rs b/src/diagnostics/archivist/src/archivist.rs
index 2992c9f..75604c3 100644
--- a/src/diagnostics/archivist/src/archivist.rs
+++ b/src/diagnostics/archivist/src/archivist.rs
@@ -5,17 +5,13 @@
 use {
     crate::{
         accessor::ArchiveAccessor,
-        archive, configs, constants,
-        constants::INSPECT_LOG_WINDOW_SIZE,
+        configs, constants,
         container::ComponentIdentity,
         diagnostics,
         events::{
             source_registry::EventSourceRegistry,
             sources::{StaticEventStream, UnattributedLogSinkSource},
-            types::{
-                ComponentEvent, ComponentEventStream, DiagnosticsReadyEvent, EventMetadata,
-                EventSource,
-            },
+            types::{ComponentEvent, ComponentEventStream, DiagnosticsReadyEvent, EventSource},
         },
         logs::{budget::BudgetManager, redact::Redactor, socket::LogMessageSocket},
         pipeline::Pipeline,
@@ -30,10 +26,9 @@
     fuchsia_async::{self as fasync, Task},
     fuchsia_component::{
         client::connect_to_protocol,
-        server::{ServiceFs, ServiceObj, ServiceObjTrait},
+        server::{ServiceFs, ServiceObj},
     },
     fuchsia_inspect::{component, health::Reporter},
-    fuchsia_inspect_contrib::{inspect_log, nodes::BoundedListNode},
     fuchsia_runtime::{take_startup_handle, HandleInfo, HandleType},
     fuchsia_zircon as zx,
     futures::{
@@ -41,12 +36,8 @@
         future::{self, abortable},
         prelude::*,
     },
-    io_util,
     parking_lot::RwLock,
-    std::{
-        path::{Path, PathBuf},
-        sync::Arc,
-    },
+    std::{path::Path, sync::Arc},
     tracing::{debug, error, info, warn},
 };
 
@@ -113,28 +104,13 @@
         let mut fs = ServiceFs::new();
         diagnostics::serve(&mut fs)?;
 
-        let writer = archivist_configuration.archive_path.as_ref().and_then(|archive_path| {
-            maybe_create_archive(&mut fs, archive_path)
-                .or_else(|e| {
-                    // TODO(fxbug.dev/57271): this is not expected in regular builds of the archivist. It's
-                    // happening when starting the zircon_guest (fx shell guest launch zircon_guest)
-                    // We'd normally fail if we couldn't create the archive, but for now we include
-                    // a warning.
-                    warn!(
-                        path = %archive_path.display(), ?e,
-                        "Failed to create archive"
-                    );
-                    Err(e)
-                })
-                .ok()
-        });
-
         let pipelines_node = diagnostics::root().create_child("pipelines");
         let feedback_pipeline_node = pipelines_node.create_child("feedback");
         let legacy_pipeline_node = pipelines_node.create_child("legacy_metrics");
-        let pipelines_path = &archivist_configuration.pipelines_path;
-        let feedback_path = format!("{}/feedback", pipelines_path.display());
-        let legacy_metrics_path = format!("{}/legacy_metrics", pipelines_path.display());
+        let feedback_path =
+            format!("{}/feedback", archivist_configuration.pipelines_path.display());
+        let legacy_metrics_path =
+            format!("{}/legacy_metrics", archivist_configuration.pipelines_path.display());
         let mut feedback_config = configs::PipelineConfig::from_directory(
             &feedback_path,
             configs::EmptyBehavior::DoNotFilter,
@@ -205,7 +181,6 @@
         // diagnostics data N times if we have N pipelines. We should be
         // storing a single copy regardless of the number of pipelines.
         let archivist = Archivist::new(
-            archivist_configuration,
             vec![
                 all_access_pipeline.clone(),
                 feedback_pipeline.clone(),
@@ -213,7 +188,6 @@
             ],
             diagnostics_repo,
             logs_budget,
-            writer,
         )?;
 
         let all_accessor_stats = Arc::new(diagnostics::AccessorStats::new(
@@ -535,10 +509,6 @@
 /// that are populated by the archivist server and exposed in the
 /// service sessions.
 pub struct Archivist {
-    /// Writer for the archive. If a path was not configured it will be `None`.
-    writer: Option<archive::ArchiveWriter>,
-    log_node: BoundedListNode,
-    configuration: configs::Config,
     diagnostics_pipelines: Arc<Vec<Arc<RwLock<Pipeline>>>>,
     pub diagnostics_repo: DataRepo,
 
@@ -548,23 +518,11 @@
 
 impl Archivist {
     pub fn new(
-        configuration: configs::Config,
         diagnostics_pipelines: Vec<Arc<RwLock<Pipeline>>>,
         diagnostics_repo: DataRepo,
         logs_budget: BudgetManager,
-        writer: Option<archive::ArchiveWriter>,
     ) -> Result<Self, Error> {
-        let mut log_node = BoundedListNode::new(
-            diagnostics::root().create_child("events"),
-            INSPECT_LOG_WINDOW_SIZE,
-        );
-
-        inspect_log!(log_node, event: "Archivist started");
-
         Ok(Archivist {
-            writer,
-            log_node,
-            configuration,
             diagnostics_pipelines: Arc::new(diagnostics_pipelines),
             diagnostics_repo,
             logs_budget,
@@ -577,11 +535,7 @@
         log_sender: mpsc::UnboundedSender<Task<()>>,
     ) {
         while let Some(event) = events.next().await {
-            let res = self.process_event(event, &log_sender).await;
-            res.unwrap_or_else(|e| {
-                inspect_log!(self.log_node, event: "Failed to log event", result: format!("{:?}", e));
-                error!(?e, "Failed to log event");
-            });
+            self.process_event(event, &log_sender).await;
         }
     }
 
@@ -643,28 +597,23 @@
         &mut self,
         event: ComponentEvent,
         log_sender: &mpsc::UnboundedSender<Task<()>>,
-    ) -> Result<(), Error> {
+    ) {
         match event {
             ComponentEvent::Start(start) => {
-                let archived_metadata = start.metadata.clone();
                 debug!(identity = %start.metadata.identity, "Adding new component.");
                 self.add_new_component(start.metadata.identity, start.metadata.timestamp, None);
-                self.archive_event("START", archived_metadata).await
             }
             ComponentEvent::Running(running) => {
-                let archived_metadata = running.metadata.clone();
                 debug!(identity = %running.metadata.identity, "Component is running.");
                 self.add_new_component(
                     running.metadata.identity,
                     running.metadata.timestamp,
                     Some(running.component_start_time),
                 );
-                self.archive_event("RUNNING", archived_metadata.clone()).await
             }
             ComponentEvent::Stop(stop) => {
                 debug!(identity = %stop.metadata.identity, "Component stopped");
                 self.mark_component_stopped(&stop.metadata.identity);
-                self.archive_event("STOP", stop.metadata).await
             }
             ComponentEvent::DiagnosticsReady(diagnostics_ready) => {
                 debug!(
@@ -672,144 +621,14 @@
                     "Diagnostics directory is ready.",
                 );
                 self.populate_inspect_repo(diagnostics_ready).await;
-                Ok(())
             }
             ComponentEvent::LogSinkRequested(event) => {
                 let data_repo = &self.diagnostics_repo;
                 let container = data_repo.write().get_log_container(event.metadata.identity);
                 container.handle_log_sink(event.requests, log_sender.clone());
-                Ok(())
             }
         }
     }
-
-    async fn archive_event(
-        &mut self,
-        _event_name: &str,
-        _event_data: EventMetadata,
-    ) -> Result<(), Error> {
-        let writer = if let Some(w) = self.writer.as_mut() {
-            w
-        } else {
-            return Ok(());
-        };
-
-        let max_archive_size_bytes = self.configuration.max_archive_size_bytes;
-        let max_event_group_size_bytes = self.configuration.max_event_group_size_bytes;
-
-        // TODO(fxbug.dev/53939): Get inspect data from repository before removing
-        // for post-mortem inspection.
-        //let log = writer.get_log().new_event(event_name, event_data.component_id);
-        // if let Some(data_map) = event_data.component_data_map {
-        //     for (path, object) in data_map {
-        //         match object {
-        //             InspectData::Empty
-        //             | InspectData::DeprecatedFidl(_)
-        //             | InspectData::Tree(_, None) => {}
-        //             InspectData::Vmo(vmo) | InspectData::Tree(_, Some(vmo)) => {
-        //                 let mut contents = vec![0u8; vmo.get_size()? as usize];
-        //                 vmo.read(&mut contents[..], 0)?;
-
-        //                 // Truncate the bytes down to the last non-zero 4096-byte page of data.
-        //                 // TODO(fxbug.dev/4703): Handle truncation of VMOs without reading the whole thing.
-        //                 let mut last_nonzero = 0;
-        //                 for (i, v) in contents.iter().enumerate() {
-        //                     if *v != 0 {
-        //                         last_nonzero = i;
-        //                     }
-        //                 }
-        //                 if last_nonzero % 4096 != 0 {
-        //                     last_nonzero = last_nonzero + 4096 - last_nonzero % 4096;
-        //                 }
-        //                 contents.resize(last_nonzero, 0);
-
-        //                 log = log.add_event_file(path, &contents);
-        //             }
-        //             InspectData::File(contents) => {
-        //                 log = log.add_event_file(path, &contents);
-        //             }
-        //         }
-        //     }
-        // }
-
-        let current_group_stats = writer.get_log().get_stats();
-
-        if current_group_stats.size >= max_event_group_size_bytes {
-            let (path, stats) = writer.rotate_log()?;
-            inspect_log!(self.log_node, event:"Rotated log",
-                     new_path: path.to_string_lossy().to_string());
-            writer.add_group_stat(&path, stats);
-        }
-
-        let archived_size = writer.archived_size();
-        let mut current_archive_size = current_group_stats.size + archived_size;
-        if current_archive_size > max_archive_size_bytes {
-            let dates = match writer.get_archive().get_dates() {
-                Ok(dates) => dates,
-                Err(e) => {
-                    warn!("Garbage collection failure");
-                    inspect_log!(self.log_node, event: "Failed to get dates for garbage collection",
-                             reason: format!("{:?}", e));
-                    vec![]
-                }
-            };
-
-            for date in dates {
-                let groups = match writer.get_archive().get_event_file_groups(&date) {
-                    Ok(groups) => groups,
-                    Err(e) => {
-                        warn!("Garbage collection failure");
-                        inspect_log!(self.log_node, event: "Failed to get event file",
-                                 date: &date,
-                                 reason: format!("{:?}", e));
-                        vec![]
-                    }
-                };
-
-                for group in groups {
-                    let path = group.log_file_path();
-                    match group.delete() {
-                        Err(e) => {
-                            inspect_log!(self.log_node, event: "Failed to remove group",
-                                 path: &path,
-                                 reason: format!(
-                                     "{:?}", e));
-                            continue;
-                        }
-                        Ok(stat) => {
-                            current_archive_size -= stat.size;
-                            writer.remove_group_stat(&PathBuf::from(&path));
-                            inspect_log!(self.log_node, event: "Garbage collected group",
-                                     path: &path,
-                                     removed_files: stat.file_count as u64,
-                                     removed_bytes: stat.size as u64);
-                        }
-                    };
-
-                    if current_archive_size < max_archive_size_bytes {
-                        return Ok(());
-                    }
-                }
-            }
-        }
-
-        Ok(())
-    }
-}
-
-fn maybe_create_archive<ServiceObjTy: ServiceObjTrait>(
-    fs: &mut ServiceFs<ServiceObjTy>,
-    archive_path: &PathBuf,
-) -> Result<archive::ArchiveWriter, Error> {
-    let writer = archive::ArchiveWriter::open(archive_path.clone())?;
-    fs.add_remote(
-        "archive",
-        io_util::open_directory_in_namespace(
-            &archive_path.to_string_lossy(),
-            io_util::OPEN_RIGHT_READABLE | io_util::OPEN_RIGHT_WRITABLE,
-        )?,
-    );
-    Ok(writer)
 }
 
 #[cfg(test)]
@@ -826,9 +645,6 @@
 
     fn init_archivist() -> ArchivistBuilder {
         let config = configs::Config {
-            archive_path: None,
-            max_archive_size_bytes: 10,
-            max_event_group_size_bytes: 10,
             num_threads: 1,
             logs: configs::LogsConfig {
                 max_cached_original_bytes: LEGACY_DEFAULT_MAXIMUM_CACHED_LOGS_BYTES,
diff --git a/src/diagnostics/archivist/src/configs.rs b/src/diagnostics/archivist/src/configs.rs
index 36d7f99..0195800 100644
--- a/src/diagnostics/archivist/src/configs.rs
+++ b/src/diagnostics/archivist/src/configs.rs
@@ -22,15 +22,6 @@
 
 #[derive(Deserialize, Debug, PartialEq, Eq)]
 pub struct Config {
-    /// Path to which archived data will be written. No storage will be performed if left empty.
-    pub archive_path: Option<PathBuf>,
-
-    /// The maximum size the archive can be.
-    pub max_archive_size_bytes: u64,
-
-    /// The maximum size of a single event file group.
-    pub max_event_group_size_bytes: u64,
-
     /// Number of threads the archivist has available to use.
     pub num_threads: usize,
 
@@ -238,16 +229,12 @@
                   "logs": {
                     "max_cached_original_bytes": 500
                   },
-                  "max_archive_size_bytes": 10485760,
-                  "max_event_group_size_bytes": 262144,
                   "num_threads": 4
                 }"#;
 
         write_test_config_to_file(&test_config_file_name, test_config);
         let parsed_config = parse_config(&test_config_file_name).unwrap();
         assert_eq!(parsed_config.logs.max_cached_original_bytes, 500);
-        assert_eq!(parsed_config.max_archive_size_bytes, 10485760);
-        assert_eq!(parsed_config.max_event_group_size_bytes, 262144);
         assert_eq!(parsed_config.num_threads, 4);
     }
 
@@ -263,16 +250,12 @@
                   "logs": {
                     "max_cached_original_bytes": 500
                   },
-                  "max_archive_size_bytes": 10485760,
-                  "max_event_group_size_bytes": 262144,
                   "num_threads": 1
                 }"#;
 
         write_test_config_to_file(&test_config_file_name, test_config);
         let parsed_config = parse_config(&test_config_file_name).unwrap();
         assert_eq!(parsed_config.logs.max_cached_original_bytes, 500);
-        assert_eq!(parsed_config.max_archive_size_bytes, 10485760);
-        assert_eq!(parsed_config.max_event_group_size_bytes, 262144);
         assert_eq!(parsed_config.num_threads, 1);
     }
 
@@ -285,7 +268,7 @@
         let test_config_file_name = config_path.join("test_config.json");
         let test_config = r#"
                 {
-                  "max_archive_size_bytes": 10485760,
+                  "num_threads": 4,
                   "bad_field": "hello world",
                 }"#;
 
diff --git a/src/diagnostics/archivist/src/constants.rs b/src/diagnostics/archivist/src/constants.rs
index 170cb87..e57cbbc 100644
--- a/src/diagnostics/archivist/src/constants.rs
+++ b/src/diagnostics/archivist/src/constants.rs
@@ -36,9 +36,6 @@
 #[cfg(test)]
 pub(crate) const LEGACY_DEFAULT_MAXIMUM_CACHED_LOGS_BYTES: usize = 4 * 1024 * 1024;
 
-/// Keep only the 50 most recent events.
-pub const INSPECT_LOG_WINDOW_SIZE: usize = 50;
-
 /// The root Archivist's moniker in the component topology, used for attributing our own logs.
 // TODO(fxbug.dev/50105,fxbug.dev/64197): update this to reflect updated monikers received in events
 pub const ARCHIVIST_MONIKER: &str = "./archivist:0";
diff --git a/src/diagnostics/archivist/src/diagnostics.rs b/src/diagnostics/archivist/src/diagnostics.rs
index 43f71c5..ff7fa66 100644
--- a/src/diagnostics/archivist/src/diagnostics.rs
+++ b/src/diagnostics/archivist/src/diagnostics.rs
@@ -3,7 +3,6 @@
 // found in the LICENSE file.
 
 use {
-    crate::archive::EventFileGroupStatsMap,
     anyhow::Error,
     fuchsia_component::server::{ServiceFs, ServiceObjTrait},
     fuchsia_inspect::{
@@ -12,49 +11,13 @@
         UintLinearHistogramProperty, UintProperty,
     },
     fuchsia_zircon::{self as zx, Duration},
-    lazy_static::lazy_static,
     parking_lot::Mutex,
     std::collections::BTreeMap,
     std::sync::Arc,
 };
 
-lazy_static! {
-    static ref GROUPS: Arc<Mutex<Groups>> = Arc::new(Mutex::new(Groups::new(
-        component::inspector().root().create_child("archived_events")
-    )));
-}
-
 const INSPECTOR_SIZE: usize = 2 * 1024 * 1024 /* 2MB */;
 
-enum GroupData {
-    Node(Node),
-    Count(UintProperty),
-}
-
-struct Groups {
-    node: Node,
-    children: Vec<GroupData>,
-}
-
-impl Groups {
-    fn new(node: Node) -> Self {
-        Groups { node, children: vec![] }
-    }
-
-    fn replace(&mut self, stats: &EventFileGroupStatsMap) {
-        self.children.clear();
-        for (name, stat) in stats {
-            let node = self.node.create_child(name);
-            let files = node.create_uint("file_count", stat.file_count as u64);
-            let size = node.create_uint("size_in_bytes", stat.size);
-
-            self.children.push(GroupData::Node(node));
-            self.children.push(GroupData::Count(files));
-            self.children.push(GroupData::Count(size));
-        }
-    }
-}
-
 pub fn init() {
     component::init_inspector_with_size(INSPECTOR_SIZE);
     component::health().set_starting_up();
@@ -70,10 +33,6 @@
     Ok(())
 }
 
-pub(crate) fn set_group_stats(stats: &EventFileGroupStatsMap) {
-    GROUPS.lock().replace(stats);
-}
-
 pub struct AccessorStats {
     /// Inspect node for tracking usage/health metrics of diagnostics platform.
     pub archive_accessor_node: Node,
@@ -532,9 +491,7 @@
 mod test {
     use {
         super::*,
-        crate::archive::EventFileGroupStats,
         fuchsia_inspect::{assert_data_tree, health::Reporter, testing::AnyProperty, Inspector},
-        std::iter::FromIterator,
     };
 
     #[test]
@@ -569,30 +526,6 @@
     }
 
     #[test]
-    fn group_stats() {
-        let inspector = Inspector::new();
-        let mut group = Groups::new(inspector.root().create_child("archived_events"));
-        group.replace(&EventFileGroupStatsMap::from_iter(vec![
-            ("a/b".to_string(), EventFileGroupStats { file_count: 1, size: 2 }),
-            ("c/d".to_string(), EventFileGroupStats { file_count: 3, size: 4 }),
-        ]));
-
-        assert_data_tree!(inspector,
-        root: contains {
-            archived_events: {
-               "a/b": {
-                    file_count: 1u64,
-                    size_in_bytes: 2u64
-               },
-               "c/d": {
-                   file_count: 3u64,
-                   size_in_bytes: 4u64
-               }
-            }
-        });
-    }
-
-    #[test]
     fn processing_time_tracker() {
         let inspector = Inspector::new();
         let mut tracker = ProcessingTimeTracker::new(inspector.root().create_child("test"));
diff --git a/src/diagnostics/archivist/src/lib.rs b/src/diagnostics/archivist/src/lib.rs
index 2b03f67..1f640ff 100644
--- a/src/diagnostics/archivist/src/lib.rs
+++ b/src/diagnostics/archivist/src/lib.rs
@@ -3,7 +3,6 @@
 // found in the LICENSE file.
 
 pub mod accessor;
-pub mod archive;
 pub mod archivist;
 pub mod configs;
 pub mod constants;
diff --git a/src/diagnostics/archivist/tests/logs-budget/config/small-caches-config.json b/src/diagnostics/archivist/tests/logs-budget/config/small-caches-config.json
index 9803bd0..10de167 100644
--- a/src/diagnostics/archivist/tests/logs-budget/config/small-caches-config.json
+++ b/src/diagnostics/archivist/tests/logs-budget/config/small-caches-config.json
@@ -2,7 +2,5 @@
     "logs": {
         "max_cached_original_bytes": 3000
     },
-    "max_archive_size_bytes": 10485760,
-    "max_event_group_size_bytes": 262144,
     "num_threads": 1
 }
diff --git a/src/diagnostics/archivist/tests/v2/configs/archivist_config.json b/src/diagnostics/archivist/tests/v2/configs/archivist_config.json
index db504af..6b4b258 100644
--- a/src/diagnostics/archivist/tests/v2/configs/archivist_config.json
+++ b/src/diagnostics/archivist/tests/v2/configs/archivist_config.json
@@ -1,10 +1,7 @@
 {
-    "archive_path": "/data/archive",
     "logs": {
         "max_cached_original_bytes": 4194304
     },
-    "max_archive_size_bytes": 10485760,
-    "max_event_group_size_bytes": 262144,
     "num_threads": 4,
     "pipelines_path": "/pkg/data/config/pipelines"
 }
diff --git a/src/diagnostics/archivist/tests/v2/meta/archivist_integration_tests.cml b/src/diagnostics/archivist/tests/v2/meta/archivist_integration_tests.cml
index 9548f6f..d5dd260 100644
--- a/src/diagnostics/archivist/tests/v2/meta/archivist_integration_tests.cml
+++ b/src/diagnostics/archivist/tests/v2/meta/archivist_integration_tests.cml
@@ -30,11 +30,6 @@
             subdir: "archivist",
         },
         {
-            storage: "data",
-            from: "parent",
-            to: [ "#fuchsia_component_test_collection" ],
-        },
-        {
             event: "capability_requested",
             from: "framework",
             to: "#fuchsia_component_test_collection",
diff --git a/src/diagnostics/archivist/tests/v2/meta/test.shard.cml b/src/diagnostics/archivist/tests/v2/meta/test.shard.cml
index daafa9e..59b4126 100644
--- a/src/diagnostics/archivist/tests/v2/meta/test.shard.cml
+++ b/src/diagnostics/archivist/tests/v2/meta/test.shard.cml
@@ -13,10 +13,6 @@
             rights: [ "r*" ],
             path: "/config/data",
         },
-        {
-            storage: "data",
-            path: "/data",
-        },
     ],
     expose: [
         {
diff --git a/src/diagnostics/archivist/tests/v2/src/inspect/out_directory.rs b/src/diagnostics/archivist/tests/v2/src/inspect/out_directory.rs
index 9e582ce..9b1c4f2 100644
--- a/src/diagnostics/archivist/tests/v2/src/inspect/out_directory.rs
+++ b/src/diagnostics/archivist/tests/v2/src/inspect/out_directory.rs
@@ -37,8 +37,7 @@
     // archivist but unfortunately snapshot is the only supported mode at the moment.
     loop {
         if let Ok(mut result) = read_entries(hub_out_path).await {
-            let mut expected =
-                vec!["archive".to_string(), "diagnostics".to_string(), "svc".to_string()];
+            let mut expected = vec!["diagnostics".to_string(), "svc".to_string()];
             result.sort();
             expected.sort();
             if result == expected {
@@ -52,9 +51,6 @@
     let expected = vec!["fuchsia.inspect.Tree".to_string()];
     assert_eq!(expected, result,);
 
-    let diagnostics_entries = read_entries(&hub_out_path.join("archive")).await.unwrap();
-    assert_eq!(diagnostics_entries.len(), 1);
-
     Ok(())
 }
 
diff --git a/src/diagnostics/archivist/tests/v2/src/test_topology.rs b/src/diagnostics/archivist/tests/v2/src/test_topology.rs
index e6d1edf..56d42d2 100644
--- a/src/diagnostics/archivist/tests/v2/src/test_topology.rs
+++ b/src/diagnostics/archivist/tests/v2/src/test_topology.rs
@@ -85,11 +85,6 @@
             targets: vec![RouteEndpoint::AboveRoot],
         })?
         .add_route(CapabilityRoute {
-            capability: Capability::storage("data", "/data"),
-            source: RouteEndpoint::AboveRoot,
-            targets: vec![RouteEndpoint::component("test/archivist")],
-        })?
-        .add_route(CapabilityRoute {
             capability: Capability::protocol("fuchsia.logger.LogSink"),
             source: RouteEndpoint::component("test/archivist"),
             targets: vec![RouteEndpoint::AboveRoot],