[release] Snap to aed3611a81

Change-Id: If4a05eda3c755b21238c6556eb07e59a282cc881
diff --git a/.clang-tidy b/.clang-tidy
index 2ee0d75..6805227 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -5,6 +5,7 @@
 
   clang-diagnostic-*,
   -clang-diagnostic-unused-command-line-argument,
+  -clang-diagnostic-deprecated-declarations,
 
   google-*,
   -google-runtime-references,
diff --git a/BUILD.gn b/BUILD.gn
index 7f65b40..43f54fc 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -10,11 +10,6 @@
   extra_package_labels = []
 }
 
-install_host_tools("host") {
-  deps = [ "$cobalt_root/src/bin/error_calculator/src:bin" ]
-  outputs = [ "error_calculator" ]
-}
-
 config("cobalt_config") {
   include_dirs = [
     "$cobalt_root",
@@ -29,6 +24,13 @@
       "//zircon/system/ulib/syslog/include",
     ]
   }
+
+  # TODO(https://fxbug.dev/321745113): remove -Wno-deprecated-declarations once the deprecated string_hashes field is removed.
+  cflags = [ "-Wno-deprecated-declarations" ]
+}
+
+config("no_deprecated_pragma_config") {
+  cflags = [ "-Wno-deprecated-pragma" ]
 }
 
 group("tests") {
@@ -78,7 +80,6 @@
   deps = [
     ":cobalt_core_unittests",
     "$cobalt_root/src/bin/config_change_validator/src:bin",
-    "$cobalt_root/src/bin/error_calculator/src:bin",
   ]
 
   if (!is_fuchsia_tree) {
diff --git a/METADATA.textproto b/METADATA.textproto
index 58492b2..946c326 100644
--- a/METADATA.textproto
+++ b/METADATA.textproto
@@ -1,3 +1,9 @@
 presubmits: {
   review_notify: "tq-cobalt-reviews@google.com"
 }
+
+trackers: {
+  issue_tracker: {
+    component_id: 1352589
+  }
+}
diff --git a/OWNERS b/OWNERS
index 2647c1f..0e411ae 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,7 +1,7 @@
+aniviali@google.com
 azani@google.com
 camrdale@google.com
 frousseau@google.com
+pankhurst@google.com
 stevefung@google.com
-zmbush@google.com
-
-# COMPONENT: Cobalt
+tgales@google.com
diff --git a/build/config/linux/BUILD.gn b/build/config/linux/BUILD.gn
index 212e189..5cd7729 100644
--- a/build/config/linux/BUILD.gn
+++ b/build/config/linux/BUILD.gn
@@ -23,7 +23,7 @@
     "-Wl,-rpath=\$ORIGIN/",
   ]
   if (host_os == "mac") {
-    # TODO(fxbug.dev/27079): When building binaries for Linux on macOS, we need to use
+    # TODO(https://fxbug.dev/42101568): When building binaries for Linux on macOS, we need to use
     # lld as a linker, hence this flag. This is not needed on Linux since our
     # Clang is configured to use lld as a default linker, but we cannot use the
     # same option on macOS since default linker is currently a per-toolchain,
@@ -36,7 +36,7 @@
     ":target",
   ]
 
-  # TODO(fxbug.dev/26846): The implicitly linked static libc++.a depends on these.
+  # TODO(https://fxbug.dev/42101309): The implicitly linked static libc++.a depends on these.
   libs = [
     "dl",
     "pthread",
diff --git a/build/go/build.py b/build/go/build.py
index d88b1ec..fad8878 100755
--- a/build/go/build.py
+++ b/build/go/build.py
@@ -115,7 +115,7 @@
           # EEXIST occurs if two gopath entries share the same parent name
           if e.errno != errno.EEXIST:
             raise
-        # TODO(fxbug.dev/3037): the following check might not be necessary anymore.
+        # TODO(https://fxbug.dev/42105225): the following check might not be necessary anymore.
         tgt = os.path.join(dstdir, os.path.basename(dst))
         # The source tree is effectively read-only once the build begins.
         # Therefore it is an error if tgt is in the source tree. At first
diff --git a/build/go/gen_library_metadata.py b/build/go/gen_library_metadata.py
index 4571c43..be512aa 100755
--- a/build/go/gen_library_metadata.py
+++ b/build/go/gen_library_metadata.py
@@ -79,7 +79,7 @@
 
     current_sources = []
     if args.sources:
-        # TODO(fxbug.dev/3037): verify that the sources are in a single folder.
+        # TODO(https://fxbug.dev/42105225): verify that the sources are in a single folder.
         for source in args.sources:
             current_sources.append(Source(os.path.join(name, source),
                                           os.path.join(args.source_dir, source),
diff --git a/build/go/go_binary.gni b/build/go/go_binary.gni
index 8bc2ae3..d32f326 100644
--- a/build/go/go_binary.gni
+++ b/build/go/go_binary.gni
@@ -11,7 +11,7 @@
   forward_variables_from(invoker, [ "visibility" ])
 
   go_build(target_name) {
-    # TODO(fxbug.dev/58755): Deprecate `gopackage` in favor of `library`.
+    # TODO(https://fxbug.dev/42136747): Deprecate `gopackage` in favor of `library`.
     if (defined(invoker.gopackage)) {
       gopackages = [ invoker.gopackage ]
     }
diff --git a/build/go/go_build.gni b/build/go/go_build.gni
index 7c98bbd..03799f5 100644
--- a/build/go/go_build.gni
+++ b/build/go/go_build.gni
@@ -82,14 +82,14 @@
     }
     args = []
 
-    # TODO(fxbug.dev/58755): Delete `gopackages` in favor of `library`.
+    # TODO(https://fxbug.dev/42136747): Delete `gopackages` in favor of `library`.
     if (defined(invoker.gopackages) == defined(invoker.library)) {
       assert(false, "Exactly one of gopackages or library must be set")
     } else if (defined(invoker.gopackages)) {
       gopackages = invoker.gopackages
 
       # Multi-package support was never implemented and is no longer planned as
-      # `gopackages` is slated for deletion as part of https://fxbug.dev/58755.
+      # `gopackages` is slated for deletion as part of https://fxbug.dev/42136747.
       assert(gopackages == [ gopackages[0] ],
              "gopackages only supports one package")
       foreach(gopackage, gopackages) {
diff --git a/build/go/go_library.gni b/build/go/go_library.gni
index cee0812..1cf3a8e 100644
--- a/build/go/go_library.gni
+++ b/build/go/go_library.gni
@@ -24,7 +24,7 @@
 #
 #   sources (optional)
 #     List of source files, relative to source_dir.
-#     TODO(fxbug.dev/3037): make this attribute required.
+#     TODO(https://fxbug.dev/42105225): make this attribute required.
 #
 #   deps (optional)
 #     List of labels for Go libraries this target depends on.
diff --git a/build/rust/config.gni b/build/rust/config.gni
index 3dc34b2..1df263d 100644
--- a/build/rust/config.gni
+++ b/build/rust/config.gni
@@ -42,7 +42,7 @@
 _sysroot = sysroot
 sysroot_deps = []
 
-# TODO(fxbug.dev/3039): sysroot.gni should provide the correct label and path to the Fuchsia sysroot.
+# TODO(https://fxbug.dev/42105247): sysroot.gni should provide the correct label and path to the Fuchsia sysroot.
 if (is_fuchsia) {
   sysroot_deps = [ "//sdk:zircon_sysroot_export" ]
   _sysroot = rebase_path(
diff --git a/cobalt.ensure b/cobalt.ensure
index 594b036..606ce14 100644
--- a/cobalt.ensure
+++ b/cobalt.ensure
@@ -32,11 +32,11 @@
 # The following dependencies diverge from those used in fuchsia for some reason.
 
 # Updating to the fuchsia version causes the build failure: `undefined symbol: __sanitizer_cov_trace_pc_guard_init`
-# TODO(b/278929805): Remove this exception once issues are resolved
+# TODO(https://fxbug.dev/278929805): Remove this exception once issues are resolved
 @Subdir bin
 gn/gn/${platform} git_revision:239533d2d91a04b3317ca9101cf7189f4e651e4d
 
 # Godepfile is not present in the fuchsia prebuilts file
-# TODO(b/278929805): Switch to go's builtin depfile generator
+# TODO(https://fxbug.dev/278929805): Switch to go's builtin depfile generator
 @Subdir golang/bin
 fuchsia/tools/godepfile/${platform} git_revision:6922d7833617841e853a0be52a285f6cd07a0a10
diff --git a/cobaltb.py b/cobaltb.py
index 2c62ba0..c85e2f8 100755
--- a/cobaltb.py
+++ b/cobaltb.py
@@ -19,7 +19,6 @@
 import tempfile
 
 import tools.clang_tidy as clang_tidy
-import tools.error_calculator as error_calculator
 import tools.gitfmt as gitfmt
 import tools.gnlint as gnlint
 import tools.golint as golint
@@ -115,19 +114,6 @@
   subprocess.check_call(['./setup.sh'])
 
 
-def _calculate_error(args):
-  bin_dir = args.bin_dir
-  if not bin_dir:
-    bin_dir = out_dir(args)
-  error_calculator_bin = os.path.join(bin_dir, 'error_calculator')
-  config_parser_bin = os.path.join(bin_dir, 'config_parser')
-
-  error_calculator.generate_registry(
-      args.registry_proto, CONFIG_SUBMODULE_PATH, config_parser_bin
-  )
-  error_calculator.estimate_from_args(error_calculator_bin, args)
-
-
 def _compdb(args):
   # Copy the compile_commands.json to the top level for use in IDEs (CLion).
   subprocess.check_call([
@@ -866,17 +852,6 @@
   )
   sub_parser.set_defaults(func=_compdb)
 
-  ########################################################
-  # privacy command
-  ########################################################
-  sub_parser = subparsers.add_parser(
-      'calculate_error',
-      parents=[parent_parser],
-      help='Estimates the error for a Cobalt report with privacy',
-  )
-  error_calculator.add_parse_args(sub_parser)
-  sub_parser.set_defaults(func=_calculate_error)
-
   args = parser.parse_args()
   global _verbose_count
   _verbose_count = args.verbose_count
diff --git a/src/algorithms/experimental/archived/krr_integer_encoder.cc b/src/algorithms/experimental/archived/krr_integer_encoder.cc
index 778353d..fda0db2 100644
--- a/src/algorithms/experimental/archived/krr_integer_encoder.cc
+++ b/src/algorithms/experimental/archived/krr_integer_encoder.cc
@@ -59,7 +59,7 @@
   return left;
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 uint32_t ArchivedKrrIntegerEncoder::RandomRound(int64_t val, uint32_t left_index) {
   if (left_index == boundaries_.size() - 1) {
     return left_index;
diff --git a/src/algorithms/experimental/archived/occurrence_wise_histogram_encoder.cc b/src/algorithms/experimental/archived/occurrence_wise_histogram_encoder.cc
index 530caad..ba27ab1 100644
--- a/src/algorithms/experimental/archived/occurrence_wise_histogram_encoder.cc
+++ b/src/algorithms/experimental/archived/occurrence_wise_histogram_encoder.cc
@@ -8,7 +8,7 @@
 namespace cobalt {
 
 ArchivedOccurrenceWiseHistogramEncoder::ArchivedOccurrenceWiseHistogramEncoder(
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     BitGeneratorInterface<uint32_t>* gen, uint32_t num_buckets, uint64_t max_count, Probability p)
     : num_buckets_(num_buckets), max_count_(max_count) {
   randomizer_ = std::make_unique<ArchivedResponseRandomizer>(gen, num_buckets - 1, p);
diff --git a/src/algorithms/experimental/archived/response_randomizer.h b/src/algorithms/experimental/archived/response_randomizer.h
index ac5f9e7..f6c053c 100644
--- a/src/algorithms/experimental/archived/response_randomizer.h
+++ b/src/algorithms/experimental/archived/response_randomizer.h
@@ -25,7 +25,7 @@
  // (with probability (1 - |p|)), the method returns the input |index|.
   //
   // Inputs larger than |max_index| are replaced with |max_index| before encoding.
-  // TODO(fxbug.dev/87115): Consider other ways of handling bad input.
+  // TODO(https://fxbug.dev/42168201): Consider other ways of handling bad input.
   uint32_t Encode(uint32_t index);
 
  private:
diff --git a/src/algorithms/experimental/archived/two_dim_rappor_histogram_encoder.cc b/src/algorithms/experimental/archived/two_dim_rappor_histogram_encoder.cc
index f1a90cb..340d882 100644
--- a/src/algorithms/experimental/archived/two_dim_rappor_histogram_encoder.cc
+++ b/src/algorithms/experimental/archived/two_dim_rappor_histogram_encoder.cc
@@ -9,7 +9,7 @@
 namespace cobalt {
 
 ArchivedTwoDimRapporHistogramEncoder::ArchivedTwoDimRapporHistogramEncoder(
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     BitGeneratorInterface<uint32_t>* gen, uint32_t num_buckets, uint64_t max_count, Probability p)
     : gen_(gen), num_buckets_(num_buckets), max_count_(max_count), p_(p) {}
 
@@ -39,7 +39,7 @@
 }
 
 ArchivedTwoDimRapporHistogramSumEstimator::ArchivedTwoDimRapporHistogramSumEstimator(
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     uint32_t num_buckets, uint64_t max_count, Probability p)
     : num_buckets_(num_buckets), max_count_(max_count), p_(p) {}
 
diff --git a/src/algorithms/experimental/histogram_encoder.cc b/src/algorithms/experimental/histogram_encoder.cc
index fb10000..4e6c825 100644
--- a/src/algorithms/experimental/histogram_encoder.cc
+++ b/src/algorithms/experimental/histogram_encoder.cc
@@ -40,7 +40,7 @@
 
 OccurrenceWiseHistogramEncoder::OccurrenceWiseHistogramEncoder(
     BitGeneratorInterface<uint32_t>* gen,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     uint32_t num_buckets, uint64_t max_count, Probability p)
     : num_buckets_(num_buckets), max_count_(max_count) {
   randomizer_ = std::make_unique<ResponseRandomizer>(gen, num_buckets - 1, p);
@@ -70,7 +70,7 @@
 
 TwoDimRapporHistogramEncoder::TwoDimRapporHistogramEncoder(
     BitGeneratorInterface<uint32_t>* gen,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     uint32_t num_buckets, uint64_t max_count, Probability p)
     : gen_(gen), num_buckets_(num_buckets), max_count_(max_count), p_(p) {}
 
@@ -99,7 +99,7 @@
   return encoded;
 }
 
-// TODO(b/278930401):NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 TwoDimRapporHistogramSumEstimator::TwoDimRapporHistogramSumEstimator(uint32_t num_buckets,
                                                                      uint64_t max_count,
                                                                      Probability p)
@@ -147,7 +147,7 @@
 }
 
 SinglePassTwoDimRapporHistogramEncoder::SinglePassTwoDimRapporHistogramEncoder(
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     BitGeneratorInterface<uint32_t>* gen, uint32_t num_buckets, uint64_t max_count, Probability p)
     : gen_(gen), num_buckets_(num_buckets), max_count_(max_count), p_(p) {}
 
@@ -209,7 +209,7 @@
 }
 
 SinglePassTwoDimRapporHistogramSumEstimator::SinglePassTwoDimRapporHistogramSumEstimator(
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     uint32_t num_buckets, uint64_t max_count, Probability p)
     : num_buckets_(num_buckets), max_count_(max_count), p_(p) {}
 
@@ -254,7 +254,7 @@
 
 TwoDimBinomialHistogramEncoder::TwoDimBinomialHistogramEncoder(
     BitGeneratorInterface<uint32_t>* gen,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     uint32_t num_buckets, uint64_t max_count, Probability p)
     : gen_(gen), num_buckets_(num_buckets), max_count_(max_count), p_(p) {}
 
@@ -283,7 +283,7 @@
   return encoded;
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 TwoDimBinomialHistogramSumEstimator::TwoDimBinomialHistogramSumEstimator(uint32_t num_buckets,
                                                                          uint64_t max_count,
                                                                          Probability p)
diff --git a/src/algorithms/experimental/integer_encoder.cc b/src/algorithms/experimental/integer_encoder.cc
index 14ee7be..1d27bbb 100644
--- a/src/algorithms/experimental/integer_encoder.cc
+++ b/src/algorithms/experimental/integer_encoder.cc
@@ -58,7 +58,7 @@
   return left;
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 uint32_t IntegerEncoder::RandomRound(int64_t val, uint32_t left_index) {
   if (left_index == boundaries_.size() - 1) {
     return left_index;
diff --git a/src/algorithms/experimental/randomized_response.h b/src/algorithms/experimental/randomized_response.h
index a990e18..16e9412 100644
--- a/src/algorithms/experimental/randomized_response.h
+++ b/src/algorithms/experimental/randomized_response.h
@@ -24,7 +24,7 @@
  // (with probability (1 - |p|)), the method returns the input |index|.
   //
   // Inputs larger than |max_index| are replaced with |max_index| before encoding.
-  // TODO(fxbug.dev/87115): Consider other ways of handling bad input.
+  // TODO(https://fxbug.dev/42168201): Consider other ways of handling bad input.
   uint32_t Encode(uint32_t index);
 
  private:
diff --git a/src/algorithms/privacy/hash.cc b/src/algorithms/privacy/hash.cc
index d9ed611..9123d9c 100644
--- a/src/algorithms/privacy/hash.cc
+++ b/src/algorithms/privacy/hash.cc
@@ -12,13 +12,13 @@
 
 namespace {
 size_t TruncateDigest(uint64_t digest, size_t max) {
-  // TODO(b/278917456): Use a truncation method that preserves the uniformity of
+  // TODO(https://fxbug.dev/278917456): Use a truncation method that preserves the uniformity of
   // the distribution even if max is not a power of 2.
   return digest % max;
 }
 }  // namespace
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 uint64_t Hash64WithSeed(const uint8_t* data, size_t len, uint64_t seed) {
   std::vector<uint8_t> seeded_data(sizeof(uint64_t) + len);
   *(reinterpret_cast<uint64_t*>(seeded_data.data())) = seed;
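Editor's note on the TruncateDigest TODO above: the concern is modulo bias. When `max` does not evenly divide the 2^64 digest space, `digest % max` over-represents the low residues. A minimal Go sketch (illustration only, not part of this patch; the bucket count 1000 is a hypothetical) that counts how many residues receive one extra preimage:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// A uniform 64-bit digest has 2^64 equally likely values. Truncating with
	// `digest % max` maps ceil(2^64/max) of them onto each residue below
	// (2^64 mod max), and floor(2^64/max) onto the rest.
	space := new(big.Int).Lsh(big.NewInt(1), 64) // 2^64
	max := big.NewInt(1000)                      // hypothetical bucket count, not a power of 2
	extra := new(big.Int).Mod(space, max)
	fmt.Printf("%v of %v residues receive one extra preimage\n", extra, max)
	// Prints: 616 of 1000 residues receive one extra preimage.
	// The relative skew is on the order of max/2^64 -- tiny but nonzero,
	// which is what the TODO proposes to eliminate.
}
```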
diff --git a/src/algorithms/privacy/numeric_encoding.cc b/src/algorithms/privacy/numeric_encoding.cc
index 6c52cee..d386b13 100644
--- a/src/algorithms/privacy/numeric_encoding.cc
+++ b/src/algorithms/privacy/numeric_encoding.cc
@@ -53,7 +53,7 @@
                          num_index_points);
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 void HistogramBucketAndCountFromIndex(uint64_t index, uint64_t max_count, uint64_t num_index_points,
                                       uint32_t* bucket_index, double* bucket_count) {
   *bucket_index = static_cast<uint32_t>(index / num_index_points);
diff --git a/src/bin/config_change_validator/src/config_change_validator_main.go b/src/bin/config_change_validator/src/config_change_validator_main.go
index e3c098f..5ff349e 100644
--- a/src/bin/config_change_validator/src/config_change_validator_main.go
+++ b/src/bin/config_change_validator/src/config_change_validator_main.go
@@ -20,7 +20,7 @@
 )
 
 var (
-	// TODO(b/278914731): Remove once these flags aren't used.
+	// TODO(https://fxbug.dev/278914731): Remove once these flags aren't used.
 	oldCfg = flag.String("old_config", "", "Path of the old generated config. DEPRECATED")
 	newCfg = flag.String("new_config", "", "Path of the new generated config. DEPRECATED")
 
diff --git a/src/bin/config_change_validator/src/validator/projects.go b/src/bin/config_change_validator/src/validator/projects.go
index 34cb305..46c7d88 100644
--- a/src/bin/config_change_validator/src/validator/projects.go
+++ b/src/bin/config_change_validator/src/validator/projects.go
@@ -10,30 +10,28 @@
 )
 
 func CompareProjects(oldCfg, newCfg *config.ProjectConfig, ignoreCompatChecks bool) error {
-	newMetrics := map[string]*config.MetricDefinition{}
-	newMetricIds := map[uint32]bool{}
+	newMetrics := map[uint32]*config.MetricDefinition{}
 	for _, metric := range newCfg.Metrics {
-		newMetrics[metric.MetricName] = metric
-		newMetricIds[metric.Id] = true
+		newMetrics[metric.Id] = metric
 	}
 
-	oldMetrics := map[string]*config.MetricDefinition{}
+	oldMetrics := map[uint32]*config.MetricDefinition{}
 	newlyDeletedMetricIds := map[uint32]string{}
 	for _, metric := range oldCfg.Metrics {
-		oldMetrics[metric.MetricName] = metric
-		_, ok := newMetricIds[metric.Id]
+		oldMetrics[metric.Id] = metric
+		_, ok := newMetrics[metric.Id]
 		if !ok {
 			newlyDeletedMetricIds[metric.Id] = metric.MetricName
 		}
 	}
 
 	// Validation for all metrics.
-	for name, oldMetric := range oldMetrics {
-		newMetric, ok := newMetrics[name]
+	for oldMetricId, oldMetric := range oldMetrics {
+		newMetric, ok := newMetrics[oldMetricId]
 		if ok {
 			err := CompareMetrics(oldMetric, newMetric, ignoreCompatChecks)
 			if err != nil {
-				return fmt.Errorf("for metric named '%s': %v", name, err)
+				return fmt.Errorf("for metric named '%s': %v", oldMetric.MetricName, err)
 			}
 		}
 	}
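Editor's note on the CompareProjects change above: metrics are now paired across registry versions by `Id` rather than by `MetricName`, so a rename no longer breaks the comparison, and an ID change is treated as a delete plus an add. A minimal sketch of the pairing pattern, using simplified stand-in types rather than the real config protos:

```go
package main

import "fmt"

// metric is a stand-in for config.MetricDefinition; only the fields the
// pairing logic needs are modeled here.
type metric struct {
	ID   uint32
	Name string
}

// pairByID mirrors the map-building pattern in CompareProjects: index the
// new metrics by ID, then walk the old ones looking for matches.
func pairByID(oldMetrics, newMetrics []metric) {
	byID := map[uint32]metric{}
	for _, m := range newMetrics {
		byID[m.ID] = m
	}
	for _, om := range oldMetrics {
		if nm, ok := byID[om.ID]; ok {
			fmt.Printf("compare %q against %q (same ID %d)\n", om.Name, nm.Name, om.ID)
		} else {
			fmt.Printf("metric %q (ID %d) was deleted\n", om.Name, om.ID)
		}
	}
}

func main() {
	pairByID(
		[]metric{{1, "metric_a"}, {2, "metric_b"}},
		[]metric{{1, "metric_a_renamed"}, {3, "metric_b"}}, // rename keeps ID 1; metric_b got a new ID
	)
}
```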
diff --git a/src/bin/config_change_validator/src/validator/projects_test.go b/src/bin/config_change_validator/src/validator/projects_test.go
index c844071..5fed745 100644
--- a/src/bin/config_change_validator/src/validator/projects_test.go
+++ b/src/bin/config_change_validator/src/validator/projects_test.go
@@ -64,3 +64,60 @@
 		t.Errorf("rejected change when deleted metric IDs were added, got %v", err)
 	}
 }
+
+func TestMovedMetricsWithDeletedReports(t *testing.T) {
+	oldCfg := &config.ProjectConfig{
+		Metrics: []*config.MetricDefinition{
+			{
+				Id:         1,
+				MetricName: "metric_a",
+				CustomerId: 1,
+				ProjectId:  2,
+				MetricType: config.MetricDefinition_INTEGER,
+				MetaData: &config.MetricDefinition_Metadata{
+					MaxReleaseStage: config.ReleaseStage_GA,
+				},
+				Reports: []*config.ReportDefinition{
+					{
+						Id:         1,
+						ReportName: "report_1",
+					},
+					{
+						Id:         2,
+						ReportName: "report_2",
+					},
+				},
+			},
+		},
+	}
+	newCfg := &config.ProjectConfig{
+		Metrics: []*config.MetricDefinition{
+			{
+				Id:         2,
+				MetricName: "metric_a",
+				CustomerId: 1,
+				ProjectId:  2,
+				MetricType: config.MetricDefinition_INTEGER,
+				MetaData: &config.MetricDefinition_Metadata{
+					MaxReleaseStage: config.ReleaseStage_GA,
+				},
+				Reports: []*config.ReportDefinition{
+					{
+						Id:         1,
+						ReportName: "report_1",
+					},
+				},
+			},
+		},
+		DeletedMetricIds: []uint32{1},
+	}
+
+	err := CompareProjects(oldCfg, newCfg, false)
+	if err != nil {
+		t.Errorf("rejected valid change, got %v", err)
+	}
+
+	if err = CompareProjects(oldCfg, newCfg, true); err != nil {
+		t.Errorf("rejected valid change when ignoreCompatChecks=true, got %v", err)
+	}
+}
diff --git a/src/bin/config_parser/src/BUILD.gn b/src/bin/config_parser/src/BUILD.gn
index ed560fb..3f6b1f5 100644
--- a/src/bin/config_parser/src/BUILD.gn
+++ b/src/bin/config_parser/src/BUILD.gn
@@ -94,7 +94,8 @@
 
   sources = [
     "accounting.go",
-    "error_calculator.go",
+    "poisson.go",
+    "privacy_calculations.go",
     "privacy_encoding_params.go",
     "refined_privacy_encoding_params.go",
   ]
@@ -102,7 +103,8 @@
   # Add test files
   sources += [
     "accounting_test.go",
-    "error_calculator_test.go",
+    "poisson_test.go",
+    "privacy_calculations_test.go",
     "privacy_encoding_params_test.go",
     "refined_privacy_encoding_params_test.go",
   ]
@@ -119,6 +121,7 @@
   deps = [
     "$cobalt_root/src/registry:cobalt_registry_proto_go",
     "//third_party/golibs:github.com/golang/glog",
+    "//third_party/golibs:github.com/google/go-cmp",
   ]
 }
 
diff --git a/src/bin/config_parser/src/config_parser/config_parser.go b/src/bin/config_parser/src/config_parser/config_parser.go
index 8c9fe52..3b1e493 100644
--- a/src/bin/config_parser/src/config_parser/config_parser.go
+++ b/src/bin/config_parser/src/config_parser/config_parser.go
@@ -4,7 +4,7 @@
 
 // This file contains flag definitions for the config_parser package as well
 // as all the functions which make direct use of these flags.
-// TODO(b/278917650): Refactor and test. This is half of the logic that was ripped out
+// TODO(https://fxbug.dev/278917650): Refactor and test. This is half of the logic that was ripped out of
 // config_parser_main.go and it still needs to be refactored.
 package config_parser
 
diff --git a/src/bin/config_parser/src/config_parser/config_reader.go b/src/bin/config_parser/src/config_parser/config_reader.go
index 303d239..1d3204a 100644
--- a/src/bin/config_parser/src/config_parser/config_reader.go
+++ b/src/bin/config_parser/src/config_parser/config_reader.go
@@ -272,6 +272,9 @@
 	p.ProjectName = c.ProjectName
 	p.ProjectId = c.ProjectId
 	p.ProjectContact = c.Contact
+	if c.AppPackageIdentifier != "" {
+		p.AppPackageIdentifier = c.AppPackageIdentifier
+	}
 	if c.ProjectConfigFile != nil {
 		p.Metrics = c.ProjectConfigFile.MetricDefinitions
 
diff --git a/src/bin/config_parser/src/config_parser/config_reader_test.go b/src/bin/config_parser/src/config_parser/config_reader_test.go
index df5c5a1..64e0505 100644
--- a/src/bin/config_parser/src/config_parser/config_reader_test.go
+++ b/src/bin/config_parser/src/config_parser/config_reader_test.go
@@ -217,12 +217,13 @@
 			DeletedProjectIds: []uint32{4, 5},
 		},
 		{
-			CustomerName:      "customer2",
-			CustomerId:        2,
-			ProjectName:       "project1",
-			ProjectId:         1,
-			Contact:           "project1@customer2.com",
-			DeletedProjectIds: []uint32{2},
+			CustomerName:         "customer2",
+			CustomerId:           2,
+			ProjectName:          "project1",
+			ProjectId:            1,
+			Contact:              "project1@customer2.com",
+			AppPackageIdentifier: "com.customer2.project1",
+			DeletedProjectIds:    []uint32{2},
 		},
 		{
 			CustomerName:      "customer5",
@@ -256,9 +257,10 @@
 				DeletedProjectIds: []uint32{2},
 				Projects: []*config.ProjectConfig{
 					{
-						ProjectName:    "project1",
-						ProjectId:      1,
-						ProjectContact: "project1@customer2.com",
+						ProjectName:          "project1",
+						ProjectId:            1,
+						ProjectContact:       "project1@customer2.com",
+						AppPackageIdentifier: "com.customer2.project1",
 					},
 				},
 			},
diff --git a/src/bin/config_parser/src/config_parser/populate_privacy_params.go b/src/bin/config_parser/src/config_parser/populate_privacy_params.go
index a4887d8..40dc594 100644
--- a/src/bin/config_parser/src/config_parser/populate_privacy_params.go
+++ b/src/bin/config_parser/src/config_parser/populate_privacy_params.go
@@ -4,7 +4,7 @@
 	// Reports that use the Poisson encoding scheme must manually specify privacy
 	// encoding parameters.
 	//
-	// TODO(b/278932979): update this comment once Poisson encoding parameters are
+	// TODO(https://fxbug.dev/278932979): update this comment once Poisson encoding parameters are
 	// populated by the registry parser.
 	return nil
 }
diff --git a/src/bin/config_parser/src/config_parser/project_config.go b/src/bin/config_parser/src/config_parser/project_config.go
index cc7a0be..4537332 100644
--- a/src/bin/config_parser/src/config_parser/project_config.go
+++ b/src/bin/config_parser/src/config_parser/project_config.go
@@ -23,6 +23,7 @@
 	ProjectName                   string
 	ProjectId                     uint32
 	Contact                       string
+	AppPackageIdentifier          string
 	CustomerExperimentsNamespaces []interface{}
 	ProjectExperimentsNamespaces  []interface{}
 	DeletedProjectIds             []uint32
diff --git a/src/bin/config_parser/src/config_parser/project_list.go b/src/bin/config_parser/src/config_parser/project_list.go
index 64c759d..8feea31 100644
--- a/src/bin/config_parser/src/config_parser/project_list.go
+++ b/src/bin/config_parser/src/config_parser/project_list.go
@@ -154,6 +154,10 @@
 	}
 	c.Contact = p.ProjectContact
 
+	if p.AppPackageIdentifier != "" {
+		c.AppPackageIdentifier = p.AppPackageIdentifier
+	}
+
 	c.ProjectExperimentsNamespaces = []interface{}{}
 	if len(p.ExperimentsNamespaces) > 0 {
 		for _, en := range p.ExperimentsNamespaces {
diff --git a/src/bin/config_parser/src/config_parser/project_list_test.go b/src/bin/config_parser/src/config_parser/project_list_test.go
index ce17a3e..d3b7f63 100644
--- a/src/bin/config_parser/src/config_parser/project_list_test.go
+++ b/src/bin/config_parser/src/config_parser/project_list_test.go
@@ -32,6 +32,7 @@
   - project_name: ledger
     project_id: 10
     project_contact: ben
+    app_package_identifier: test.project.ledger
   deleted_project_ids: [11, 1]
 - customer_name: test_project2
   customer_id: 26
@@ -63,6 +64,7 @@
 			ProjectName:                   "ledger",
 			ProjectId:                     10,
 			Contact:                       "ben",
+			AppPackageIdentifier:          "test.project.ledger",
 			CustomerExperimentsNamespaces: []interface{}{"black.green.yellow"},
 			ProjectExperimentsNamespaces:  []interface{}{},
 			DeletedProjectIds:             []uint32{11, 1},
diff --git a/src/bin/config_parser/src/config_parser_main.go b/src/bin/config_parser/src/config_parser_main.go
index eebc710..9fbb32a 100644
--- a/src/bin/config_parser/src/config_parser_main.go
+++ b/src/bin/config_parser/src/config_parser_main.go
@@ -96,7 +96,7 @@
 
 	// Compute and write fields for each ReportDefinition in |config_datas|.
 	//
-	// TODO(b/278932979): update this comment once Poisson encoding parameters are
+	// TODO(https://fxbug.dev/278932979): update this comment once Poisson encoding parameters are
 	// populated by the registry parser.
 	if err := config_parser.PopulatePrivacyParams(config_datas); err != nil {
 		fail("populate privacy parameters", err)
diff --git a/src/bin/config_parser/src/config_validator/metric_definitions.go b/src/bin/config_parser/src/config_validator/metric_definitions.go
index 834765c..e53a2d1 100644
--- a/src/bin/config_parser/src/config_validator/metric_definitions.go
+++ b/src/bin/config_parser/src/config_validator/metric_definitions.go
@@ -265,7 +265,7 @@
 		return fmt.Errorf("no int_buckets specified for metric of type INTEGER_HISTOGRAM")
 	}
 
-	// TODO(b/278917650): Validate bucket definition.
+	// TODO(https://fxbug.dev/278917650): Validate bucket definition.
 
 	for _, r := range m.Reports {
 		if m.IntBuckets != nil && r.IntBuckets != nil {
diff --git a/src/bin/config_parser/src/config_validator/report_definitions.go b/src/bin/config_parser/src/config_validator/report_definitions.go
index 348ec39..e3b1d7f 100644
--- a/src/bin/config_parser/src/config_validator/report_definitions.go
+++ b/src/bin/config_parser/src/config_validator/report_definitions.go
@@ -10,6 +10,7 @@
 	"flag"
 	"fmt"
 	"math"
+	"privacy"
 )
 
 // This file contains logic to validate list of ReportDefinition protos in MetricDefinition protos.
@@ -17,6 +18,7 @@
 var (
 	noHourlyReports        = flag.Bool("no_hourly_reports", false, "Don't allow reports that send data hourly in the registry.")
 	allowReportingInterval = flag.Bool("allow_reporting_interval", false, "Allow the `reporting_interval` to be set in reports.")
+	validatePoissonMean    = flag.Bool("validate_poisson_mean", false, "Validate that the specified poisson_mean is sufficient to provide the specified privacy level.")
 )
 
 // errIllegalHourlyReport is returned in validations when no_hourly_reports is set and an hourly report is encountered.
@@ -241,6 +243,14 @@
 		reportErrors.addError("reporting_interval", err)
 	}
 
+	if err := validateExemptFromConsent(r); err != nil {
+		reportErrors.addError("exempt_from_consent", err)
+	}
+
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		reportErrors.addError("metric_dimensions, int_buckets, or num_index_points", err)
+	}
+
 	switch r.ReportType {
 
 	case config.ReportDefinition_STRING_COUNTS:
@@ -253,7 +263,7 @@
 		}
 	}
 
-	if err := validatePrivacyMechanismAndConfig(r); err != nil {
+	if err := validatePrivacyMechanismAndConfig(m, r); err != nil {
 		reportErrors.addError("other", err)
 	}
 
@@ -374,7 +384,8 @@
 		config.ReportDefinition_UNIQUE_DEVICE_NUMERIC_STATS,
 		config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS:
 		return nil
-	case config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS:
+	case config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
+		config.ReportDefinition_STRING_COUNTS:
 		if r.ReportingInterval == config.ReportDefinition_DAYS_1 {
 			return nil
 		}
@@ -382,8 +393,7 @@
 	case config.ReportDefinition_HOURLY_VALUE_HISTOGRAMS,
 		config.ReportDefinition_FLEETWIDE_HISTOGRAMS,
 		config.ReportDefinition_FLEETWIDE_MEANS,
-		config.ReportDefinition_HOURLY_VALUE_NUMERIC_STATS,
-		config.ReportDefinition_STRING_COUNTS:
+		config.ReportDefinition_HOURLY_VALUE_NUMERIC_STATS:
 		return errIllegalHourlyReport
 	}
 	return fmt.Errorf("report of type %v is not expected", r.ReportType)
@@ -397,13 +407,40 @@
 	case config.ReportDefinition_REPORTING_INTERVAL_UNSET:
 		return nil
 	default:
-		if allowed && r.ReportType == config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS {
+		if allowed && (r.ReportType == config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS || r.ReportType == config.ReportDefinition_STRING_COUNTS) {
 			return nil
 		}
 		return fmt.Errorf("setting `reporting_interval` is not allowed for type %v", r.ReportType)
 	}
 }
 
+// Validates the `exempt_from_consent` field.
+func validateExemptFromConsent(r *config.ReportDefinition) error {
+	if !r.ExemptFromConsent {
+		return nil
+	}
+	if r.PrivacyLevel == config.ReportDefinition_NO_ADDED_PRIVACY {
+		return fmt.Errorf("setting `exempt_from_consent` is not allowed unless differential privacy is enabled")
+	}
+
+	return nil
+}
+
+// Validates that the max private index value fits in a signed int32.
+func validateMaxPrivateIndex(m *config.MetricDefinition, r *config.ReportDefinition) error {
+	if r.PrivacyLevel == config.ReportDefinition_NO_ADDED_PRIVACY {
+		return nil
+	}
+	numPrivateIndices, err := privacy.GetNumPrivateIndices(m, r)
+	if err != nil {
+		return err
+	}
+	if numPrivateIndices >= math.MaxInt32 {
+		return fmt.Errorf("privacy enabled reports can not handle more than a signed int32 of possible values, try reducing the number of dimensions/events, histogram buckets, or num_index_points, current number of indices is: %v", numPrivateIndices)
+	}
+	return nil
+}
+
 /////////////////////////////////////////////////////////////////
 // Validation for specific report types:
 /////////////////////////////////////////////////////////////////
@@ -547,7 +584,7 @@
 	return nil
 }
 
-func validatePrivacyMechanismAndConfig(r *config.ReportDefinition) error {
+func validatePrivacyMechanismAndConfig(m *config.MetricDefinition, r *config.ReportDefinition) error {
 	switch r.PrivacyMechanism {
 	case config.ReportDefinition_DE_IDENTIFICATION:
 		if r.PrivacyConfig != nil {
@@ -557,7 +594,15 @@
 		if r.PrivacyConfig != nil {
 			switch op := r.PrivacyConfig.(type) {
 			case *config.ReportDefinition_ShuffledDp:
-				return validateShuffledDpConfig(op.ShuffledDp)
+				if err := validateShuffledDpConfig(op.ShuffledDp); err != nil {
+					return err
+				}
+
+				if *validatePoissonMean {
+					return validateReportPoissonMean(m, r)
+				}
+
+				return nil
 			default:
 				return fmt.Errorf("you specified the wrong privacy config, you should specify shuffled_dp privacy config when select SHUFFLED_DIFFERENTIAL_PRIVACY")
 			}
@@ -568,6 +613,19 @@
 	return nil
 }
 
+func validateReportPoissonMean(m *config.MetricDefinition, r *config.ReportDefinition) error {
+	p, err := privacy.GetPrivacyParamsForReport(m, r)
+	if err != nil {
+		return err
+	}
+
+	if err := p.Validate(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 func validateShuffledDpConfig(c *config.ReportDefinition_ShuffledDifferentialPrivacyConfig) error {
 	if c.Epsilon <= 0 {
 		return fmt.Errorf("epsilon is %f, must be > 0", c.Epsilon)
diff --git a/src/bin/config_parser/src/config_validator/report_definitions_test.go b/src/bin/config_parser/src/config_validator/report_definitions_test.go
index a98bbe8..691f343 100644
--- a/src/bin/config_parser/src/config_validator/report_definitions_test.go
+++ b/src/bin/config_parser/src/config_validator/report_definitions_test.go
@@ -630,6 +630,129 @@
 	}
 }
 
+func TestValidateExemptFromConsent(t *testing.T) {
+	r := makeValidReportWithType(config.ReportDefinition_UNIQUE_DEVICE_COUNTS)
+	r.ExemptFromConsent = true
+	r.PrivacyLevel = config.ReportDefinition_NO_ADDED_PRIVACY
+	if err := validateExemptFromConsent(r); err == nil {
+		t.Error("Accepted report with no differential privacy and exempt_from_consent set")
+	}
+	r.PrivacyLevel = config.ReportDefinition_LOW_PRIVACY
+	if err := validateExemptFromConsent(r); err != nil {
+		t.Errorf("Rejected report with valid exempt_from_consent: %v", err)
+	}
+}
+
+func TestValidateMaxPrivateIndex(t *testing.T) {
+	m := makeValidMetric(config.MetricDefinition_OCCURRENCE)
+	r := makeValidReportWithType(config.ReportDefinition_UNIQUE_DEVICE_COUNTS)
+	r.PrivacyLevel = config.ReportDefinition_NO_ADDED_PRIVACY
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		t.Errorf("Rejected report with valid max private index: %v", err)
+	}
+	r.PrivacyLevel = config.ReportDefinition_HIGH_PRIVACY
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		t.Errorf("Rejected private UNIQUE_DEVICE_COUNTS report with valid max private index: %v", err)
+	}
+	m.MetricDimensions = append(m.MetricDimensions, &config.MetricDefinition_MetricDimension{
+		Dimension:    "Dimension with lots of event codes",
+		MaxEventCode: math.MaxInt32 + 1, // > INT32_MAX
+	})
+	if err := validateMaxPrivateIndex(m, r); err == nil {
+		t.Error("Accepted private UNIQUE_DEVICE_COUNTS report with too many private indices")
+	}
+
+	m = makeValidMetric(config.MetricDefinition_OCCURRENCE)
+	m.MetricDimensions = append(m.MetricDimensions, &config.MetricDefinition_MetricDimension{
+		Dimension:    "Dimension with lots of event codes",
+		MaxEventCode: 1000,
+	})
+	r = makeValidReportWithType(config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS)
+	r.PrivacyLevel = config.ReportDefinition_HIGH_PRIVACY
+	r.NumIndexPoints = 10
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		t.Errorf("Rejected private FLEETWIDE_OCCURRENCE_COUNTS report with valid max private index: %v", err)
+	}
+	r.NumIndexPoints = math.MaxInt32 / 10 // * 1,000 * 10 will be > INT32_MAX
+	if err := validateMaxPrivateIndex(m, r); err == nil {
+		t.Error("Accepted private FLEETWIDE_OCCURRENCE_COUNTS report with too many private indices")
+	}
+
+	m = makeValidMetric(config.MetricDefinition_INTEGER)
+	m.MetricDimensions = append(m.MetricDimensions, &config.MetricDefinition_MetricDimension{
+		Dimension:    "Dimension with lots of event codes",
+		MaxEventCode: 1000,
+	})
+	r = makeValidReportWithType(config.ReportDefinition_FLEETWIDE_MEANS)
+	r.PrivacyLevel = config.ReportDefinition_HIGH_PRIVACY
+	r.NumIndexPoints = 10
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		t.Errorf("Rejected private FLEETWIDE_MEANS report with valid max private index: %v", err)
+	}
+	r.NumIndexPoints = math.MaxInt32 / 100 // * 1,000 * 10 will be > INT32_MAX
+	if err := validateMaxPrivateIndex(m, r); err == nil {
+		t.Error("Accepted private FLEETWIDE_MEANS report with too many private indices")
+	}
+
+	r = makeValidReportWithType(config.ReportDefinition_UNIQUE_DEVICE_HISTOGRAMS)
+	r.PrivacyLevel = config.ReportDefinition_HIGH_PRIVACY
+	r.IntBuckets = &config.IntegerBuckets{
+		Buckets: &config.IntegerBuckets_Linear{
+			Linear: &config.LinearIntegerBuckets{
+				NumBuckets: 1000,
+			},
+		},
+	}
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		t.Errorf("Rejected private UNIQUE_DEVICE_HISTOGRAMS report with valid max private index: %v", err)
+	}
+	r.IntBuckets.GetLinear().NumBuckets = 10000000 // 10,000,000 * 1,000 > INT32_MAX
+	if err := validateMaxPrivateIndex(m, r); err == nil {
+		t.Error("Accepted private UNIQUE_DEVICE_HISTOGRAMS report with too many private indices")
+	}
+
+	m = makeValidMetric(config.MetricDefinition_INTEGER_HISTOGRAM)
+	m.MetricDimensions = append(m.MetricDimensions, &config.MetricDefinition_MetricDimension{
+		Dimension:    "Dimension with lots of event codes",
+		MaxEventCode: 1000,
+	})
+	m.IntBuckets = &config.IntegerBuckets{
+		Buckets: &config.IntegerBuckets_Linear{
+			Linear: &config.LinearIntegerBuckets{
+				NumBuckets: 1000,
+			},
+		},
+	}
+	r = makeValidReportWithType(config.ReportDefinition_FLEETWIDE_HISTOGRAMS)
+	r.PrivacyLevel = config.ReportDefinition_HIGH_PRIVACY
+	r.NumIndexPoints = 1000
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		t.Errorf("Rejected private FLEETWIDE_HISTOGRAMS report with valid max private index: %v", err)
+	}
+	m.IntBuckets.GetLinear().NumBuckets = 10000 // 10,000 * 1,000 * 1,000 > INT32_MAX
+	if err := validateMaxPrivateIndex(m, r); err == nil {
+		t.Error("Accepted private FLEETWIDE_HISTOGRAMS report with too many private indices")
+	}
+
+	m = makeValidMetric(config.MetricDefinition_STRING)
+	m.MetricDimensions = append(m.MetricDimensions, &config.MetricDefinition_MetricDimension{
+		Dimension:    "Dimension with lots of event codes",
+		MaxEventCode: 1000,
+	})
+	r = makeValidReportWithType(config.ReportDefinition_STRING_COUNTS)
+	r.PrivacyLevel = config.ReportDefinition_HIGH_PRIVACY
+	if err := validateMaxPrivateIndex(m, r); err != nil {
+		t.Errorf("Rejected private STRING_COUNTS report with valid max private index: %v", err)
+	}
+	m.MetricDimensions = append(m.MetricDimensions, &config.MetricDefinition_MetricDimension{
+		Dimension:    "Dimension with lots of event codes",
+		MaxEventCode: 1000000, // 1,000,000 * 1,000 * 50 (string count-min constants) will be > INT32_MAX
+	})
+	if err := validateMaxPrivateIndex(m, r); err == nil {
+		t.Error("Accepted private STRING_COUNTS report with too many private indices")
+	}
+}
+
 func TestValidateNoHourlyReports(t *testing.T) {
 	m := makeValidMetric(config.MetricDefinition_OCCURRENCE)
 	dailyReportTypes := []config.ReportDefinition_ReportType{
@@ -687,6 +810,34 @@
 	if err := validateNoHourlyReports(m, r); err != nil {
 		t.Errorf("Rejected daily FLEETWIDE_OCCURRENCE_COUNTS report with daily reporting interval: %v", err)
 	}
+
+	// Test STRING_COUNTS, which may define a non-hourly reporting
+	// interval.
+	r = makeValidReportWithType(config.ReportDefinition_STRING_COUNTS)
+	// Test unset reporting interval.
+	err = validateNoHourlyReports(m, r)
+	if err == nil {
+		t.Error("Accepted hourly STRING_COUNTS report with default reporting interval")
+	}
+	if !errors.Is(err, errIllegalHourlyReport) {
+		t.Errorf("Incorrect error return type: %v", err)
+	}
+
+	// Test HOURS_1 reporting interval.
+	r.ReportingInterval = config.ReportDefinition_HOURS_1
+	err = validateNoHourlyReports(m, r)
+	if err == nil {
+		t.Error("Accepted hourly STRING_COUNTS report with hourly reporting interval")
+	}
+	if !errors.Is(err, errIllegalHourlyReport) {
+		t.Errorf("Incorrect error return type: %v", err)
+	}
+
+	// Test DAYS_1 reporting interval.
+	r.ReportingInterval = config.ReportDefinition_DAYS_1
+	if err := validateNoHourlyReports(m, r); err != nil {
+		t.Errorf("Rejected daily STRING_COUNTS report with daily reporting interval: %v", err)
+	}
 }
 
 func TestValidateEnabledReportingInterval(t *testing.T) {
@@ -695,31 +846,37 @@
 		config.ReportDefinition_FLEETWIDE_HISTOGRAMS,
 		config.ReportDefinition_FLEETWIDE_MEANS,
 		config.ReportDefinition_HOURLY_VALUE_NUMERIC_STATS,
-		config.ReportDefinition_STRING_COUNTS,
 		config.ReportDefinition_UNIQUE_DEVICE_COUNTS,
 		config.ReportDefinition_UNIQUE_DEVICE_HISTOGRAMS,
 		config.ReportDefinition_UNIQUE_DEVICE_NUMERIC_STATS,
 		config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS,
 	}
 
+	supportedReportTypes := []config.ReportDefinition_ReportType{
+		config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
+		config.ReportDefinition_STRING_COUNTS,
+	}
+
 	reportingIntervalValues := []config.ReportDefinition_ReportingInterval{
 		config.ReportDefinition_HOURS_1,
 		config.ReportDefinition_DAYS_1,
 	}
 
-	// Check supported report types.
-	r := makeValidReportWithType(config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS)
+	for _, reportType := range supportedReportTypes {
+		// Check supported report types.
+		r := makeValidReportWithType(reportType)
 
-	// Unset reporting interval is valid.
-	if err := validateReportingInterval(r, true); err != nil {
-		t.Errorf("Rejected report with no reporting interval set: %v", err)
-	}
-
-	// All reporting interval values are valid.
-	for _, reportingInterval := range reportingIntervalValues {
-		r.ReportingInterval = reportingInterval
+		// Unset reporting interval is valid.
 		if err := validateReportingInterval(r, true); err != nil {
-			t.Errorf("Rejected report with valid %v reporting interval set: %v", reportingInterval, err)
+			t.Errorf("Rejected report with no reporting interval set: %v", err)
+		}
+
+		// All reporting interval values are valid.
+		for _, reportingInterval := range reportingIntervalValues {
+			r.ReportingInterval = reportingInterval
+			if err := validateReportingInterval(r, true); err != nil {
+				t.Errorf("Rejected report with valid %v reporting interval set: %v", reportingInterval, err)
+			}
 		}
 	}
 
@@ -802,18 +959,19 @@
 
 func TestValidatePrivacyMechanismAndConfig(t *testing.T) {
 	// Default valid metric uses DEBUG max_release_stage.
+	m := makeValidMetric(config.MetricDefinition_OCCURRENCE)
 	r := makeValidReportWithType(config.ReportDefinition_UNIQUE_DEVICE_COUNTS)
-	if err := validatePrivacyMechanismAndConfig(r); err != nil {
+	if err := validatePrivacyMechanismAndConfig(m, r); err != nil {
 		t.Errorf("Rejected report with no privacy mechanism: %v", err)
 	}
 
 	r.PrivacyMechanism = config.ReportDefinition_DE_IDENTIFICATION
-	if err := validatePrivacyMechanismAndConfig(r); err != nil {
+	if err := validatePrivacyMechanismAndConfig(m, r); err != nil {
 		t.Errorf("Rejected report with privacy mechanism DE_IDENTIFICATION: %v", err)
 	}
 
 	r.PrivacyMechanism = config.ReportDefinition_SHUFFLED_DIFFERENTIAL_PRIVACY
-	if err := validatePrivacyMechanismAndConfig(r); err == nil {
+	if err := validatePrivacyMechanismAndConfig(m, r); err == nil {
 		t.Errorf("Didn't rejected report with privacy mechanism SHUFFLED_DIFFERENTIAL_PRIVACY and empty config: %v", err)
 	}
 
@@ -825,11 +983,36 @@
 			PoissonMean:        1,
 		},
 	}
-	if err := validatePrivacyMechanismAndConfig(r); err != nil {
+	if err := validatePrivacyMechanismAndConfig(m, r); err != nil {
 		t.Errorf("Rejected report with privacy mechanism SHUFFLED_DIFFERENTIAL_PRIVACY and not empty config: %v", err)
 	}
 }
 
+func TestValidateShuffledDpReportPrivacy(t *testing.T) {
+	m := makeValidMetric(config.MetricDefinition_OCCURRENCE)
+	r := makeValidReportWithType(config.ReportDefinition_UNIQUE_DEVICE_COUNTS)
+
+	r.LocalAggregationProcedure = config.ReportDefinition_AT_LEAST_ONCE
+	r.PrivacyMechanism = config.ReportDefinition_SHUFFLED_DIFFERENTIAL_PRIVACY
+	r.PrivacyConfig = &config.ReportDefinition_ShuffledDp{
+		ShuffledDp: &config.ReportDefinition_ShuffledDifferentialPrivacyConfig{
+			Epsilon:            1,
+			Delta:              1e-10,
+			ReportingThreshold: 1000,
+			PoissonMean:        0.085937,
+		},
+	}
+
+	if err := validateReportPoissonMean(m, r); err != nil {
+		t.Errorf("Rejected report with valid privacy parameters with: %v", err)
+	}
+
+	r.PrivacyConfig.(*config.ReportDefinition_ShuffledDp).ShuffledDp.PoissonMean = 0.07
+	if err := validateReportPoissonMean(m, r); err == nil {
+		t.Error("Accepted report with invalid privacy parameters")
+	}
+}
+
 func TestValidateShuffledDpConfigWithValidConfig(t *testing.T) {
 	c := &config.ReportDefinition_ShuffledDifferentialPrivacyConfig{
 		Epsilon:            1,
diff --git a/src/bin/config_parser/src/privacy/accounting.go b/src/bin/config_parser/src/privacy/accounting.go
index 55b4fb0..dd94195 100644
--- a/src/bin/config_parser/src/privacy/accounting.go
+++ b/src/bin/config_parser/src/privacy/accounting.go
@@ -7,13 +7,14 @@
 
 import (
 	"fmt"
-	"gonum.org/v1/gonum/dsp/fourier"
 	"math"
 	"math/cmplx"
+
+	"gonum.org/v1/gonum/dsp/fourier"
 )
 
 // ProbabilityMassFunction represents a mass function of a discrete probability distribution
-// TODO(b/296499681): Consider using only the shiftedDistribution type representation of ProbabilityMassFunction.
+// TODO(https://fxbug.dev/296499681): Consider using only the shiftedDistribution type representation of ProbabilityMassFunction.
 type ProbabilityMassFunction map[int]float64
 
 // PrivacyLossDistribution represents a privacy loss distribution defined by probability mass function that is
@@ -140,7 +141,13 @@
 	// Compute final array length, so it will contain enough space for all coefficients for target discretization
 	minKey1, maxKey1 := minMaxKeyValues(pmf1)
 	minKey2, maxKey2 := minMaxKeyValues(pmf2)
-	finalArrayLength := maxKey1 + maxKey2 + 1 - minKey1 - minKey2
+	minArrayLength := maxKey1 + maxKey2 + 1 - minKey1 - minKey2
+	// The running time of FFT operations depends on both the length of the
+	// array and the size of its largest prime factor, so we round the length
+	// up to the next power of 2 to improve performance.
+	//
+	// TODO(https://fxbug.dev/296499492): Find something better, similar to scipy.fft.next_fast_len.
+	finalArrayLength := int(math.Pow(2, math.Ceil(math.Log2(float64(minArrayLength)))))
 
 	// For applying FFT we convert each map to an array, shifting all values to the smallest value.
 	shiftedDistr1, err := convertMapToShiftedArray(pmf1, finalArrayLength)
@@ -152,8 +159,6 @@
 		return nil, err
 	}
 
-	// TODO(b/296499492): Is it possible to optimize this by choosing a slightly larger array length,
-	// similar to scipy.fft.next_fast_len in Python?
 	fft := fourier.NewFFT(int(finalArrayLength))
 	coeff1 := fft.Coefficients(nil, shiftedDistr1.array)
 	coeff2 := fft.Coefficients(nil, shiftedDistr2.array)
@@ -196,7 +201,7 @@
 		return lowerBound, upperBound
 	}
 
-	// TODO(b/195348085) : Explain the choice of orders.
+	// TODO(https://fxbug.dev/195348085): Explain the choice of orders.
 	for order := -20; order <= 20; order++ {
 		if order == 0 {
 			continue
@@ -204,7 +209,7 @@
 		rescaledOrder := float64(order) / float64(arraySize)
 		// To compute the moment-generating function, we compute the corresponding moment (the expectation of e^{o*t}).
 		momentValue := 0.0
-		// TODO(b/296499687): Is it possible to use LogSumExp (like scipy.special.logsumexp in python)?
+		// TODO(https://fxbug.dev/296499687): Is it possible to use LogSumExp (like scipy.special.logsumexp in python)?
 		for value, prob := range distrArray {
 			momentValue += math.Exp(float64(value)*rescaledOrder) * prob
 		}
@@ -247,9 +252,13 @@
 
 	// Final array length defines how many Fourier coefficients we will compute.
 	// It should be large enough to store distrArray (the initial array).
-	// TODO(b/296499492): Is it possible to optimize this by choosing a slightly larger array length,
-	// similar to scipy.fft.next_fast_len in Python?
-	finalArrayLength := maxBound - minBound + 1
+	minArrayLength := maxBound - minBound + 1
+	// The running time of FFT operations depends on both the length of the
+	// array and the size of its largest prime factor, so we round the length
+	// up to the next power of 2 to improve performance.
+	//
+	// TODO(https://fxbug.dev/296499492): Find something better, similar to scipy.fft.next_fast_len.
+	finalArrayLength := int(math.Pow(2, math.Ceil(math.Log2(float64(minArrayLength)))))
 
 	// For applying FFT we convert each map to an array, shifting all values to the smallest value.
 	shiftedDistr, err := convertMapToShiftedArray(pmf, finalArrayLength)
@@ -377,7 +386,7 @@
 	newInfiniteMass := pld1.infiniteMass + pld2.infiniteMass - pld1.infiniteMass*pld2.infiniteMass
 	if pld1.pessimisticEstimation {
 		// Truncated mass gets added to infiniteMass.
-		// TODO(b/296500421): Only add the mass that is actually truncated instead of `truncatedMass`.
+		// TODO(https://fxbug.dev/296500421): Only add the mass that is actually truncated instead of `truncatedMass`.
 		newInfiniteMass += truncatedMass
 	}
 
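Editor's note on the two FFT changes above: both pad the working array up to a power of 2 because FFT running time degrades when the array length has a large prime factor. A small standalone sketch of the rounding rule (the same arithmetic the patch applies to minArrayLength):

```go
package main

import (
	"fmt"
	"math"
)

// nextPow2 is the same arithmetic the patch applies to minArrayLength.
func nextPow2(n int) int {
	return int(math.Pow(2, math.Ceil(math.Log2(float64(n)))))
}

func main() {
	for _, n := range []int{5, 1000, 1024, 1025} {
		fmt.Printf("minArrayLength=%4d -> finalArrayLength=%4d\n", n, nextPow2(n))
	}
	// Zero-padding beyond len1+len2-1 leaves the linear convolution result
	// unchanged, so rounding up is safe as well as faster.
}
```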
diff --git a/src/bin/config_parser/src/privacy/error_calculator.go b/src/bin/config_parser/src/privacy/error_calculator.go
deleted file mode 100644
index 07d6c78..0000000
--- a/src/bin/config_parser/src/privacy/error_calculator.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package privacy
-
-import (
-	"config"
-	"fmt"
-	"math"
-)
-
-type ErrorCalculator struct {
-	ParamsCalc PrivacyEncodingParamsCalculator
-}
-
-// Public factory method for creating an ErrorCalculator given a
-// PrivacyEncodingParamsCalculator.
-func NewErrorCalculator(paramsCalc PrivacyEncodingParamsCalculator) *ErrorCalculator {
-	return &ErrorCalculator{paramsCalc}
-}
-
-// Public factory method for creating an ErrorCalculator given the file path
-// of the PrivacyEncodingParams.
-func NewErrorCalculatorFromPrivacyParams(privacyParamsPath string) (*ErrorCalculator, error) {
-	paramsCalculator, err := NewPrivacyEncodingParamsCalculator(privacyParamsPath)
-	if err != nil {
-		return nil, err
-	}
-	errorCalculator := NewErrorCalculator(*paramsCalculator)
-	if err != nil {
-		return nil, err
-	}
-	return errorCalculator, nil
-}
-
-// Given a |metric|, |report|, and |params|, estimates the report row error.
-//
-// Row error is temporarily -1 unless |epsilon| is 0 due to deprecated Rappor
-// encoding scheme.
-//
-// TODO(b/278932979): update this comment once error calculation is implemented for
-// Poisson encoding scheme.
-func (e *ErrorCalculator) Estimate(metric *config.MetricDefinition, report *config.ReportDefinition, epsilon float64, population uint64, minDenominatorEstimate uint64) (estimate float64, err error) {
-	if epsilon == 0 {
-		return 0, nil
-	}
-
-	var errorEstimate float64
-	switch report.GetReportType() {
-	case config.ReportDefinition_UNIQUE_DEVICE_HISTOGRAMS:
-		fallthrough
-	case config.ReportDefinition_UNIQUE_DEVICE_COUNTS:
-		fallthrough
-	case config.ReportDefinition_HOURLY_VALUE_HISTOGRAMS:
-		fallthrough
-	case config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS:
-		fallthrough
-	case config.ReportDefinition_FLEETWIDE_HISTOGRAMS:
-		fallthrough
-	case config.ReportDefinition_UNIQUE_DEVICE_NUMERIC_STATS:
-		fallthrough
-	case config.ReportDefinition_HOURLY_VALUE_NUMERIC_STATS:
-		fallthrough
-	case config.ReportDefinition_FLEETWIDE_MEANS:
-		fallthrough
-	default:
-		reportType := config.ReportDefinition_ReportType_name[int32(report.GetReportType())]
-		return -1, fmt.Errorf("error estimation is not supported for reports of type %s", reportType)
-	}
-
-	if math.IsNaN(errorEstimate) || math.IsInf(errorEstimate, 0) {
-		return errorEstimate, fmt.Errorf("error estimation failed to return valid result due to an invalid or missing field")
-	}
-	return errorEstimate, nil
-}
-
-func meanReportConfigurationError(report *config.ReportDefinition, minDenominatorEstimate uint64) error {
-	if minDenominatorEstimate == 0 {
-		return fmt.Errorf("user estimate for lower bound on unnoised denominator required for %s", report.GetReportType())
-	}
-	if report.MaxValue == 0 && report.MinValue == 0 {
-		return fmt.Errorf("MinValue and MaxValue required to estimate error for %s", report.GetReportType())
-	}
-	return nil
-}
diff --git a/src/bin/config_parser/src/privacy/error_calculator_test.go b/src/bin/config_parser/src/privacy/error_calculator_test.go
deleted file mode 100644
index 788ef58..0000000
--- a/src/bin/config_parser/src/privacy/error_calculator_test.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-package privacy
-
-import (
-	"config"
-	"testing"
-
-	"github.com/golang/glog"
-)
-
-// The try-bots expect glog to be imported, but we do not use it.
-var _ = glog.Info
-
-func TestEstimateWithEpsilon(t *testing.T) {
-	eventVectorBufferMax := uint64(5)
-
-	privacyParamsCalculator, err := NewPrivacyEncodingParamsCalculatorForTesting(testParamRecords)
-	if err != nil {
-		t.Fatal("Failed to create PrivacyEncodingParamsCalculator")
-	}
-	errorCalculator := NewErrorCalculator(*privacyParamsCalculator)
-
-	testMetric := config.MetricDefinition{
-		MetricName: "Occurrence",
-		MetricType: config.MetricDefinition_OCCURRENCE,
-	}
-
-	uniqueDeviceCount := config.ReportDefinition{
-		ReportName:                "UniqueDeviceCounts",
-		ReportType:                config.ReportDefinition_UNIQUE_DEVICE_COUNTS,
-		LocalAggregationProcedure: config.ReportDefinition_SELECT_FIRST,
-	}
-	uniqueDeviceCountWithBufferMax := config.ReportDefinition{
-		ReportName:                "UniqueDeviceCountsWithBufferMax",
-		ReportType:                config.ReportDefinition_UNIQUE_DEVICE_COUNTS,
-		LocalAggregationProcedure: config.ReportDefinition_SELECT_FIRST,
-		EventVectorBufferMax:      eventVectorBufferMax,
-	}
-	uniqueDeviceHistogram := config.ReportDefinition{
-		ReportName: "UniqueDeviceHistograms",
-		ReportType: config.ReportDefinition_UNIQUE_DEVICE_HISTOGRAMS,
-	}
-	hourlyValueHistogram := config.ReportDefinition{
-		ReportName: "HourlyValueHistograms",
-		ReportType: config.ReportDefinition_HOURLY_VALUE_HISTOGRAMS,
-	}
-	fleetwideOccurrenceCount := config.ReportDefinition{
-		ReportName: "FleetwideOccurrenceCounts",
-		ReportType: config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-		MaxValue:   5,
-		MinValue:   1,
-	}
-	fleetwideOccurrenceCountHighMax := config.ReportDefinition{
-		ReportName: "FleetwideOccurrenceCountsWithHighMaxValue",
-		ReportType: config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-		MaxValue:   20,
-		MinValue:   1,
-	}
-	fleetwideOccurrenceCountNegativeValue := config.ReportDefinition{
-		ReportName: "FleetwideOccurrenceCountsNegativeValue",
-		ReportType: config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-		MaxValue:   -1,
-		MinValue:   -5,
-	}
-	fleetwideOccurrenceCountWithBufferMax := config.ReportDefinition{
-		ReportName:           "FleetwideOccurrenceCountsWithBufferMax",
-		ReportType:           config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-		MaxValue:             5,
-		MinValue:             1,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	fleetwideHistogram := config.ReportDefinition{
-		ReportName: "FleetwideHistograms",
-		ReportType: config.ReportDefinition_FLEETWIDE_HISTOGRAMS,
-		MaxCount:   5,
-	}
-	fleetwideHistogramHighMax := config.ReportDefinition{
-		ReportName: "FleetwideHistogramsWithHighMaxCount",
-		ReportType: config.ReportDefinition_FLEETWIDE_HISTOGRAMS,
-		MaxCount:   20,
-	}
-	uniqueDeviceNumericStats := config.ReportDefinition{
-		ReportName: "UniqueDeviceNumericStats",
-		ReportType: config.ReportDefinition_UNIQUE_DEVICE_NUMERIC_STATS,
-		MinValue:   0,
-		MaxValue:   1,
-	}
-	uniqueDeviceNumericStatsWithBufferMax := config.ReportDefinition{
-		ReportName:           "UniqueDeviceNumericStatsWithBufferMax",
-		ReportType:           config.ReportDefinition_UNIQUE_DEVICE_NUMERIC_STATS,
-		MinValue:             0,
-		MaxValue:             1,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	uniqueDeviceNumericStatsMissingMaxValue := config.ReportDefinition{
-		ReportName: "UniqueDeviceNumericStatsMissingMaxValue",
-		ReportType: config.ReportDefinition_UNIQUE_DEVICE_NUMERIC_STATS,
-	}
-	hourlyValueNumericStats := config.ReportDefinition{
-		ReportName: "HourlyValueNumericStats",
-		ReportType: config.ReportDefinition_HOURLY_VALUE_NUMERIC_STATS,
-		MinValue:   0,
-		MaxValue:   1,
-	}
-	fleetwideMeans := config.ReportDefinition{
-		ReportName: "FleetwideMeans",
-		ReportType: config.ReportDefinition_FLEETWIDE_MEANS,
-		MinValue:   0,
-		MaxValue:   5,
-		MaxCount:   5,
-	}
-	fleetwideMeansHighMaxCount := config.ReportDefinition{
-		ReportName: "FleetwideMeansHighMaxCount",
-		ReportType: config.ReportDefinition_FLEETWIDE_MEANS,
-		MinValue:   0,
-		MaxValue:   5,
-		MaxCount:   20,
-	}
-	fleetwideMeansMissingMaxValue := config.ReportDefinition{
-		ReportName: "FleetwideMeansMissingMaxValue",
-		ReportType: config.ReportDefinition_FLEETWIDE_MEANS,
-	}
-	fleetwideHistogramMissingMaxCount := config.ReportDefinition{
-		ReportName: "FleetwideHistogramMissingMaxCount",
-		ReportType: config.ReportDefinition_FLEETWIDE_HISTOGRAMS,
-	}
-	fleetwideOccurrenceCountMissingMaxValue := config.ReportDefinition{
-		ReportName: "FleetwideOccurrenceCounts",
-		ReportType: config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-	}
-	fleetwideMeansWithBufferMax := config.ReportDefinition{
-		ReportName:           "FleetwideMeansWithBufferMax",
-		ReportType:           config.ReportDefinition_FLEETWIDE_MEANS,
-		MinValue:             0,
-		MaxValue:             5,
-		MaxCount:             5,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	unsupportedReport := config.ReportDefinition{
-		ReportName: "UnsupportedReport",
-	}
-
-	type args struct {
-		metric                 *config.MetricDefinition
-		report                 *config.ReportDefinition
-		epsilon                float64
-		population             uint64
-		minDenominatorEstimate uint64
-	}
-	var tests = []struct {
-		input    args
-		valid    bool
-		expected float64
-	}{
-		// Single contribution reports
-		{args{&testMetric, &uniqueDeviceCount, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &uniqueDeviceCount, 10, 10000, 0}, false, -1},
-		{args{&testMetric, &uniqueDeviceCount, 1, 20000, 0}, false, -1},
-		{args{&testMetric, &uniqueDeviceHistogram, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &uniqueDeviceHistogram, 10, 10000, 0}, false, -1},
-		{args{&testMetric, &uniqueDeviceHistogram, 1, 20000, 0}, false, -1},
-		{args{&testMetric, &hourlyValueHistogram, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &hourlyValueHistogram, 10, 10000, 0}, false, -1},
-		{args{&testMetric, &hourlyValueHistogram, 1, 20000, 0}, false, -1},
-
-		// Multi-contribution reports
-		{args{&testMetric, &fleetwideOccurrenceCount, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &fleetwideOccurrenceCount, 10, 10000, 0}, false, -1},
-		{args{&testMetric, &fleetwideOccurrenceCount, 1, 20000, 0}, false, -1},
-		{args{&testMetric, &fleetwideOccurrenceCountHighMax, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &fleetwideOccurrenceCountNegativeValue, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &fleetwideHistogram, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &fleetwideHistogram, 10, 10000, 0}, false, -1},
-		{args{&testMetric, &fleetwideHistogram, 1, 20000, 0}, false, -1},
-		{args{&testMetric, &fleetwideHistogramHighMax, 1, 10000, 0}, false, -1},
-
-		// Mean reports
-		{args{&testMetric, &uniqueDeviceNumericStats, 1, 10000, 500}, false, -1},
-		{args{&testMetric, &uniqueDeviceNumericStats, 10, 10000, 500}, false, -1},
-		{args{&testMetric, &uniqueDeviceNumericStats, 1, 20000, 500}, false, -1},
-		{args{&testMetric, &uniqueDeviceNumericStats, 1, 10000, 2000}, false, -1},
-		{args{&testMetric, &hourlyValueNumericStats, 1, 10000, 2000}, false, -1},
-		{args{&testMetric, &hourlyValueNumericStats, 10, 10000, 2000}, false, -1},
-		{args{&testMetric, &hourlyValueNumericStats, 1, 20000, 2000}, false, -1},
-		{args{&testMetric, &hourlyValueNumericStats, 1, 10000, 5000}, false, -1},
-
-		{args{&testMetric, &fleetwideMeans, 1, 10000, 500}, false, -1},
-		{args{&testMetric, &fleetwideMeans, 10, 10000, 500}, false, -1},
-		{args{&testMetric, &fleetwideMeans, 1, 20000, 500}, false, -1},
-		{args{&testMetric, &fleetwideMeans, 1, 10000, 2000}, false, -1},
-		{args{&testMetric, &fleetwideMeansHighMaxCount, 1, 10000, 500}, false, -1},
-		{args{&testMetric, &fleetwideMeansHighMaxCount, 10, 10000, 500}, false, -1},
-		{args{&testMetric, &fleetwideMeansHighMaxCount, 1, 20000, 500}, false, -1},
-		{args{&testMetric, &fleetwideMeansHighMaxCount, 1, 10000, 2000}, false, -1},
-
-		{args{&testMetric, &uniqueDeviceCount, 0, 100000, 0}, true, 0},
-
-		// Reports with EventVectorBufferMax set.
-		{args{&testMetric, &uniqueDeviceCountWithBufferMax, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &fleetwideOccurrenceCountWithBufferMax, 1, 10000, 0}, false, -1},
-		{args{&testMetric, &uniqueDeviceNumericStatsWithBufferMax, 1, 10000, 500}, false, -1},
-		{args{&testMetric, &fleetwideMeansWithBufferMax, 1, 10000, 500}, false, -1},
-
-		// Invalid input
-		{args{&testMetric, &uniqueDeviceNumericStats, 1, 10000, 0}, false, 0},
-		{args{&testMetric, &hourlyValueNumericStats, 1, 10000, 0}, false, 0},
-		{args{&testMetric, &uniqueDeviceNumericStatsMissingMaxValue, 1, 10000, 500}, false, 0},
-		{args{&testMetric, &fleetwideMeansMissingMaxValue, 1, 10000, 500}, false, 0},
-		{args{&testMetric, &fleetwideHistogramMissingMaxCount, 1, 10000, 500}, false, 0},
-		{args{&testMetric, &fleetwideOccurrenceCountMissingMaxValue, 1, 10000, 500}, false, 0},
-
-		// This report type is not currently supported.
-		{args{&testMetric, &unsupportedReport, 1, 10000, 0}, false, 0},
-	}
-
-	for _, test := range tests {
-		input := test.input
-		result, err := errorCalculator.Estimate(input.metric, input.report, input.epsilon, input.population, input.minDenominatorEstimate)
-		if test.valid && err != nil {
-			t.Errorf("Estimate failed for report %v: %v", input.report.ReportName, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("Estimate accepted invalid report: %v", input.report.ReportName)
-		} else if test.valid && result != test.expected {
-			t.Errorf("Estimate for report %v (epsilon: %v, population: %v, minDenominator: %v): expected %v, got %v", input.report.ReportName, input.epsilon, input.population, input.minDenominatorEstimate, test.expected, result)
-		}
-	}
-}
diff --git a/src/bin/config_parser/src/privacy/poisson.go b/src/bin/config_parser/src/privacy/poisson.go
new file mode 100644
index 0000000..f008872
--- /dev/null
+++ b/src/bin/config_parser/src/privacy/poisson.go
@@ -0,0 +1,261 @@
+// Copyright 2023 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package privacy
+
+import (
+	"fmt"
+	"math"
+
+	"gonum.org/v1/gonum/mathext"
+)
+
+type poissonDist struct {
+	lambda float64
+}
+
+func newPoissonDist(lambda float64) (*poissonDist, error) {
+	if lambda < 0 {
+		return nil, fmt.Errorf("poisson parameter must be non-negative, got %v", lambda)
+	}
+	return &poissonDist{lambda: lambda}, nil
+}
+
+// Computes the probability of an output less than or equal to |x|.
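+//
+// This uses the identity that, for integer k, the Poisson CDF P(X <= k) equals
+// Q(k + 1, lambda), the regularized upper incomplete gamma function.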
+func (d poissonDist) cdf(x float64) float64 {
+	if x < 0 {
+		return 0
+	}
+
+	return mathext.GammaIncRegComp(math.Floor(x+1), d.lambda)
+}
+
+// Computes the probability of an output strictly greater than |x|.
+//
+// This is equivalent to 1 - d.cdf(x), but may be more numerically stable.
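+//
+// It relies on the identity that, for integer k, the Poisson upper tail
+// P(X > k) equals the regularized lower incomplete gamma function at
+// (k + 1, lambda).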
+func (d poissonDist) sf(x float64) float64 {
+	if x < 0 {
+		return 1
+	}
+
+	return mathext.GammaIncReg(math.Floor(x+1), d.lambda)
+}
+
+// Computes the probability of an output of exactly |x| for a Poisson distribution with parameter |lambda|.
+func (d poissonDist) pmf(x float64) float64 {
+	if x < 0 || math.Floor(x) != x {
+		return 0
+	}
+
+	// Since the pmf for the Poisson distribution includes division by a
+	// factorial, it is hard to compute directly, so we compute the logarithm
+	// of the pmf and exponentiate it instead.
+	lg, _ := math.Lgamma(math.Floor(x) + 1.0) // Computes log(x!)
+	lpmf := x*math.Log(d.lambda) - d.lambda - lg
+	return math.Exp(lpmf)
+}
+
+// Assuming fn is true for all integers in the range [0, n] and false for all
+// integers greater than n, this function returns n.
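+//
+// For example, if fn(x) reports whether x <= 5.5, findBoundary(fn) returns 5.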
+func findBoundary(fn func(x float64) bool) float64 {
+	c1, c2 := 0.0, 1.0
+
+	for {
+		if !fn(c2) {
+			break
+		}
+
+		c1 = c2
+		c2 = c2 * 2
+	}
+
+	for c1 != c2 {
+		mid := math.Ceil((c1 + c2) / 2)
+
+		if fn(mid) {
+			c1 = mid
+		} else {
+			c2 = mid - 1
+		}
+	}
+
+	return c1
+}
+
+// Assuming fn(x) is true for lowGuess <= x < n (for some n) and false for
+// x >= n, this function will return some value y such that fn(y) is false and
+// n - precision < y < n + precision.
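+//
+// For example, with fn(x) = (x < 3.0), lowGuess = 0, highGuess = 10, and
+// precision = 0.01, the result lies within 0.01 of 3.0.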
+func findBoundaryFloat(fn func(x float64) bool, lowGuess float64, highGuess float64, precision float64) float64 {
+	l := lowGuess
+	h := highGuess
+
+	for {
+		if !fn(h) {
+			break
+		}
+
+		l = h
+		h = h * 2
+	}
+
+	for (h - l) > precision {
+		mid := l + (h-l)/2
+		if fn(mid) {
+			l = mid
+		} else {
+			h = mid
+		}
+	}
+
+	return h
+}
+
+// Creates a privacy loss distribution relative to one observation being added.
+//
+// The Poisson mechanism for computing a scalar-valued function f outputs
+// the sum of the true value of the function and a noise drawn from the Poisson
+// distribution. Recall that the Poisson distribution with parameter p has
+// probability mass function exp(-p) * p^x / x! at x for any non-negative
+// integer x. We only consider f with sensitivity 1 here.
+//
+// The privacy loss distribution of the Poisson mechanism w.r.t. adding one is
+// equivalent to the privacy loss distribution between the Poisson distribution
+// and the same distribution shifted by +1. Specifically, the privacy loss
+// distribution is generated as follows: first pick x according to the Poisson
+// distribution. Then, let the privacy loss be ln(pmf(x) / pmf(x - 1)), which
+// is equal to ln(p / x).
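+//
+// (The simplification follows from the pmf above:
+// pmf(x) / pmf(x - 1) = [p^x / x!] / [p^(x-1) / (x-1)!] = p / x.)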
+func newPoissonAddPld(lambda, discretization, truncation float64) *PrivacyLossDistribution {
+	dist, _ := newPoissonDist(lambda)
+	lower := findBoundary(func(x float64) bool { return dist.cdf(x-1) <= truncation/2 })
+	if lower < 1.0 {
+		lower = 1.0
+	}
+
+	upper := 1 + findBoundary(func(x float64) bool { return dist.sf(x) > truncation/2 })
+
+	pmf := make(ProbabilityMassFunction)
+
+	// Upper truncated tail
+	privacyLoss := math.Log(lambda / upper)
+	roundedPrivacyLoss := int(math.Ceil(privacyLoss / discretization))
+	pmf[roundedPrivacyLoss] = dist.sf(upper)
+
+	for x := lower; x <= upper; x++ {
+		privacyLoss := math.Log(lambda / x)
+		roundedPrivacyLoss := int(math.Ceil(privacyLoss / discretization))
+		val, ok := pmf[roundedPrivacyLoss]
+		if !ok {
+			val = 0.0
+		}
+		pmf[roundedPrivacyLoss] = val + dist.pmf(x)
+	}
+
+	// Lower truncated tail
+	infiniteMass := dist.cdf(lower - 1)
+
+	return &PrivacyLossDistribution{
+		discretization:        discretization,
+		infiniteMass:          infiniteMass,
+		pmf:                   pmf,
+		pessimisticEstimation: true,
+	}
+}
+
+// Creates a privacy loss distribution relative to one observation being removed.
+//
+// The Poisson mechanism for computing a scalar-valued function f simply outputs
+// the sum of the true value of the function and a noise drawn from the Poisson
+// distribution. Recall that the Poisson distribution with parameter p has
+// probability mass function exp(-p) * p^x / x! at x for any non-negative
+// integer x. We only consider f with sensitivity 1 here.
+//
+// The privacy loss distribution of the Poisson mechanism w.r.t. removing one is
+// equivalent to the privacy loss distribution between the Poisson distribution
+// and the same distribution shifted by -1. Specifically, the privacy loss
+// distribution is generated as follows: first pick x according to the Poisson
+// distribution. Then, let the privacy loss be ln(pmf(x) / pmf(x + 1)), which
+// is equal to ln((x + 1) / p).
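+//
+// (Analogously to the add case, pmf(x) / pmf(x + 1) = (x + 1) / p.)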
+func newPoissonRemovePld(lambda, discretization, truncation float64) *PrivacyLossDistribution {
+	dist, _ := newPoissonDist(lambda)
+	lower := findBoundary(func(x float64) bool { return dist.cdf(x-2) <= truncation/2 })
+	upper := 1 + findBoundary(func(x float64) bool { return dist.sf(x-1) > truncation/2 })
+
+	pmf := make(ProbabilityMassFunction)
+
+	// Lower truncated tail
+	privacyLoss := math.Log(lower / lambda)
+	roundedPrivacyLoss := int(math.Ceil(privacyLoss / discretization))
+	pmf[roundedPrivacyLoss] = dist.cdf(lower - 2)
+
+	for x := lower; x <= upper; x++ {
+		privacyLoss := math.Log(x / lambda)
+		roundedPrivacyLoss := int(math.Ceil(privacyLoss / discretization))
+		val, ok := pmf[roundedPrivacyLoss]
+		if !ok {
+			val = 0.0
+		}
+		pmf[roundedPrivacyLoss] = val + dist.pmf(x-1)
+	}
+
+	// Upper truncated tail
+	infiniteMass := dist.sf(upper - 1)
+
+	return &PrivacyLossDistribution{
+		discretization:        discretization,
+		infiniteMass:          infiniteMass,
+		pmf:                   pmf,
+		pessimisticEstimation: true,
+	}
+}
+
+// Creates a privacy loss distribution for the Poisson mechanism with parameter |lambda|, composed across |sparsity| contributions.
+func newPoissonPld(lambda float64, sparsity uint64, discretization float64, truncation float64) (*PrivacyLossDistribution, error) {
+	add := newPoissonAddPld(lambda, discretization, truncation)
+	rm := newPoissonRemovePld(lambda, discretization, truncation)
+	pld, err := composeHeterogeneousPLD(add, rm, truncation)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return composeHomogeneousPLD(pld, sparsity, truncation)
+}
+
+type PoissonParameterCalculator struct {
+	discretization float64
+	truncation     float64
+	precision      float64
+}
+
+// Builds a new PoissonParameterCalculator with default discretization and truncation parameters.
+func NewPoissonParameterCalculator() PoissonParameterCalculator {
+	return PoissonParameterCalculator{
+		discretization: 1e-5,
+		truncation:     1e-15,
+		precision:      1e-5,
+	}
+}
+
+// Reports whether the provided lambda, at the specified sparsity, provides privacy at the specified epsilon and delta.
+func (c PoissonParameterCalculator) IsPrivate(lambda float64, sparsity uint64, epsilon float64, delta float64) bool {
+	if lambda == 0 {
+		return false
+	}
+
+	pld, err := newPoissonPld(lambda, sparsity, c.discretization, c.truncation)
+	if err != nil {
+		return false
+	}
+
+	return pld.getDeltaForPrivacyLossDistribution(epsilon) < delta
+}
+
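+// Returns, to within c.precision, approximately the smallest Poisson mean
+// that IsPrivate accepts at the specified sparsity, epsilon, and delta.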
+func (c PoissonParameterCalculator) GetBestPoissonMean(sparsity uint64, epsilon float64, delta float64) float64 {
+	fn := func(x float64) bool {
+		return !c.IsPrivate(x, sparsity, epsilon, delta)
+	}
+
+	return findBoundaryFloat(fn, 0.0, 1.0, c.precision)
+}
diff --git a/src/bin/config_parser/src/privacy/poisson_test.go b/src/bin/config_parser/src/privacy/poisson_test.go
new file mode 100644
index 0000000..5b09f88
--- /dev/null
+++ b/src/bin/config_parser/src/privacy/poisson_test.go
@@ -0,0 +1,198 @@
+// Copyright 2023 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package privacy
+
+import (
+	"math"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+func cmpFloat() cmp.Option {
+	return cmpFloatDelta(1e-10)
+}
+
+func cmpFloatDelta(precision float64) cmp.Option {
+	return cmp.Comparer(func(x, y float64) bool {
+		return math.Abs(x-y) < precision
+	})
+}
+
+func TestPoissonDist(t *testing.T) {
+	if d, err := newPoissonDist(-1.0); d != nil || err == nil {
+		t.Error("Created a poisson distribution with negative parameter.")
+	}
+
+	lambda := 10.0
+	dist, err := newPoissonDist(lambda)
+	if err != nil {
+		t.Fatalf("Could not create poisson distribution: %v", err)
+	}
+
+	length := 20
+	goldCdf := make([]float64, length)
+	goldPmf := make([]float64, length)
+	gotCdf := make([]float64, length)
+	gotPmf := make([]float64, length)
+
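+	// Build the gold values from the Poisson recurrence
+	// pmf(k) = pmf(k-1) * lambda / k, with pmf(0) = exp(-lambda).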
+	for k := 0; k < length; k++ {
+		gotCdf[k] = dist.cdf(float64(k))
+		gotPmf[k] = dist.pmf(float64(k))
+
+		if k == 0 {
+			goldPmf[0] = math.Exp(-lambda)
+			goldCdf[0] = goldPmf[0]
+		} else {
+			goldPmf[k] = goldPmf[k-1] * lambda / float64(k)
+			goldCdf[k] = goldCdf[k-1] + goldPmf[k]
+		}
+	}
+
+	if s := cmp.Diff(goldPmf, gotPmf, cmpFloat()); len(s) > 0 {
+		t.Errorf("goldPmf != gotPmf:\n%s", s)
+	}
+
+	if s := cmp.Diff(goldCdf, gotCdf, cmpFloat()); len(s) > 0 {
+		t.Errorf("goldCdf != gotCdf:\n%s", s)
+	}
+}
+
+func TestFindBoundary(t *testing.T) {
+	expected := 5.0
+	fn := func(x float64) bool { return x <= 5.5 }
+
+	if got := findBoundary(fn); got != expected {
+		t.Errorf("Expected findBoundary(fn) = %v but got %v.", expected, got)
+	}
+}
+
+func getBoundaryFn(boundary float64) func(float64) bool {
+	return func(x float64) bool { return x < boundary }
+}
+
+func TestFindBoundaryFloat(t *testing.T) {
+	low := 0.0
+	high := 10.0
+	precision := 0.01
+
+	expected := 3.0
+
+	x := findBoundaryFloat(getBoundaryFn(expected), low, high, precision)
+	if !cmp.Equal(x, expected, cmpFloatDelta(precision)) {
+		t.Errorf("findBoundaryError expected %v but got %v", expected, x)
+	}
+
+	expected = 20.0
+	x = findBoundaryFloat(getBoundaryFn(expected), low, high, precision)
+	if !cmp.Equal(x, expected, cmpFloatDelta(precision)) {
+		t.Errorf("findBoundaryError expected %v but got %v", expected, x)
+	}
+}
+
+func TestPldAddOne(t *testing.T) {
+	discretization := 0.01
+	truncation := 0.01
+	lambda := 10.0
+	res := newPoissonAddPld(lambda, discretization, truncation)
+
+	if !cmp.Equal(res.discretization, discretization, cmpFloat()) {
+		t.Errorf("PoissonAddPld() discretization got %f, expected %f", res.discretization, discretization)
+	}
+
+	goldInfiniteMass := 0.0027693957155115775
+	if !cmp.Equal(res.infiniteMass, goldInfiniteMass, cmpFloat()) {
+		t.Errorf("PoissonAddPld() infiniteMass got %f, expected %f", res.infiniteMass, goldInfiniteMass)
+	}
+
+	if !res.pessimisticEstimation {
+		t.Errorf("PoissonAddPld() pessimisticEstimation got false")
+	}
+
+	expectedPmf := make(ProbabilityMassFunction)
+	expectedPmf[-64] = 0.007186504603854357
+	expectedPmf[-58] = 0.007091108993195334
+	expectedPmf[-53] = 0.012763996187751505
+	expectedPmf[-47] = 0.021698793519177594
+	expectedPmf[-40] = 0.034718069630684245
+	expectedPmf[-33] = 0.05207710444602615
+	expectedPmf[-26] = 0.07290794622443707
+	expectedPmf[-18] = 0.09478033009176803
+	expectedPmf[-9] = 0.11373639611012128
+	expectedPmf[0] = 0.12511003572113372
+	expectedPmf[11] = 0.12511003572113372
+	expectedPmf[23] = 0.11259903214902009
+	expectedPmf[36] = 0.090079225719216
+	expectedPmf[52] = 0.06305545800345125
+	expectedPmf[70] = 0.03783327480207079
+	expectedPmf[92] = 0.01891663740103538
+	expectedPmf[121] = 0.007566654960414144
+
+	if diff := cmp.Diff(expectedPmf, res.pmf, cmpFloat()); diff != "" {
+		t.Errorf("%v.PoissonAddPld() pmf diff (-want +got):\n%s", res.pmf, diff)
+	}
+}
+
+func TestPldRemoveOne(t *testing.T) {
+	discretization := 0.01
+	truncation := 0.01
+	lambda := 10.0
+	res := newPoissonRemovePld(lambda, discretization, truncation)
+
+	if !cmp.Equal(res.discretization, discretization, cmpFloat()) {
+		t.Errorf("PoissonRemovePld() discretization got %f, expected %f", res.discretization, discretization)
+	}
+
+	goldInfiniteMass := 0.0034543419758568334
+	if !cmp.Equal(res.infiniteMass, goldInfiniteMass, cmpFloat()) {
+		t.Errorf("PoissonRemovePld() infiniteMass got %f, expected %f", res.infiniteMass, goldInfiniteMass)
+	}
+
+	if !res.pessimisticEstimation {
+		t.Errorf("PoissonRemovePld() pessimisticEstimation got false")
+	}
+
+	expectedPmf := make(ProbabilityMassFunction)
+	expectedPmf[-91] = 0.010336050675925721
+	expectedPmf[-69] = 0.01891663740103538
+	expectedPmf[-51] = 0.03783327480207079
+	expectedPmf[-35] = 0.06305545800345125
+	expectedPmf[-22] = 0.090079225719216
+	expectedPmf[-10] = 0.11259903214902009
+	expectedPmf[0] = 0.12511003572113372
+	expectedPmf[10] = 0.12511003572113372
+	expectedPmf[19] = 0.11373639611012128
+	expectedPmf[27] = 0.09478033009176803
+	expectedPmf[34] = 0.07290794622443707
+	expectedPmf[41] = 0.05207710444602615
+	expectedPmf[48] = 0.034718069630684245
+	expectedPmf[54] = 0.021698793519177594
+	expectedPmf[59] = 0.012763996187751505
+	expectedPmf[65] = 0.007091108993195334
+	expectedPmf[70] = 0.003732162627997529
+
+	if diff := cmp.Diff(expectedPmf, res.pmf, cmpFloat()); diff != "" {
+		t.Errorf("%v.PoissonRemovePld() pmf diff (-want +got):\n%s", res.pmf, diff)
+	}
+}
+
+func TestIsPrivate(t *testing.T) {
+	c := NewPoissonParameterCalculator()
+
+	lambda := 85.937
+	epsilon := 1.0
+	delta := 1e-10
+	sparsity := uint64(1)
+
+	if !c.IsPrivate(lambda, sparsity, epsilon, delta) {
+		t.Errorf("IsPrivate unexpectedly false")
+	}
+
+	lambda = lambda - 10
+	if c.IsPrivate(lambda, sparsity, epsilon, delta) {
+		t.Errorf("IsPrivate unexpectedly true")
+	}
+}
diff --git a/src/bin/config_parser/src/privacy/privacy_calculations.go b/src/bin/config_parser/src/privacy/privacy_calculations.go
new file mode 100644
index 0000000..2ab61fa
--- /dev/null
+++ b/src/bin/config_parser/src/privacy/privacy_calculations.go
@@ -0,0 +1,111 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Calculations needed for privacy enabled reports.
+
+package privacy
+
+import (
+	"config"
+	"fmt"
+)
+
+// Returns the maximum number of event vectors for which a device stores local aggregates.
+// This is either the event_vector_buffer_max value specified in the ReportDefinition, or
+// (if that field is unset) the total number of valid event vectors for the parent metric.
+func GetEventVectorBufferMax(metric *config.MetricDefinition, report *config.ReportDefinition) (bufferMax uint64) {
+	if report.EventVectorBufferMax != 0 {
+		return report.EventVectorBufferMax
+	}
+	return GetNumEventVectors(metric)
+}
+
+// Returns the total number of valid event vectors for a MetricDefinition.
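+// For example, a metric with one dimension of 3 event codes and another of 4
+// has 3 * 4 = 12 valid event vectors.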
+func GetNumEventVectors(metric *config.MetricDefinition) (numEventVectors uint64) {
+	numEventVectors = 1
+	for _, dim := range metric.MetricDimensions {
+		numEventVectors *= uint64(numEventCodes(dim))
+	}
+	return numEventVectors
+}
+
+// A helper function returning the number of valid event codes for a MetricDimension.
+func numEventCodes(dim *config.MetricDefinition_MetricDimension) (numEventCodes uint32) {
+	if dim.MaxEventCode != 0 {
+		return dim.MaxEventCode + 1
+	}
+	return uint32(len(dim.EventCodes))
+}
+
+// Returns the number of histogram buckets in an IntegerBuckets.
+func GetNumHistogramBuckets(buckets *config.IntegerBuckets) (numBuckets uint64, err error) {
+	switch buckets.Buckets.(type) {
+	case *config.IntegerBuckets_Exponential:
+		numBuckets, err = uint64(buckets.GetExponential().GetNumBuckets()), nil
+	case *config.IntegerBuckets_Linear:
+		numBuckets, err = uint64(buckets.GetLinear().GetNumBuckets()), nil
+	case nil:
+		err = fmt.Errorf("IntegerBuckets type not set")
+	default:
+		err = fmt.Errorf("unexpected IntegerBuckets type")
+	}
+	return numBuckets, err
+}
+
+// Returns the dimensions of a CountMin sketch for a report of type StringCounts, or an error if
+// the report is of a different type. Currently these dimensions are hard-coded.
+func GetCountMinSketchDimensionsForReport(report *config.ReportDefinition) (numCellsPerHash int32, numHashes int32, err error) {
+	switch report.ReportType {
+	case config.ReportDefinition_STRING_COUNTS,
+		config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS:
+		{
+			numCellsPerHash = 10
+			numHashes = 5
+			err = nil
+		}
+	default:
+		err = fmt.Errorf("expected report of type StringCounts or UniqueDeviceStringCounts, found %v",
+			report.ReportType)
+	}
+	return numCellsPerHash, numHashes, err
+}
+
+// Returns the total number of possible private index values for a report.
+func GetNumPrivateIndices(metric *config.MetricDefinition, report *config.ReportDefinition) (numPrivateIndices uint64, err error) {
+	numPrivateIndices = 0
+	err = nil
+	switch report.GetReportType() {
+	case config.ReportDefinition_UNIQUE_DEVICE_COUNTS:
+		numPrivateIndices = GetNumEventVectors(metric)
+	case config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
+		config.ReportDefinition_HOURLY_VALUE_NUMERIC_STATS,
+		config.ReportDefinition_UNIQUE_DEVICE_NUMERIC_STATS:
+		numPrivateIndices = GetNumEventVectors(metric) * uint64(report.GetNumIndexPoints())
+	case config.ReportDefinition_FLEETWIDE_MEANS:
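+		// Each event vector contributes two values per index point (a sum and
+		// a count), hence the factor of 2.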
+		numPrivateIndices = 2 * GetNumEventVectors(metric) * uint64(report.GetNumIndexPoints())
+	case config.ReportDefinition_HOURLY_VALUE_HISTOGRAMS,
+		config.ReportDefinition_UNIQUE_DEVICE_HISTOGRAMS:
+		var numBuckets uint64
+		numBuckets, err = GetNumHistogramBuckets(report.GetIntBuckets())
+		numPrivateIndices = GetNumEventVectors(metric) * numBuckets
+	case config.ReportDefinition_FLEETWIDE_HISTOGRAMS:
+		var numBuckets uint64
+		switch metric.GetMetricType() {
+		case config.MetricDefinition_INTEGER:
+			numBuckets, err = GetNumHistogramBuckets(report.GetIntBuckets())
+			numPrivateIndices = GetNumEventVectors(metric) * numBuckets * uint64(report.GetNumIndexPoints())
+		case config.MetricDefinition_INTEGER_HISTOGRAM:
+			numBuckets, err = GetNumHistogramBuckets(metric.GetIntBuckets())
+			numPrivateIndices = GetNumEventVectors(metric) * numBuckets * uint64(report.GetNumIndexPoints())
+		}
+	case config.ReportDefinition_STRING_COUNTS,
+		config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS:
+		var numCellsPerHash, numHashes int32
+		numCellsPerHash, numHashes, err = GetCountMinSketchDimensionsForReport(report)
+		numPrivateIndices = uint64(numCellsPerHash) * uint64(numHashes) * GetNumEventVectors(metric)
+	default:
+		err = fmt.Errorf("unexpected report type %v", report.ReportType)
+	}
+	return numPrivateIndices, err
+}
diff --git a/src/bin/config_parser/src/privacy/privacy_calculations_test.go b/src/bin/config_parser/src/privacy/privacy_calculations_test.go
new file mode 100644
index 0000000..d567c2e
--- /dev/null
+++ b/src/bin/config_parser/src/privacy/privacy_calculations_test.go
@@ -0,0 +1,61 @@
+// Copyright 2020 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package privacy
+
+import (
+	"config"
+	"testing"
+
+	"github.com/golang/glog"
+)
+
+// The try-bots expect glog to be imported, but we do not use it.
+var _ = glog.Info
+
+func TestGetEventVectorBufferMax(t *testing.T) {
+	var eventVectorBufferMax uint64 = 5
+	var maxEventCode uint32 = 2
+
+	dimensionMaxEventCode := config.MetricDefinition_MetricDimension{
+		Dimension:    "dim0",
+		MaxEventCode: maxEventCode,
+	}
+	dimensionNoMaxEventCode := config.MetricDefinition_MetricDimension{
+		Dimension:  "dim1",
+		EventCodes: map[uint32]string{0: "event0", 1: "event1", 5: "event5"},
+	}
+	metricDimensions := []*config.MetricDefinition_MetricDimension{
+		&dimensionMaxEventCode,
+		&dimensionNoMaxEventCode,
+	}
+	metricWithDimensions := config.MetricDefinition{
+		MetricName:       "MetricWithDimensions",
+		MetricDimensions: metricDimensions,
+	}
+
+	reportMaxSet := config.ReportDefinition{
+		ReportName:           "ReportWithEventVectorBufferMaxSet",
+		EventVectorBufferMax: eventVectorBufferMax,
+	}
+	reportMaxUnset := config.ReportDefinition{
+		ReportName: "ReportWithEventVectorBufferMaxUnset",
+	}
+	var tests = []struct {
+		metric   *config.MetricDefinition
+		report   *config.ReportDefinition
+		expected uint64
+	}{
+		{&metricWithDimensions, &reportMaxSet, uint64(eventVectorBufferMax)},
+		{&metricWithDimensions, &reportMaxUnset, uint64((maxEventCode + 1) * 3)},
+	}
+	for _, test := range tests {
+		result := GetEventVectorBufferMax(test.metric, test.report)
+		if result != test.expected {
+			t.Errorf("eventVectorBufferMax() for metric %s and report %s: expected %d, got %d",
+				test.metric.MetricName, test.report.ReportName, test.expected, result)
+		}
+	}
+}
diff --git a/src/bin/config_parser/src/privacy/privacy_encoding_params.go b/src/bin/config_parser/src/privacy/privacy_encoding_params.go
index 2b57f6c..3f9142c 100644
--- a/src/bin/config_parser/src/privacy/privacy_encoding_params.go
+++ b/src/bin/config_parser/src/privacy/privacy_encoding_params.go
@@ -8,257 +8,61 @@
 // Reports that use the Poisson encoding scheme must manually specify privacy
 // encoding parameters.
 //
-// TODO(b/278932979): update this comment once Poisson encoding parameters are
+// TODO(https://fxbug.dev/278932979): update this comment once Poisson encoding parameters are
 // populated by the registry parser.
 
 package privacy
 
 import (
 	"config"
-	"encoding/csv"
 	"fmt"
-	"os"
-	"sort"
-	"strconv"
 )
 
-// The parameters which the PrivacyEncoder needs in order to encode observations for a report.
-// The config parser will copy these values into the ReportDefinition for the corresponding report.
-//
-// TODO(b/278932979): update this struct once Poisson encoding parameters are
-// populated by the registry parser.
-type PrivacyEncodingParams struct {
+// Holds the privacy parameters for a single private report.
+type PrivacyParams struct {
+	sparsity  uint64
+	epsilon   float64
+	delta     float64
+	lambda    float64
+	threshold uint32
 }
 
-// Constants needed in order to map the PrivacyLevel of a report to a PrivacyEncodingParams.
-type privacyConstants struct {
-	// The target shuffled epsilon corresponding to each Cobalt PrivacyLevel.
-	EpsilonForPrivacyLevel map[config.ReportDefinition_PrivacyLevel]float64
-	// The estimated population size.
-	population uint64
-}
-
-// The key used to look up the PrivacyEncodingParams for a given report from a precomputed table.
-// These values are derived from the ReportDefinition and a privacyConstants.
-type paramsMapKey struct {
-	epsilon    float64
-	population uint64
-	sparsity   uint64
-}
-
-// A container for the sets of values which may appear in a paramsMapKey.
-// Each list should be sorted in increasing order.
-type paramsKeyLists struct {
-	epsilons    []float64
-	populations []uint64
-	sparsities  []uint64
-}
-
-// Maintains a lookup table |paramMap| mapping paramsMapKeys to PrivacyEncodingParams.
-// Given a ReportDefinition, finds the best-match key and returns the corresponding PrivacyEncodingParams.
-type PrivacyEncodingParamsCalculator struct {
-	Constants privacyConstants
-	// The lookup table.
-	paramMap map[paramsMapKey]PrivacyEncodingParams
-	// The epsilon values, population sizes, and sparsities which are mapped in |paramMap|.
-	// Any element of the cross product of the lists in |mapped| should be a valid key of |paramMap|.
-	mapped paramsKeyLists
-}
-
-// Returns a privacyConstants struct with hard-coded values.
-// TODO(b/278917650): Decide how these values should be configured.
-func makePrivacyConstants() (pc privacyConstants) {
-	pc.EpsilonForPrivacyLevel = map[config.ReportDefinition_PrivacyLevel]float64{
-		config.ReportDefinition_LOW_PRIVACY:    10.0,
-		config.ReportDefinition_MEDIUM_PRIVACY: 5.0,
-		config.ReportDefinition_HIGH_PRIVACY:   1.0,
-	}
-	pc.population = 10000
-	return pc
-}
-
-// Public factory method for creating a PrivacyEncodingParamsCalculator from records stored in CSV files.
-//
-// The file at |paramPath| should contain records of form {epsilon, population, sparsity, prob_bit_flip, num_index_points}.
-//
-// TODO(b/278932979): update this comment once Poisson encoding parameters are
-// populated by the registry parser.
-func NewPrivacyEncodingParamsCalculator(paramPath string) (calc *PrivacyEncodingParamsCalculator, err error) {
-	paramRecords, err := readFromCsvFile(paramPath)
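+// Extracts the privacy parameters for report |r| from its ShuffledDp privacy
+// config, deriving lambda from the configured poisson_mean and
+// reporting_threshold.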
+func GetPrivacyParamsForReport(m *config.MetricDefinition, r *config.ReportDefinition) (params *PrivacyParams, err error) {
+	sparsity, err := getSparsityForReport(m, r)
 	if err != nil {
-		return calc, err
+		return
 	}
 
-	return newPrivacyEncodingParamsCalculatorFromRecords(paramRecords)
-}
-
-// Alternative public factory method for creating a PrivacyEncodingParamsCalculator in unit tests.
-func NewPrivacyEncodingParamsCalculatorForTesting(paramRecords [][]string) (calc *PrivacyEncodingParamsCalculator, err error) {
-	return newPrivacyEncodingParamsCalculatorFromRecords(paramRecords)
-}
-
-// Private factory method for creating a PrivacyEncodingParamsCalculator from in-memory records.
-// See NewPrivacyEncodingParamsCalculator() for the expected format of the records.
-func newPrivacyEncodingParamsCalculatorFromRecords(paramRecords [][]string) (calc *PrivacyEncodingParamsCalculator, err error) {
-	pc := makePrivacyConstants()
-
-	m, lists, err := mapPrivacyEncodingParams(paramRecords)
-	if err != nil {
-		return calc, err
-	}
-
-	return &PrivacyEncodingParamsCalculator{Constants: pc, paramMap: m, mapped: lists}, nil
-}
-
-// Reads in records from a csv file.
-func readFromCsvFile(path string) (records [][]string, err error) {
-	info, err := os.Stat(path)
-	if err != nil {
-		return records, err
-	}
-
-	if !info.Mode().IsRegular() {
-		return records, fmt.Errorf("%v is not a file.", path)
-	}
-
-	f, err := os.Open(path)
-	if err != nil {
-		return records, err
-	}
-
-	file_reader := csv.NewReader(f)
-	records, err = file_reader.ReadAll()
-	if err != nil {
-		return records, err
-	}
-
-	return records, nil
-}
-
-// Parses |records| and returns a lookup table |m| mapping paramsMapKeys to PrivacyEncodingParams.
-// Also returns a paramsKeyLists containing lists of the mapped epsilons, population sizes, and sparsities,
-// with each list sorted in increasing order.
-func mapPrivacyEncodingParams(records [][]string) (m map[paramsMapKey]PrivacyEncodingParams, lists paramsKeyLists, err error) {
-	m = make(map[paramsMapKey]PrivacyEncodingParams)
-
-	epsilons := make(map[float64]bool)
-	populations := make(map[uint64]bool)
-	sparsities := make(map[uint64]bool)
-
-	for _, record := range records {
-		key, params, err := parsePrivacyEncodingRecord(record)
-		if err != nil {
-			return m, lists, err
-		}
-		m[key] = params
-
-		epsilons[key.epsilon] = true
-		populations[key.population] = true
-		sparsities[key.sparsity] = true
-	}
-
-	lists = paramsKeyLists{}
-	sortAndStoreKeysFloat64(epsilons, &lists.epsilons)
-	sortAndStoreKeysUint64(populations, &lists.populations)
-	sortAndStoreKeysUint64(sparsities, &lists.sparsities)
-
-	return m, lists, nil
-}
-
-// Parses a record into a paramsMapKey and a PrivacyEncodingParams. The expected format of a record is:
-// {epsilon, population, sparsity, prob_bit_flip, num_index_points}.
-//
-// TODO(b/278932979): update this comment once Poisson encoding parameters are
-// populated by the registry parser.
-func parsePrivacyEncodingRecord(record []string) (key paramsMapKey, params PrivacyEncodingParams, err error) {
-	if len(record) != 5 {
-		return key, params, fmt.Errorf("wrong record size: %d", len(record))
-	}
-	epsilon, err := strconv.ParseFloat(record[0], 64)
-	if err != nil {
-		return key, params, err
-	}
-	population, err := strconv.ParseUint(record[1], 10, 64)
-	if err != nil {
-		return key, params, err
-	}
-	sparsity, err := strconv.ParseUint(record[2], 10, 64)
-	if err != nil {
-		return key, params, err
-	}
-	key.epsilon = epsilon
-	key.population = population
-	key.sparsity = sparsity
-
-	return key, params, nil
-}
-
-// Extracts the keys from |m|, sorts them in increasing order, and stores them in |vals|.
-func sortAndStoreKeysFloat64(m map[float64]bool, vals *[]float64) {
-	*vals = make([]float64, 0, len(m))
-	for key, _ := range m {
-		*vals = append(*vals, key)
-	}
-	sort.Float64s(*vals)
-}
-
-// Extracts the keys from |m|, sorts them in increasing order, and stores them in |vals|.
-func sortAndStoreKeysUint64(m map[uint64]bool, vals *[]uint64) {
-	*vals = make([]uint64, 0, len(m))
-	for key, _ := range m {
-		*vals = append(*vals, key)
-	}
-	sort.Slice(*vals, func(i, j int) bool { return (*vals)[i] < (*vals)[j] })
-}
-
-// GetPrivacyEncodingParamsForReport looks up the corresponding PrivacyEncodingParams from |calc|'s
-// paramMap, given a |metric| and |report|.
-//
-// If paramMap does not have a key which exactly matches the values drawn from |metric|, |report|,
-// and |calc.constants|, then parameters are returned for the closest key which provides at least as
-// much privacy as is required by |report|'s PrivacyLevel. See getBestMappedKey for more details.
-func (calc *PrivacyEncodingParamsCalculator) GetPrivacyEncodingParamsForReport(metric *config.MetricDefinition, report *config.ReportDefinition) (params PrivacyEncodingParams, err error) {
-	epsilon, ok := calc.Constants.EpsilonForPrivacyLevel[report.PrivacyLevel]
+	privacyConfig, ok := r.PrivacyConfig.(*config.ReportDefinition_ShuffledDp)
 	if !ok {
-		return params, fmt.Errorf("no epsilon found for privacy level: %v", report.PrivacyLevel)
+		err = fmt.Errorf("Expected PrivacyConfig to be ShuffledDp, but instead got: %T", r.PrivacyConfig)
 	}
+	c := privacyConfig.ShuffledDp
 
-	sparsity, err := getSparsityForReport(metric, report)
-	if err != nil {
-		return params, err
+	params = &PrivacyParams{
+		sparsity:  sparsity,
+		epsilon:   c.Epsilon,
+		delta:     c.Delta,
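+		// The effective Poisson parameter is the per-report poisson_mean
+		// scaled by the reporting_threshold.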
+		lambda:    c.PoissonMean * float64(c.ReportingThreshold),
+		threshold: c.ReportingThreshold,
 	}
-
-	rangeSize, err := GetIntegerRangeSizeForReport(report)
-	if err != nil {
-		return params, err
-	}
-
-	return calc.GetPrivacyEncodingParams(epsilon, calc.Constants.population, sparsity, rangeSize)
-	if err != nil {
-		return params, err
-	}
-
-	return params, err
+	return
 }
 
-// Given an |epsilon|, |population|, and |sparsity|, looks up the corresponding
-// PrivacyEncodingParams from |calc|'s paramMap.
-//
-// If paramMap does not have a key which exactly matches the tuple (|epsilon|, |population|, |sparsity|),
-// then parameters are returned for the closest key which provides |epsilon|-differential privacy
-// (in the shuffled model) or better. See getBestMappedKey for more details.
-func (calc *PrivacyEncodingParamsCalculator) GetPrivacyEncodingParams(epsilon float64, population uint64, sparsity uint64, rangeSize uint64) (params PrivacyEncodingParams, err error) {
-	key, err := getBestMappedKey(epsilon, population, sparsity, &calc.mapped)
+// Returns nil if the configured lambda achieves the desired privacy level; otherwise returns an error reporting the delta that lambda actually achieves.
+func (p PrivacyParams) Validate() error {
+	calc := NewPoissonParameterCalculator()
+	if calc.IsPrivate(p.lambda, p.sparsity, p.epsilon, p.delta) {
+		return nil
+	}
+
+	pld, err := newPoissonPld(p.lambda, p.sparsity, calc.discretization, calc.truncation)
 	if err != nil {
-		return params, err
+		return fmt.Errorf("newPoissonPld: %v", err)
 	}
 
-	params, ok := calc.paramMap[key]
-	if !ok {
-		return params, fmt.Errorf("no params found for key: (epsilon=%f, population=%d, sparsity=%d)", key.epsilon, key.population, key.sparsity)
-	}
-
-	return params, nil
+	delta := pld.getDeltaForPrivacyLossDistribution(p.epsilon)
+	return fmt.Errorf("poisson_mean is too small resulting in a delta of %v", delta)
 }
 
 // Returns the number of valid integer values for |report|. For FleetwideOccurrenceCounts,
@@ -320,7 +124,7 @@
 		return sparsity, err
 	}
 
-	numBuckets, err := getNumBucketsPerEventVector(metric, report)
+	numBuckets, err := getPerEventVectorSparsity(metric, report)
 	if err != nil {
 		return sparsity, err
 	}
@@ -338,7 +142,7 @@
 			config.ReportDefinition_SELECT_MOST_COMMON:
 			numEventVectors, err = 1, nil
 		case config.ReportDefinition_AT_LEAST_ONCE:
-			numEventVectors, err = getEventVectorBufferMax(metric, report), nil
+			numEventVectors, err = GetEventVectorBufferMax(metric, report), nil
 		default:
 			err = fmt.Errorf("unexpected LocalAggregationProcedure: %v", report.LocalAggregationProcedure)
 		}
@@ -352,7 +156,7 @@
 		config.ReportDefinition_HOURLY_VALUE_NUMERIC_STATS,
 		config.ReportDefinition_STRING_COUNTS,
 		config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS:
-		numEventVectors, err = getEventVectorBufferMax(metric, report), nil
+		numEventVectors, err = GetEventVectorBufferMax(metric, report), nil
 
 	default:
 		err = fmt.Errorf("unsupported ReportType: %v", report.ReportType)
@@ -360,43 +164,16 @@
 	return numEventVectors, err
 }
 
-// Returns the maximum number of event vectors for which a device stores local aggregates.
-// This is either the event_vector_buffer_max value specified in the ReportDefinition, or
-// (if that field is unset) the total number of valid event vectors for the parent metric.
-func getEventVectorBufferMax(metric *config.MetricDefinition, report *config.ReportDefinition) (bufferMax uint64) {
-	if report.EventVectorBufferMax != 0 {
-		return report.EventVectorBufferMax
-	}
-	return numEventVectors(metric)
-}
-
-// Returns the total number of valid event vectors for a MetricDefinition.
-func numEventVectors(metric *config.MetricDefinition) (numEventVectors uint64) {
-	numEventVectors = 1
-	for _, dim := range metric.MetricDimensions {
-		numEventVectors *= uint64(numEventCodes(dim))
-	}
-	return numEventVectors
-}
-
-// A helper function returning the number of valid event codes for a MetricDimension.
-func numEventCodes(dim *config.MetricDefinition_MetricDimension) (numEventCodes uint32) {
-	if dim.MaxEventCode != 0 {
-		return dim.MaxEventCode + 1
-	}
-	return uint32(len(dim.EventCodes))
-}
-
 // Returns the max number of buckets which may be populated in a contribution for |report| for each event vector defined in |metric|.
 // Returns 1 for reports for which a contribution consists of a single integer per event vector.
-func getNumBucketsPerEventVector(metric *config.MetricDefinition, report *config.ReportDefinition) (numBuckets uint64, err error) {
+func getPerEventVectorSparsity(metric *config.MetricDefinition, report *config.ReportDefinition) (numBuckets uint64, err error) {
 	switch metric.MetricType {
 	case config.MetricDefinition_OCCURRENCE:
 		numBuckets, err = 1, nil
 	case config.MetricDefinition_INTEGER:
 		switch report.ReportType {
 		case config.ReportDefinition_FLEETWIDE_HISTOGRAMS:
-			numBuckets, err = getNumHistogramBuckets(report.IntBuckets)
+			numBuckets, err = GetNumHistogramBuckets(report.IntBuckets)
 		// An Observation for FLEETWIDE_MEANS is equivalent to a histogram with 2 buckets per event code:
 		// one representing the sum, the other representing the count.
 		case config.ReportDefinition_FLEETWIDE_MEANS:
@@ -409,135 +186,36 @@
 			numBuckets, err = 1, nil
 		}
 	case config.MetricDefinition_INTEGER_HISTOGRAM:
-		numBuckets, err = getNumHistogramBuckets(metric.IntBuckets)
+		numBuckets, err = GetNumHistogramBuckets(metric.IntBuckets)
 	case config.MetricDefinition_STRING:
-		numCellsPerHash, numHashes, err := getCountMinSketchDimensionsForReport(report)
-		if err != nil {
-			return numBuckets, err
-		}
-		if report.StringBufferMax != 0 && int32(report.StringBufferMax) < numCellsPerHash {
-			numBuckets, err = uint64(report.StringBufferMax)*uint64(numHashes), nil
-		} else {
-			numBuckets, err = uint64(numCellsPerHash*numHashes), nil
-		}
+		numBuckets, err = getPerEventVectorSparsityForString(report)
 	default:
 		err = fmt.Errorf("unsupported metric type %v", metric.MetricType)
 	}
 	return numBuckets, err
 }
 
-// Returns the number of histogram buckets in an IntegerBuckets.
-func getNumHistogramBuckets(buckets *config.IntegerBuckets) (numBuckets uint64, err error) {
-	switch buckets.Buckets.(type) {
-	case *config.IntegerBuckets_Exponential:
-		numBuckets, err = uint64(buckets.GetExponential().GetNumBuckets()), nil
-	case *config.IntegerBuckets_Linear:
-		numBuckets, err = uint64(buckets.GetLinear().GetNumBuckets()), nil
-	case nil:
-		err = fmt.Errorf("IntegerBuckets type not set")
-	default:
-		err = fmt.Errorf("unexpected IntegerBuckets type")
-	}
-	return numBuckets, err
-}
-
-// Returns the dimensions of a CountMin sketch for a report of type StringCounts, or an error if
-// the report is of a different type. Currently these dimensions are hard-coded.
-func getCountMinSketchDimensionsForReport(report *config.ReportDefinition) (numCellsPerHash int32, numHashes int32, err error) {
-	switch report.ReportType {
-	case config.ReportDefinition_STRING_COUNTS,
-		config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS:
-		{
-			numCellsPerHash = 10
-			numHashes = 5
-			err = nil
-		}
-	default:
-		err = fmt.Errorf("expected report of type StringCounts or UniqueDeviceStringCounts, found %v",
+func getPerEventVectorSparsityForString(report *config.ReportDefinition) (sparsity uint64, err error) {
+	if report.ReportType != config.ReportDefinition_STRING_COUNTS && report.ReportType != config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS {
+		return 0, fmt.Errorf("expected report of type StringCounts or UniqueDeviceStringCounts, found %v",
 			report.ReportType)
 	}
-	return numCellsPerHash, numHashes, err
-}
 
-func (calc *PrivacyEncodingParamsCalculator) GetStringSketchParameters(report *config.ReportDefinition) (params *config.StringSketchParameters, err error) {
-	numCellsPerHash, numHashes, err := getCountMinSketchDimensionsForReport(report)
-	if err != nil {
-		return nil, err
+	if report.StringSketchParams == nil {
+		return 0, fmt.Errorf("string_sketch_params must be set.")
 	}
 
-	return &config.StringSketchParameters{
-		NumCellsPerHash: numCellsPerHash,
-		NumHashes:       numHashes,
-	}, nil
-}
-
-// Returns the paramsMapKey{e, p, s} with the following properties:
-// - |e| is the greatest mapped epsilon value which is *less than or equal* to |epsilon|
-// - |p| is the greatest mapped population value which is *less than or equal to* |population|
-// - |s| is the least mapped sparsity value which is *greater than or equal to* |sparsity|
-// or returns an error if this is not possible (e.g. |epsilon| is smaller than all mapped epsilon values).
-//
-// This ensures that the returned parameters will provide |epsilon| (shuffled) differential privacy, or better.
-func getBestMappedKey(epsilon float64, population uint64, sparsity uint64, mapped *paramsKeyLists) (key paramsMapKey, err error) {
-	e, err := mapped.getBestMappedEpsilon(epsilon)
-	if err != nil {
-		return key, err
+	numHashes := report.StringSketchParams.NumHashes
+	numCellsPerHash := report.StringSketchParams.NumCellsPerHash
+	if numHashes <= 0 || numCellsPerHash <= 0 {
+		return 0, fmt.Errorf("num_hashes and num_cells_per_hash must be set to positive values for string reports with privacy.")
 	}
 
-	p, err := mapped.getBestMappedPopulation(population)
-	if err != nil {
-		return key, err
-	}
-
-	s, err := mapped.getBestMappedSparsity(sparsity)
-	if err != nil {
-		return key, err
-	}
-
-	return paramsMapKey{e, p, s}, nil
-}
-
-// If |epsilon| is smaller than the least epsilon value in |lists|, returns a non-nil error.
-// Otherwise, the returned |mappedEpsilon| is the greatest epsilon value in |lists| which is less than or equal to |epsilon|.
-func (lists *paramsKeyLists) getBestMappedEpsilon(epsilon float64) (mappedEpsilon float64, err error) {
-	if len(lists.epsilons) == 0 {
-		return mappedEpsilon, fmt.Errorf("list of mapped epsilon values is empty")
-	}
-	i := sort.Search(len(lists.epsilons), func(i int) bool { return lists.epsilons[i] > epsilon })
-
-	if i > 0 {
-		return lists.epsilons[i-1], nil
+	if report.StringBufferMax != 0 && int32(report.StringBufferMax) < numCellsPerHash {
+		sparsity, err = uint64(report.StringBufferMax)*uint64(numHashes), nil
 	} else {
-		return mappedEpsilon, fmt.Errorf("input epsilon %v is outside the valid range", epsilon)
+		sparsity, err = uint64(numCellsPerHash*numHashes), nil
 	}
-}
 
-// If |population| is smaller than the least population value in |lists|, returns a non-nil error.
-// Otherwise, the returned |mappedPopulation| is the greatest population value in |lists| which is less than or equal to |population|.
-func (lists *paramsKeyLists) getBestMappedPopulation(population uint64) (mappedPopulation uint64, err error) {
-	if len(lists.populations) == 0 {
-		return mappedPopulation, fmt.Errorf("list of mapped population values is empty")
-	}
-	i := sort.Search(len(lists.populations), func(i int) bool { return lists.populations[i] > population })
-
-	if i > 0 {
-		return lists.populations[i-1], nil
-	} else {
-		return mappedPopulation, fmt.Errorf("input population %v is outside the valid range", population)
-	}
-}
-
-// If |sparsity| is larger than the greatest sparsity value in |lists|, returns a non-nil error.
-// Otherwise, the returned |mappedSparsity| is the least sparsity value in |lists| which is greater than or equal to |sparsity|.
-func (lists *paramsKeyLists) getBestMappedSparsity(sparsity uint64) (mappedSparsity uint64, err error) {
-	if len(lists.sparsities) == 0 {
-		return mappedSparsity, fmt.Errorf("list of mapped sparsity values is empty")
-	}
-	i := sort.Search(len(lists.sparsities), func(i int) bool { return lists.sparsities[i] >= sparsity })
-
-	if i < len(lists.sparsities) {
-		return lists.sparsities[i], nil
-	} else {
-		return mappedSparsity, fmt.Errorf("input sparsity %v is outside the valid range", sparsity)
-	}
+	return
 }
diff --git a/src/bin/config_parser/src/privacy/privacy_encoding_params_test.go b/src/bin/config_parser/src/privacy/privacy_encoding_params_test.go
index ca0a52c..ad5bde2 100644
--- a/src/bin/config_parser/src/privacy/privacy_encoding_params_test.go
+++ b/src/bin/config_parser/src/privacy/privacy_encoding_params_test.go
@@ -14,74 +14,6 @@
 // The try-bots expect glog to be imported, but we do not use it.
 var _ = glog.Info
 
-var testParamRecords = [][]string{
-	{"1.0", "10000", "1", "0.0024182694032788277", "9"},
-	{"1.0", "20000", "1", "0.0013450710102915764", "10"},
-	{"1.0", "10000", "2", "0.004249398596584797", "7"},
-	{"1.0", "20000", "2", "0.002368534915149212", "9"},
-	{"1.0", "10000", "10", "0.019537480548024178", "4"},
-	{"1.0", "20000", "10", "0.011127800680696964", "5"},
-	{"5.0", "10000", "1", "0.000906200148165226", "12"},
-	{"5.0", "20000", "1", "0.000491277314722538", "15"},
-	{"5.0", "10000", "2", "0.0009743086993694305", "12"},
-	{"5.0", "20000", "2", "0.0005256505683064461", "14"},
-	{"5.0", "10000", "10", "0.0014028092846274376", "10"},
-	{"5.0", "20000", "10", "0.0007524723187088966", "13"},
-}
-
-func TestParsePrivacyEncodingRecord(t *testing.T) {
-	var tests = []struct {
-		input          []string
-		valid          bool
-		expectedKey    paramsMapKey
-		expectedParams PrivacyEncodingParams
-	}{
-		// Valid records:
-		{[]string{"1.0", "160000", "1", "0.00022002216428518295", "19"}, true, paramsMapKey{1.0, 160000, 1}, PrivacyEncodingParams{}},
-		{[]string{"1.0", "2560000", "1", "1.821480691432953e-05", "44"}, true, paramsMapKey{1.0, 2560000, 1}, PrivacyEncodingParams{}},
-
-		// Invalid records:
-		// Wrong number of columns.
-		{[]string{"1.0", "2560000", "1", "1.821480691432953e-05"}, false, paramsMapKey{}, PrivacyEncodingParams{}},
-		{[]string{"1.0", "2560000", "1", "1.821480691432953e-05", "44", "42"}, false, paramsMapKey{}, PrivacyEncodingParams{}},
-		// Unparseable entry.
-		{[]string{"1.0", "2560000.1234", "1", "1.821480691432953e-05", "44"}, false, paramsMapKey{}, PrivacyEncodingParams{}},
-	}
-	for _, test := range tests {
-		key, params, err := parsePrivacyEncodingRecord(test.input)
-		if test.valid && err != nil {
-			t.Errorf("parsePrivacyEncodingRecord(%v) failed: %v", test.input, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("parsePrivacyEncodingRecord accepted invalid input: %v", test.input)
-		} else if test.valid && (key != test.expectedKey || params != test.expectedParams) {
-			t.Errorf("parsePrivacyEncodingRecord(%v): expected (%v, %v), got (%v, %v)", test.input, test.expectedKey, test.expectedParams, key, params)
-		}
-	}
-}
-
-func TestMapPrivacyEncodingParams(t *testing.T) {
-	m, lists, err := mapPrivacyEncodingParams(testParamRecords)
-	if err != nil {
-		t.Errorf("Failed to parse and map records from list.")
-	}
-
-	// Check that the lookup table has the expected size.
-	if len(m) != len(testParamRecords) {
-		t.Errorf("Expected %v elements in lookup table, found %v", len(testParamRecords), len(m))
-	}
-
-	// Check that the lists of mapped values have the expected sizes.
-	if len(lists.epsilons) != 2 {
-		t.Errorf("Expected 2 mapped epsilon values, found %v", len(lists.epsilons))
-	}
-	if len(lists.populations) != 2 {
-		t.Errorf("Expected 2 mapped population values, found %v", len(lists.populations))
-	}
-	if len(lists.sparsities) != 3 {
-		t.Errorf("Expected 3 mapped sparsity values, found %v", len(lists.sparsities))
-	}
-}
-
 func TestGetIntegerRangeForReport(t *testing.T) {
 	var minValue int64 = -5
 	var maxValue int64 = 10
@@ -186,59 +118,11 @@
 	}
 }
 
-func TestGetEventVectorBufferMax(t *testing.T) {
-	var eventVectorBufferMax uint64 = 5
-	var maxEventCode uint32 = 2
-
-	dimensionMaxEventCode := config.MetricDefinition_MetricDimension{
-		Dimension:    "dim0",
-		MaxEventCode: maxEventCode,
-	}
-	dimensionNoMaxEventCode := config.MetricDefinition_MetricDimension{
-		Dimension:  "dim1",
-		EventCodes: map[uint32]string{0: "event0", 1: "event1", 5: "event5"},
-	}
-	metricDimensions := []*config.MetricDefinition_MetricDimension{
-		&dimensionMaxEventCode,
-		&dimensionNoMaxEventCode,
-	}
-	metricWithDimensions := config.MetricDefinition{
-		MetricName:       "MetricWithDimensions",
-		MetricDimensions: metricDimensions,
-	}
-
-	reportMaxSet := config.ReportDefinition{
-		ReportName:           "ReportWithEventVectorBufferMaxSet",
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	reportMaxUnset := config.ReportDefinition{
-		ReportName: "ReportWithEventVectorBufferMaxUnset",
-	}
-	var tests = []struct {
-		metric   *config.MetricDefinition
-		report   *config.ReportDefinition
-		expected uint64
-	}{
-		{&metricWithDimensions, &reportMaxSet, uint64(eventVectorBufferMax)},
-		{&metricWithDimensions, &reportMaxUnset, uint64((maxEventCode + 1) * 3)},
-	}
-	for _, test := range tests {
-		result := getEventVectorBufferMax(test.metric, test.report)
-		if result != test.expected {
-			t.Errorf("eventVectorBufferMax() for metric %s and report %s: expected %d, got %d",
-				test.metric.MetricName, test.report.ReportName, test.expected, result)
-		}
-	}
-
-}
-
 func TestGetSparsityForReport(t *testing.T) {
 	var eventVectorBufferMax uint64 = 5
 	var stringBufferMax uint32 = 3
 	var numLinearBuckets uint32 = 7
 	var maxEventCode uint32 = 2
-
-	// The hard-coded dimensions of a CountMin sketch for StringCounts reports.
 	var numCellsPerHash uint64 = 10
 	var numHashes uint64 = 5
 
@@ -247,6 +131,11 @@
 		Buckets: &config.IntegerBuckets_Linear{&linearBuckets},
 	}
 
+	stringSketchParams := config.StringSketchParameters{
+		NumHashes:       int32(numHashes),
+		NumCellsPerHash: int32(numCellsPerHash),
+	}
+
 	dimension := config.MetricDefinition_MetricDimension{
 		Dimension:    "dim0",
 		MaxEventCode: maxEventCode,
@@ -346,22 +235,26 @@
 		ReportType:           config.ReportDefinition_STRING_COUNTS,
 		EventVectorBufferMax: eventVectorBufferMax,
 		StringBufferMax:      stringBufferMax,
+		StringSketchParams:   &stringSketchParams,
 	}
 	stringCountsReportWithNoStringBufferMax := config.ReportDefinition{
 		ReportName:           "StringCountsWithNoStringBufferMax",
 		ReportType:           config.ReportDefinition_STRING_COUNTS,
 		EventVectorBufferMax: eventVectorBufferMax,
+		StringSketchParams:   &stringSketchParams,
 	}
 	uniqueDeviceStringCountsReport := config.ReportDefinition{
 		ReportName:           "UniqueDeviceStringCounts",
 		ReportType:           config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS,
 		EventVectorBufferMax: eventVectorBufferMax,
 		StringBufferMax:      stringBufferMax,
+		StringSketchParams:   &stringSketchParams,
 	}
 	uniqueDeviceStringCountsReportWithNoStringBufferMax := config.ReportDefinition{
 		ReportName:           "UniqueDeviceStringCountsWithNoStringBufferMax",
 		ReportType:           config.ReportDefinition_UNIQUE_DEVICE_STRING_COUNTS,
 		EventVectorBufferMax: eventVectorBufferMax,
+		StringSketchParams:   &stringSketchParams,
 	}
 	unsetReportTypeReport := config.ReportDefinition{
 		ReportName: "UnsetReportType",
@@ -420,356 +313,3 @@
 		}
 	}
 }
-
-func TestGetBestMappedEpsilon(t *testing.T) {
-	goodLists := paramsKeyLists{epsilons: []float64{1.0, 5.0, 10.0}}
-	emptyLists := paramsKeyLists{}
-
-	type args struct {
-		epsilon float64
-		lists   paramsKeyLists
-	}
-	var tests = []struct {
-		input    args
-		valid    bool
-		expected float64
-	}{
-		// Valid input:
-		{args{1.0, goodLists}, true, 1.0},
-		{args{2.0, goodLists}, true, 1.0},
-		{args{10.0, goodLists}, true, 10.0},
-		{args{11.0, goodLists}, true, 10.0},
-
-		// Invalid input:
-		// |epsilon| is smaller than all elements of |lists.epsilons|.
-		{args{0.5, goodLists}, false, 0.0},
-		// |lists.epsilons| is empty.
-		{args{1.0, emptyLists}, false, 0.0},
-	}
-	for _, test := range tests {
-		result, err := test.input.lists.getBestMappedEpsilon(test.input.epsilon)
-		if test.valid && err != nil {
-			t.Errorf("getBestMappedEpsilon(%v) failed for epsilon list %v with error: %v", test.input.epsilon, test.input.lists.epsilons, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("getBestMappedEpsilon accepted invalid input %v for epsilon list %v", test.input.epsilon, test.input.lists.epsilons)
-		} else if test.valid && result != test.expected {
-			t.Errorf("getBestMapped(%v) for epsilon list %v: expected %v, got %v", test.input.epsilon, test.input.lists.epsilons, test.expected, result)
-		}
-	}
-}
-
-func TestGetBestMappedPopulation(t *testing.T) {
-	goodLists := paramsKeyLists{populations: []uint64{10000, 50000, 100000}}
-	emptyLists := paramsKeyLists{}
-
-	type args struct {
-		population uint64
-		lists      paramsKeyLists
-	}
-	var tests = []struct {
-		input    args
-		valid    bool
-		expected uint64
-	}{
-		// Valid input:
-		{args{10000, goodLists}, true, 10000},
-		{args{20000, goodLists}, true, 10000},
-		{args{100000, goodLists}, true, 100000},
-		{args{110000, goodLists}, true, 100000},
-
-		// Invalid input:
-		// |population| is smaller than all elements of |lists.populations|.
-		{args{5000, goodLists}, false, 0},
-		// |vals| is empty.
-		{args{10000, emptyLists}, false, 0},
-	}
-	for _, test := range tests {
-		result, err := test.input.lists.getBestMappedPopulation(test.input.population)
-		if test.valid && err != nil {
-			t.Errorf("getBestMappedPopulation(%v) failed for population list %v with error: %v", test.input.population, test.input.lists.populations, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("getBestMappedPopulation accepted invalid input %v for population list %v", test.input.population, test.input.lists.populations)
-		} else if test.valid && result != test.expected {
-			t.Errorf("getBestMappedPopulation(%v) for population list %v: expected %v, got %v", test.input.population, test.input.lists.populations, test.expected, result)
-		}
-	}
-}
-
-func TestGetBestMappedSparsity(t *testing.T) {
-	goodLists := paramsKeyLists{sparsities: []uint64{1, 5, 10}}
-	emptyLists := paramsKeyLists{}
-
-	type args struct {
-		sparsity uint64
-		lists    paramsKeyLists
-	}
-	var tests = []struct {
-		input    args
-		valid    bool
-		expected uint64
-	}{
-		// Valid input:
-		{args{0, goodLists}, true, 1},
-		{args{1, goodLists}, true, 1},
-		{args{2, goodLists}, true, 5},
-		{args{10, goodLists}, true, 10},
-
-		// Invalid input:
-		// |sparsity| is larger than all elements of |lists.sparsities|.
-		{args{11, goodLists}, false, 0},
-		// |lists.sparsities| is empty.
-		{args{1, emptyLists}, false, 0},
-	}
-	for _, test := range tests {
-		result, err := test.input.lists.getBestMappedSparsity(test.input.sparsity)
-		if test.valid && err != nil {
-			t.Errorf("getBestMappedSparsity(%v) failed for population list %v with error: %v", test.input.sparsity, test.input.lists.sparsities, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("getBestMappedSparsity accepted invalid input %v for sparsity list %v", test.input.sparsity, test.input.lists.sparsities)
-		} else if test.valid && result != test.expected {
-			t.Errorf("getBestMappedSparsity(%v) for sparsity list %v: expected %v, got %v", test.input.sparsity, test.input.lists.sparsities, test.expected, result)
-		}
-	}
-}
-
-func TestGetBestMappedKey(t *testing.T) {
-	type args struct {
-		epsilon    float64
-		population uint64
-		sparsity   uint64
-		mapped     *paramsKeyLists
-	}
-	var vals = paramsKeyLists{[]float64{1.0, 5.0, 10.0}, []uint64{10000, 20000, 40000, 80000}, []uint64{1, 2, 3, 4, 5, 10}}
-	var tests = []struct {
-		input    args
-		valid    bool
-		expected paramsMapKey
-	}{
-		// Valid args:
-		// All args are present in a mapped key.
-		{args{1.0, 10000, 1, &vals}, true, paramsMapKey{1.0, 10000, 1}},
-		{args{5.0, 40000, 3, &vals}, true, paramsMapKey{5.0, 40000, 3}},
-		{args{10.0, 80000, 10, &vals}, true, paramsMapKey{10.0, 80000, 10}},
-		// Some arg is not present in a mapped key, but the input is valid.
-		{args{2.0, 48000, 6, &vals}, true, paramsMapKey{1.0, 40000, 10}},
-		{args{6.0, 60000, 9, &vals}, true, paramsMapKey{5.0, 40000, 10}},
-		{args{12.0, 80001, 1, &vals}, true, paramsMapKey{10.0, 80000, 1}},
-
-		// Invalid args:
-		// Epsilon is too small.
-		{args{0.5, 40000, 10, &vals}, false, paramsMapKey{}},
-		// Population is too small.
-		{args{6.0, 5000, 10, &vals}, false, paramsMapKey{}},
-		// Sparsity is too large.
-		{args{6.0, 40000, 11, &vals}, false, paramsMapKey{}},
-	}
-	for _, test := range tests {
-		result, err := getBestMappedKey(test.input.epsilon, test.input.population, test.input.sparsity, test.input.mapped)
-		if test.valid && err != nil {
-			t.Errorf("getBestMappedKey(%v) failed: %v", test.input, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("getBestMappedKey accepted invalid input: %v", test.input)
-		} else if test.valid && result != test.expected {
-			t.Errorf("getBestMappedKey(%v): expected %v, got %v", test.input, test.expected, result)
-		}
-	}
-}
-
-func TestGetPrivacyEncodingParams(t *testing.T) {
-	calc, err := NewPrivacyEncodingParamsCalculatorForTesting(testParamRecords)
-	if err != nil {
-		t.Errorf("NewPrivacyEncodingParamsCalculatorForTesting failed with valid input. Error message: %v", err)
-	}
-
-	type args struct {
-		epsilon    float64
-		population uint64
-		sparsity   uint64
-		rangeSize  uint64
-	}
-	var tests = []struct {
-		input    args
-		valid    bool
-		expected PrivacyEncodingParams
-	}{
-		// Valid input:
-		// The best-match key is {1.0, 10000, 10}.
-		{args{1.0, 15000, 5, 4}, true, PrivacyEncodingParams{}},
-		// The best-match key is {5.0, 10000, 1}.
-		{args{10.0, 15000, 1, 12}, true, PrivacyEncodingParams{}},
-		// The rangeSize is smaller than numIndexPoints.
-		{args{10.0, 15000, 1, 6}, true, PrivacyEncodingParams{}},
-
-		// Invalid input:
-		// The target epsilon is smaller than all mapped epsilons.
-		{args{0.5, 10000, 1, 1}, false, PrivacyEncodingParams{}},
-		// The target population is smaller than all mapped populations.
-		{args{1.0, 5000, 1, 1}, false, PrivacyEncodingParams{}},
-		// The target sparsity is larger than all mapped sparsities.
-		{args{1.0, 10000, 100, 1}, false, PrivacyEncodingParams{}},
-	}
-	for _, test := range tests {
-		result, err := calc.GetPrivacyEncodingParams(test.input.epsilon, test.input.population, test.input.sparsity, test.input.rangeSize)
-		if test.valid && err != nil {
-			t.Errorf("GetPrivacyEncodingParams(%v, %v, %v) failed: %v", test.input.epsilon, test.input.population, test.input.sparsity, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("GetPrivacyEncodingParams() accepted invalid input: (%v, %v, %v)", test.input.epsilon, test.input.population, test.input.sparsity)
-		} else if test.valid && result != test.expected {
-			t.Errorf("GetPrivacyEncodingParams(%v, %v, %v): expected %v, got %v", test.input.epsilon, test.input.population, test.input.sparsity, test.expected, result)
-		}
-	}
-}
-
-func TestGetPrivacyEncodingParamsForReport(t *testing.T) {
-	calc, err := NewPrivacyEncodingParamsCalculatorForTesting(testParamRecords)
-	if err != nil {
-		t.Errorf("NewPrivacyEncodingParamsCalculatorForTesting failed with valid input. Error message: %v", err)
-	}
-
-	var eventVectorBufferMax uint64 = 5
-	var minValue int64 = 0
-	var smallMaxValue int64 = 1
-	var largeMaxValue int64 = 100
-	var smallMaxCount uint64 = 1
-	var largeMaxCount uint64 = 100
-	var numLinearBuckets uint32 = 2
-
-	linearBuckets := config.LinearIntegerBuckets{NumBuckets: numLinearBuckets}
-	buckets := config.IntegerBuckets{
-		Buckets: &config.IntegerBuckets_Linear{&linearBuckets},
-	}
-
-	// Metrics
-	occurrenceMetric := config.MetricDefinition{
-		MetricType: config.MetricDefinition_OCCURRENCE,
-	}
-	integerMetric := config.MetricDefinition{
-		MetricType: config.MetricDefinition_INTEGER,
-	}
-
-	// Reports
-	highPrivacyAtLeastOnceReport := config.ReportDefinition{
-		ReportName:                "HighPrivacyAtLeastOnceReport",
-		ReportType:                config.ReportDefinition_UNIQUE_DEVICE_COUNTS,
-		PrivacyLevel:              config.ReportDefinition_HIGH_PRIVACY,
-		LocalAggregationProcedure: config.ReportDefinition_AT_LEAST_ONCE,
-		EventVectorBufferMax:      eventVectorBufferMax,
-	}
-	lowPrivacySelectFirstReport := config.ReportDefinition{
-		ReportName:                "LowPrivacySelectFirstReport",
-		ReportType:                config.ReportDefinition_UNIQUE_DEVICE_COUNTS,
-		PrivacyLevel:              config.ReportDefinition_LOW_PRIVACY,
-		LocalAggregationProcedure: config.ReportDefinition_SELECT_FIRST,
-		EventVectorBufferMax:      eventVectorBufferMax,
-	}
-	smallRangeSizeFleetwideOccurrenceCountsReport := config.ReportDefinition{
-		ReportName:           "smallRangeSizeFleetwideOccurrenceCountsReport",
-		ReportType:           config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-		PrivacyLevel:         config.ReportDefinition_LOW_PRIVACY,
-		MinValue:             minValue,
-		MaxValue:             smallMaxValue,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	noMaxValueFleetwideOccurrenceCountsReport := config.ReportDefinition{
-		ReportName:           "noMaxValueFleetwideOccurrenceCountsReport",
-		ReportType:           config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-		PrivacyLevel:         config.ReportDefinition_LOW_PRIVACY,
-		MinValue:             0,
-		MaxValue:             0,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	largeRangeSizeFleetwideOccurrenceCountsReport := config.ReportDefinition{
-		ReportName:           "largeRangeSizeFleetwideOccurrenceCountsReport",
-		ReportType:           config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS,
-		PrivacyLevel:         config.ReportDefinition_LOW_PRIVACY,
-		MinValue:             minValue,
-		MaxValue:             largeMaxValue,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	smallRangeSizeFleetwideHistogramsReport := config.ReportDefinition{
-		ReportName:           "smallRangeSizeFleetwideHistogramsReport",
-		ReportType:           config.ReportDefinition_FLEETWIDE_HISTOGRAMS,
-		IntBuckets:           &buckets,
-		PrivacyLevel:         config.ReportDefinition_LOW_PRIVACY,
-		MaxCount:             smallMaxCount,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	largeRangeSizeFleetwideHistogramsReport := config.ReportDefinition{
-		ReportName:           "largeRangeSizeFleetwideHistogramsReport",
-		ReportType:           config.ReportDefinition_FLEETWIDE_HISTOGRAMS,
-		IntBuckets:           &buckets,
-		PrivacyLevel:         config.ReportDefinition_LOW_PRIVACY,
-		MaxCount:             largeMaxCount,
-		EventVectorBufferMax: eventVectorBufferMax,
-	}
-	unsetReportTypeReport := config.ReportDefinition{
-		ReportName: "UnsetReportTypeReport",
-	}
-
-	type args struct {
-		metric *config.MetricDefinition
-		report *config.ReportDefinition
-	}
-	var tests = []struct {
-		input    args
-		valid    bool
-		expected PrivacyEncodingParams
-	}{
-		// Valid input:
-		// The target epsilon is 1.0 and the sparsity is |eventCodeBufferMax|. The best-match key is
-		// {1.0, 10000, 10}. The report has type UniqueDeviceCounts, so the NumIndexPoints field of
-		// PrivacyEncodingParams is set to 1.
-		{args{&occurrenceMetric, &highPrivacyAtLeastOnceReport}, true,
-			PrivacyEncodingParams{}},
-		// The target epsilon is 10.0 and the sparsity is 1. The best-match key is {5.0, 10000, 1}.
-		// The report has type UniqueDeviceCounts, so the NumIndexPoints field of PrivacyEncodingParams is
-		// set to 1.
-		{args{&occurrenceMetric, &lowPrivacySelectFirstReport}, true,
-			PrivacyEncodingParams{}},
-		// The target epsilon is 10.0 and the sparsity is |eventCodeBufferMax|.
-		// The best-match key is {5.0, 10000, 10}.
-		// The integer range size of the report (= |smallMaxValue|) is smaller than the number of index
-		// points computed by |calc|, so the returned NumIndexPoints should be equal to |smallMaxValue| + 1.
-		{args{&occurrenceMetric, &smallRangeSizeFleetwideOccurrenceCountsReport}, true,
-			PrivacyEncodingParams{}},
-		// The target epsilon is 10.0 and the sparsity is |eventCodeBufferMax|.
-		// The best-match key is {5.0, 10000, 10}.
-		// The integer range size of the report (= 1) is smaller than the number of index
-		// points computed by |calc|, so the returned NumIndexPoints should be equal to 1.
-		{args{&occurrenceMetric, &noMaxValueFleetwideOccurrenceCountsReport}, true,
-			PrivacyEncodingParams{}},
-		// The target epsilon is 10.0 and the sparsity is |eventCodeBufferMax|.
-		// The best-match key is {5.0, 10000, 10}.
-		// The integer range size of the report (= largeMaxValue) is larger than the number of index points
-		// computed by |calc|, so the returned NumIndexPoints is given by the value at the best-match key.
-		{args{&occurrenceMetric, &largeRangeSizeFleetwideOccurrenceCountsReport}, true,
-			PrivacyEncodingParams{}},
-		// The target epsilon is 10.0 and the sparsity is |eventCodeBufferMax| * |numLinearBuckets|.
-		// The best-match key is {5.0, 10000, 10}.
-		// The integer range size of the report (= |smallMaxCount|) is smaller than the number of index
-		// points computed by |calc|, so the returned NumIndexPoints should be equal to |smallMaxCount + 1|.
-		{args{&integerMetric, &smallRangeSizeFleetwideHistogramsReport}, true,
-			PrivacyEncodingParams{}},
-		// The target epsilon is 10.0 and the sparsity is |eventCodeBufferMax| * |numLinearBuckets|.
-		// The best-match key is {5.0, 10000, 10}.
-		// The integer range size of the report (= |largeMaxCount|) is larger than the number of index
-		// points computed by |calc|, so the returned NumIndexPoints is given by the value at the best-match
-		// key.
-		{args{&integerMetric, &largeRangeSizeFleetwideHistogramsReport}, true,
-			PrivacyEncodingParams{}},
-
-		// Invalid input:
-		// This report does not have a report type set.
-		{args{&occurrenceMetric, &unsetReportTypeReport}, false, PrivacyEncodingParams{}},
-	}
-	for _, test := range tests {
-		result, err := calc.GetPrivacyEncodingParamsForReport(test.input.metric, test.input.report)
-		if test.valid && err != nil {
-			t.Errorf("GetPrivacyEncodingParamsForReport() failed for report %v: %v", test.input.report.ReportName, err)
-		} else if !test.valid && err == nil {
-			t.Errorf("GetPrivacyEncodingParamsForReport() accepted invalid report: %v", test.input.report.ReportName)
-		} else if test.valid && result != test.expected {
-			t.Errorf("GetPrivacyEncodingParamsForReport() for report %v: expected %v, got %v", test.input.report.ReportName, test.expected, result)
-		}
-	}
-}
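
The deleted comments above describe how a best-match parameter key is chosen: the target epsilon and population are rounded down to the largest mapped value not exceeding them, while the sparsity is rounded up to the smallest mapped value that covers it. A minimal, self-contained Go sketch of that rounding rule (roundDown and roundUp are illustrative names, not the privacy package's actual API):

    // best_match_sketch.go: illustrates the rounding rule exercised by the
    // deleted tests above. Epsilon and population round down to the largest
    // mapped value that does not exceed the target; sparsity rounds up to
    // the smallest mapped value that covers the target.
    package main

    import (
    	"errors"
    	"fmt"
    )

    // roundDown returns the largest element of vals (sorted ascending)
    // that is <= target.
    func roundDown(vals []uint64, target uint64) (uint64, error) {
    	best, found := uint64(0), false
    	for _, v := range vals {
    		if v <= target {
    			best, found = v, true
    		}
    	}
    	if !found {
    		return 0, errors.New("target is smaller than all mapped values")
    	}
    	return best, nil
    }

    // roundUp returns the smallest element of vals (sorted ascending)
    // that is >= target.
    func roundUp(vals []uint64, target uint64) (uint64, error) {
    	for _, v := range vals {
    		if v >= target {
    			return v, nil
    		}
    	}
    	return 0, errors.New("target is larger than all mapped values")
    }

    func main() {
    	populations := []uint64{10000, 20000, 40000, 80000}
    	sparsities := []uint64{1, 2, 3, 4, 5, 10}
    	p, _ := roundDown(populations, 48000) // 40000, matching the deleted TestGetBestMappedKey case
    	s, _ := roundUp(sparsities, 6)        // 10
    	fmt.Println(p, s)
    }
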
diff --git a/src/bin/config_parser/src/source_generator/dep.go b/src/bin/config_parser/src/source_generator/dep.go
index a4464c1..b556a6e 100644
--- a/src/bin/config_parser/src/source_generator/dep.go
+++ b/src/bin/config_parser/src/source_generator/dep.go
@@ -14,7 +14,7 @@
 // Write a depfile listing the files in 'inputFiles' to the specified 'w' Writer.
 func writeDepFile(formats, inputFiles []string, generateFilename func(string) string, w io.Writer) error {
 	// Since all targets share the same dependencies, we only need to output one.
-	// TODO(b/278917650): Generate one line per output file since different builds might need different generated files.
+	// TODO(https://fxbug.dev/278917650): Generate one line per output file since different builds might need different generated files.
 	if len(formats) == 0 {
 		return nil
 	}
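
The depfile written by writeDepFile above follows the Make-style "output: input input ..." rule format that Ninja consumes; since all targets share the same dependencies, a single rule suffices. A rough, self-contained Go sketch of that single-rule form (writeOneDepLine is an illustrative name, not the parser's actual helper):

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    	"strings"
    )

    // writeOneDepLine emits a single Make-style depfile rule: one output
    // target followed by every input it depends on. Because all generated
    // files share the same inputs, one rule is enough for Ninja to track
    // the dependencies.
    func writeOneDepLine(target string, inputs []string, w io.Writer) error {
    	_, err := fmt.Fprintf(w, "%s: %s\n", target, strings.Join(inputs, " "))
    	return err
    }

    func main() {
    	// Prints: metrics.cb.h: registry.yaml common.yaml
    	writeOneDepLine("metrics.cb.h", []string{"registry.yaml", "common.yaml"}, os.Stdout)
    }
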
diff --git a/src/bin/config_parser/src/source_generator/json.go b/src/bin/config_parser/src/source_generator/json.go
index 22019dd..ee02725 100644
--- a/src/bin/config_parser/src/source_generator/json.go
+++ b/src/bin/config_parser/src/source_generator/json.go
@@ -8,12 +8,9 @@
 import (
 	"config"
 	"encoding/json"
-	"privacy"
 )
 
-type jsonOutputter struct {
-	errorCalculator *privacy.ErrorCalculator
-}
+type jsonOutputter struct{}
 
 // JSON export structure
 // go tags set the field name when exported to JSON data.
@@ -48,27 +45,19 @@
 	Reports             []jsonReport `json:"reports"`
 }
 
-type jsonError map[string]jsonErrorEstimate
-
 type jsonReport struct {
-	Name                                 string    `json:"name"`
-	Id                                   uint32    `json:"id"`
-	ReportType                           string    `json:"report_type"`
-	ReportTypeId                         int32     `json:"report_type_id"`
-	PrivacyLevel                         string    `json:"privacy_level"`
-	ErrorEstimates                       jsonError `json:"error_estimates"`
-	EventVectorBufferMax                 uint64    `json:"event_vector_buffer_max"`
-	StringBufferMax                      uint32    `json:"string_buffer_max"`
-	SystemProfileField                   []string  `json:"system_profile_field"`
-	LocalAggregationPeriod               int32     `json:"local_aggregation_period"`
-	LocalAggregationProcedure            string    `json:"local_aggregation_procedure"`
-	LocalAggregationProcedurePercentileN uint32    `json:"local_aggregation_procedure_percentile_n"`
-	ExperimentId                         []int64   `json:"experiment_id"`
-}
-
-type jsonErrorEstimate struct {
-	Epsilon   float64            `json:"epsilon"`
-	Estimates map[uint64]float64 `json:"estimates"`
+	Name                                 string   `json:"name"`
+	Id                                   uint32   `json:"id"`
+	ReportType                           string   `json:"report_type"`
+	ReportTypeId                         int32    `json:"report_type_id"`
+	PrivacyLevel                         string   `json:"privacy_level"`
+	EventVectorBufferMax                 uint64   `json:"event_vector_buffer_max"`
+	StringBufferMax                      uint32   `json:"string_buffer_max"`
+	SystemProfileField                   []string `json:"system_profile_field"`
+	LocalAggregationPeriod               int32    `json:"local_aggregation_period"`
+	LocalAggregationProcedure            string   `json:"local_aggregation_procedure"`
+	LocalAggregationProcedurePercentileN uint32   `json:"local_aggregation_procedure_percentile_n"`
+	ExperimentId                         []int64  `json:"experiment_id"`
 }
 
 // Generates a list of evenly distributed integer values
@@ -82,48 +71,6 @@
 	return elements
 }
 
-func (jo *jsonOutputter) makeErrorEstimates(report *config.ReportDefinition, metric *config.MetricDefinition, populations []uint64) (estimate jsonError) {
-	if jo.errorCalculator == nil {
-		// Error calculator not provided; error estimation skipped.
-		return nil
-	}
-
-	var estimates = jsonError{}
-	for l, n := range config.ReportDefinition_PrivacyLevel_name {
-		if n == "PRIVACY_LEVEL_UNKNOWN" || n == "NO_ADDED_PRIVACY" {
-			continue
-		}
-		level := config.ReportDefinition_PrivacyLevel(l)
-		epsilon := jo.errorCalculator.ParamsCalc.Constants.EpsilonForPrivacyLevel[level]
-
-		var err error
-		values := make(map[uint64]float64, len(populations))
-		for _, population := range populations {
-			// TODO(b/228513924): Support minDenominatorEstimates. Currently any report type requiring
-			// a denominator estimate will return an error and not be included.
-			var errorValue float64
-			errorValue, err = jo.errorCalculator.Estimate(metric, report, epsilon, population, 0)
-			if err != nil {
-				break
-			}
-			values[population] = errorValue
-		}
-		if err != nil {
-			// Error estimates must compute successfully for every population value otherwise the
-			// estimates for this epsilon are not included.
-			continue
-		}
-		estimates[level.String()] = jsonErrorEstimate{
-			Epsilon:   epsilon,
-			Estimates: values,
-		}
-	}
-	if len(estimates) == 0 {
-		return nil
-	}
-	return estimates
-}
-
 // JSON struct constructors
 func (jo *jsonOutputter) makeJSONReport(report *config.ReportDefinition, metric *config.MetricDefinition) jsonReport {
 	if report == nil {
@@ -135,16 +82,12 @@
 		systemProfileField = append(systemProfileField, f.String())
 	}
 
-	populations := linspace(10000, 10000000, 50)
-	estimates := jo.makeErrorEstimates(report, metric, populations)
-
 	return jsonReport{
 		Name:                                 report.GetReportName(),
 		Id:                                   report.GetId(),
 		ReportType:                           report.GetReportType().String(),
 		ReportTypeId:                         int32(report.GetReportType()),
 		PrivacyLevel:                         report.GetPrivacyLevel().String(),
-		ErrorEstimates:                       estimates,
 		EventVectorBufferMax:                 report.GetEventVectorBufferMax(),
 		StringBufferMax:                      report.GetStringBufferMax(),
 		SystemProfileField:                   systemProfileField,
@@ -261,9 +204,7 @@
 }
 
 // Returns an output formatter for JSON
-//
-// privacyParamsPath is the string path of the privacy params file to be used for error estimation.
-func JSONOutputFactory(errorCalculator *privacy.ErrorCalculator) OutputFormatter {
-	jo := jsonOutputter{errorCalculator}
+func JSONOutputFactory() OutputFormatter {
+	jo := jsonOutputter{}
 	return jo.JSONOutput
 }
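
The linspace helper whose comment survives in the hunk above ("Generates a list of evenly distributed integer values") underpinned the population grid formerly passed to the error estimator. A hedged sketch of that idea; the real helper's signature and rounding behavior may differ:

    package main

    import "fmt"

    // linspace returns n integer values evenly distributed over
    // [start, end], inclusive of both endpoints.
    func linspace(start, end uint64, n int) []uint64 {
    	if n <= 1 {
    		return []uint64{start}
    	}
    	step := (end - start) / uint64(n-1)
    	elements := make([]uint64, 0, n)
    	for i := 0; i < n; i++ {
    		elements = append(elements, start+uint64(i)*step)
    	}
    	return elements
    }

    func main() {
    	// Prints: [10000 2507500 5005000 7502500 10000000]
    	fmt.Println(linspace(10000, 10000000, 5))
    }
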
diff --git a/src/bin/config_parser/src/source_generator/json_test.go b/src/bin/config_parser/src/source_generator/json_test.go
index 8d23dd5..ee476aa 100644
--- a/src/bin/config_parser/src/source_generator/json_test.go
+++ b/src/bin/config_parser/src/source_generator/json_test.go
@@ -8,7 +8,6 @@
 
 import (
 	"config"
-	"privacy"
 	"reflect"
 	"testing"
 )
@@ -29,12 +28,7 @@
 }
 
 func constructTestJsonOutputter(t *testing.T) jsonOutputter {
-	paramsCalc, err := privacy.NewPrivacyEncodingParamsCalculatorForTesting(testParamRecords)
-	if err != nil {
-		t.Errorf("Failed to create error calculator.")
-	}
-	errorCalc := privacy.NewErrorCalculator(*paramsCalc)
-	return jsonOutputter{errorCalc}
+	return jsonOutputter{}
 }
 
 func TestConstructorsHandleNil(t *testing.T) {
@@ -61,25 +55,6 @@
 	}
 }
 
-func TestMakeErrorEstimates(t *testing.T) {
-	jo := constructTestJsonOutputter(t)
-	r := config.ReportDefinition{
-		ReportType:                config.ReportDefinition_UNIQUE_DEVICE_COUNTS,
-		PrivacyLevel:              config.ReportDefinition_LOW_PRIVACY,
-		LocalAggregationProcedure: config.ReportDefinition_SELECT_FIRST,
-	}
-	m := config.MetricDefinition{
-		MetricType: config.MetricDefinition_OCCURRENCE,
-	}
-
-	want := jsonError(nil)
-
-	got := jo.makeErrorEstimates(&r, &m, []uint64{10000, 100000, 1000000})
-	if reflect.DeepEqual(want, got) == false {
-		t.Errorf("makeJSONReport(%v)\n\n GOT: %v\nWANT: %v", r, got, want)
-	}
-}
-
 func TestMakeJSONReport(t *testing.T) {
 	jo := constructTestJsonOutputter(t)
 	name := "test_name"
@@ -115,7 +90,6 @@
 		ReportType:                           "FLEETWIDE_OCCURRENCE_COUNTS",
 		ReportTypeId:                         int32(config.ReportDefinition_FLEETWIDE_OCCURRENCE_COUNTS),
 		PrivacyLevel:                         "LOW_PRIVACY",
-		ErrorEstimates:                       nil,
 		EventVectorBufferMax:                 eventVectorBufferMax,
 		StringBufferMax:                      stringBufferMax,
 		SystemProfileField:                   []string{"OS", "ARCH"},
diff --git a/src/bin/config_parser/src/source_generator/source_generator_test.go b/src/bin/config_parser/src/source_generator/source_generator_test.go
index 7b2ef48..f87422d 100644
--- a/src/bin/config_parser/src/source_generator/source_generator_test.go
+++ b/src/bin/config_parser/src/source_generator/source_generator_test.go
@@ -207,7 +207,7 @@
 
 	{"golden_with_name_maps_filtered.cb.h", CppOutputFactory("config", []string{}, newOptionsWithNameMaps("300"))},
 
-	{"golden.cb.json", JSONOutputFactory(nil)},
+	{"golden.cb.json", JSONOutputFactory()},
 }
 
 func TestPrintConfig(t *testing.T) {
diff --git a/src/bin/config_parser/src/source_generator/source_generator_test_files/golden.cb.json b/src/bin/config_parser/src/source_generator/source_generator_test_files/golden.cb.json
index 0c91f57..a939997 100644
--- a/src/bin/config_parser/src/source_generator/source_generator_test_files/golden.cb.json
+++ b/src/bin/config_parser/src/source_generator/source_generator_test_files/golden.cb.json
@@ -27,7 +27,6 @@
                   "report_type": "FLEETWIDE_OCCURRENCE_COUNTS",
                   "report_type_id": 11,
                   "privacy_level": "PRIVACY_LEVEL_UNKNOWN",
-                  "error_estimates": null,
                   "event_vector_buffer_max": 0,
                   "string_buffer_max": 0,
                   "system_profile_field": null,
@@ -45,7 +44,6 @@
                   "report_type": "HOURLY_VALUE_NUMERIC_STATS",
                   "report_type_id": 18,
                   "privacy_level": "PRIVACY_LEVEL_UNKNOWN",
-                  "error_estimates": null,
                   "event_vector_buffer_max": 0,
                   "string_buffer_max": 0,
                   "system_profile_field": null,
@@ -78,7 +76,6 @@
                   "report_type": "FLEETWIDE_OCCURRENCE_COUNTS",
                   "report_type_id": 11,
                   "privacy_level": "PRIVACY_LEVEL_UNKNOWN",
-                  "error_estimates": null,
                   "event_vector_buffer_max": 0,
                   "string_buffer_max": 0,
                   "system_profile_field": null,
@@ -115,7 +112,6 @@
                   "report_type": "FLEETWIDE_OCCURRENCE_COUNTS",
                   "report_type_id": 11,
                   "privacy_level": "LOW_PRIVACY",
-                  "error_estimates": null,
                   "event_vector_buffer_max": 0,
                   "string_buffer_max": 0,
                   "system_profile_field": null,
@@ -157,7 +153,6 @@
                   "report_type": "REPORT_TYPE_UNSET",
                   "report_type_id": 0,
                   "privacy_level": "PRIVACY_LEVEL_UNKNOWN",
-                  "error_estimates": null,
                   "event_vector_buffer_max": 0,
                   "string_buffer_max": 0,
                   "system_profile_field": null,
diff --git a/src/bin/config_parser/src/source_generator/source_outputter.go b/src/bin/config_parser/src/source_generator/source_outputter.go
index dd8f2fd..45e8d58 100644
--- a/src/bin/config_parser/src/source_generator/source_outputter.go
+++ b/src/bin/config_parser/src/source_generator/source_outputter.go
@@ -10,7 +10,6 @@
 	"bytes"
 	"config"
 	"fmt"
-	"privacy"
 	"reflect"
 	"sort"
 	"strconv"
@@ -443,7 +442,7 @@
 	}
 }
 
-func getOutputFormatter(format, namespace, goPackageName, varName string, options generatorOptions, outFilename string, errorCalculator *privacy.ErrorCalculator) (OutputFormatter, error) {
+func getOutputFormatter(format, namespace, goPackageName, varName string, options generatorOptions, outFilename string) (OutputFormatter, error) {
 	namespaceList := []string{}
 	if namespace != "" {
 		namespaceList = strings.Split(namespace, ".")
@@ -465,7 +464,7 @@
 	case "java":
 		return JavaOutputFactory(varName, namespaceList, options, outFilename), nil
 	case "json":
-		return JSONOutputFactory(errorCalculator), nil
+		return JSONOutputFactory(), nil
 	case "rust":
 		return RustOutputFactory(varName, namespaceList, options), nil
 	default:
diff --git a/src/bin/config_parser/src/source_generator/source_outputter_test.go b/src/bin/config_parser/src/source_generator/source_outputter_test.go
index 806d411..cee2596 100644
--- a/src/bin/config_parser/src/source_generator/source_outputter_test.go
+++ b/src/bin/config_parser/src/source_generator/source_outputter_test.go
@@ -5,16 +5,14 @@
 package source_generator
 
 import (
-	"privacy"
 	"testing"
 )
 
 func TestGetOutputFormatter(t *testing.T) {
 	formats := []string{"bin", "b64", "cpp", "dart", "rust", "go", "java", "json"}
-	errorCalculator := privacy.ErrorCalculator{privacy.PrivacyEncodingParamsCalculator{}}
 
 	for _, format := range formats {
-		outputFormatter, err := getOutputFormatter(format, "ns", "package", "varName", generatorOptions{}, "", &errorCalculator)
+		outputFormatter, err := getOutputFormatter(format, "ns", "package", "varName", generatorOptions{}, "")
 		if outputFormatter == nil {
 			t.Errorf("Unexpected nil output formatter for format %v", format)
 		}
@@ -23,7 +21,7 @@
 		}
 	}
 
-	outputFormatter, err := getOutputFormatter("invalid_format", "ns", "package", "varName", generatorOptions{}, "", &errorCalculator)
+	outputFormatter, err := getOutputFormatter("invalid_format", "ns", "package", "varName", generatorOptions{}, "")
 	if outputFormatter != nil {
 		t.Errorf("Unexpectedly got an output formatter.")
 	}
diff --git a/src/bin/config_parser/src/source_generator/utils.go b/src/bin/config_parser/src/source_generator/utils.go
index a20ba3f..755b9c6 100644
--- a/src/bin/config_parser/src/source_generator/utils.go
+++ b/src/bin/config_parser/src/source_generator/utils.go
@@ -41,7 +41,7 @@
 }
 
 // parseOutFormatList parses a space-separated list of output formats.
-// TODO(b/278917650): Switch to comma-separated.
+// TODO(https://fxbug.dev/278917650): Switch to comma-separated.
 func parseOutFormatList(outFormat string) []string {
 	return strings.FieldsFunc(outFormat, func(c rune) bool { return c == ' ' })
 }
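
For reference, parseOutFormatList relies on strings.FieldsFunc, which drops the empty fields that repeated spaces would otherwise produce. A self-contained usage sketch:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // parseOutFormatList mirrors the helper above: it splits a
    // space-separated format list, dropping empty fields caused by
    // repeated spaces.
    func parseOutFormatList(outFormat string) []string {
    	return strings.FieldsFunc(outFormat, func(c rune) bool { return c == ' ' })
    }

    func main() {
    	// Prints: [cpp json rust]
    	fmt.Println(parseOutFormatList("cpp  json rust"))
    }
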
diff --git a/src/bin/config_parser/src/source_generator/writer.go b/src/bin/config_parser/src/source_generator/writer.go
index a5c5aa9..d8b345d 100644
--- a/src/bin/config_parser/src/source_generator/writer.go
+++ b/src/bin/config_parser/src/source_generator/writer.go
@@ -11,7 +11,6 @@
 	"flag"
 	"fmt"
 	"os"
-	"privacy"
 	"strconv"
 	"strings"
 )
@@ -136,22 +135,13 @@
 		return err
 	}
 
-	var errorCalc *privacy.ErrorCalculator
-	if *privacyParamsPath != "" {
-		var err error
-		errorCalc, err = privacy.NewErrorCalculatorFromPrivacyParams(*privacyParamsPath)
-		if err != nil {
-			return err
-		}
-	}
-
 	generateFilename := filenameGeneratorFromFlags()
 	generatorOptions, err := newGeneratorOptions(*features, *dimensionNameMapsForMetricIds)
 	if err != nil {
 		return err
 	}
 	for _, format := range parseOutFormatList(*outFormat) {
-		outputFormatter, err := getOutputFormatter(format, *namespace, *goPackageName, *varName, *generatorOptions, *outFilename, errorCalc)
+		outputFormatter, err := getOutputFormatter(format, *namespace, *goPackageName, *varName, *generatorOptions, *outFilename)
 		if err != nil {
 			return err
 		}
diff --git a/src/bin/config_parser/test_source_generator_for_all_projects.py b/src/bin/config_parser/test_source_generator_for_all_projects.py
index 29ee954..4caff08 100755
--- a/src/bin/config_parser/test_source_generator_for_all_projects.py
+++ b/src/bin/config_parser/test_source_generator_for_all_projects.py
@@ -14,7 +14,7 @@
     os.path.join(THIS_DIR, os.pardir, os.pardir, os.pardir)
 )
 
-sys.path += [os.path.join(SRC_ROOT_DIR, 'third_party', 'pyyaml', 'lib3')]
+sys.path += [os.path.join(SRC_ROOT_DIR, 'third_party', 'pyyaml', 'lib')]
 import yaml
 
 OUT_DIR = os.path.abspath(os.path.join(SRC_ROOT_DIR, 'out'))
diff --git a/src/bin/error_calculator/src/BUILD.gn b/src/bin/error_calculator/src/BUILD.gn
deleted file mode 100644
index 54a5fb4..0000000
--- a/src/bin/error_calculator/src/BUILD.gn
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2020 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-visibility = [ "$cobalt_root/*" ]
-
-import("//build/go/go_binary.gni")
-import("//build/go/go_library.gni")
-import("//build/go/go_test.gni")
-
-go_library("main") {
-  name = target_name
-
-  sources = [ "error_calculator_main.go" ]
-  source_dir = "."
-}
-
-go_binary("bin") {
-  library = ":main"
-  output_name = "error_calculator"
-
-  deps = [
-    "$cobalt_root/src/bin/config_parser/src:privacy",
-    "$cobalt_root/src/bin/config_parser/src:registry_util",
-    "$cobalt_root/src/registry:cobalt_registry_proto_go",
-    "//third_party/golibs:github.com/golang/glog",
-  ]
-}
diff --git a/src/bin/error_calculator/src/error_calculator_main.go b/src/bin/error_calculator/src/error_calculator_main.go
deleted file mode 100644
index 31908da..0000000
--- a/src/bin/error_calculator/src/error_calculator_main.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2020 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file reads a Cobalt Configuration in YAML format and uses the
-// error_estimation package to output an error estimate for each relevant
-// privacy level and corresponding epsilon value.
-
-package main
-
-import (
-	"config"
-	"flag"
-	"fmt"
-	"os"
-	"privacy"
-	"registry_util"
-	"strconv"
-	"text/tabwriter"
-
-	"github.com/golang/glog"
-)
-
-var (
-	registryProto              = flag.String("registry_proto", "", "File path of the serialized Cobalt registry.")
-	privacyParams              = flag.String("privacy_params", "", "File containing privacy param records.")
-	epsilonFlag                = flag.Float64("epsilon", 0, "If set, estimates the error for the specified epsilon value.")
-	populationFlag             = flag.Int("population", 0, "If set, estimates the error given the specified population estimate.")
-	minDenominatorEstimateFlag = flag.Int("min_denominator", 0, "Estimated minimum number of unique contributing devices per day.")
-	minValueFlag               = flag.Int("min_value", 0, "Optionally overrides the report's minValue field.")
-	maxValueFlag               = flag.Int("max_value", 0, "Optionally overrides the report's maxValue field.")
-	maxCountFlag               = flag.Int("max_count", 0, "Optionally overrides the report's maxCount field.")
-	simpleFlag                 = flag.Bool("simple", false, "Output a single error estimate.")
-)
-
-var (
-	privacyLevels = []config.ReportDefinition_PrivacyLevel{
-		config.ReportDefinition_LOW_PRIVACY,
-		config.ReportDefinition_MEDIUM_PRIVACY,
-		config.ReportDefinition_HIGH_PRIVACY,
-	}
-)
-
-// Input for each error estimation row.
-type errorEstimationConfig struct {
-	label          string
-	epsilon        float64
-	population     uint64
-	minDemonimator uint64
-}
-
-// Data for a single error estimate row.
-type errorEstimate struct {
-	config        errorEstimationConfig
-	absoluteError float64
-	relativeError float64
-}
-
-// Prints a list of estimates to the console structured as a table.
-func prettyPrint(estimates []errorEstimate, report *config.ReportDefinition) {
-	w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.TabIndent)
-	fmt.Printf("Report: %v\n", report.ReportName)
-	fmt.Printf("Population Estimate: %v\n", estimates[0].config.population)
-	fmt.Printf("Current Privacy Level: %v\n", report.PrivacyLevel)
-	if *minDenominatorEstimateFlag != 0 {
-		fmt.Printf("Estimated Denominator Lower Bound: %v\n", *minDenominatorEstimateFlag)
-	}
-	fmt.Println("")
-	fmt.Fprintln(w, "Privacy Level\t| Epsilon\t| Absolute Error*\t| Relative Error*\t|")
-	fmt.Fprintln(w, "-------------\t+ -------\t+ ---------------\t+ ---------------\t+")
-	for _, e := range estimates {
-		fmt.Fprintf(w, "%v\t| %v\t| ±%.5v\t| ±%.5v\t|\n", e.config.label, e.config.epsilon, e.absoluteError, e.relativeError)
-	}
-	fmt.Fprintln(w, "\n* Estimated root mean square error for the 'active_count' field for each report row.")
-	w.Flush()
-}
-
-// Confirms that required flags are provided and all flag values are valid.
-func validateFlags() (err error) {
-	flag.Parse()
-	if *registryProto == "" {
-		return fmt.Errorf("--registry_proto flag is required.")
-	}
-	if *privacyParams == "" {
-		return fmt.Errorf("--privacy_params flag is required.")
-	}
-	if *populationFlag == 0 {
-		return fmt.Errorf("--population flag is required.")
-	}
-	if flag.NArg() != 4 {
-		return fmt.Errorf("You must specify customer, project, metric, and report id.")
-	}
-	if *epsilonFlag < 0 {
-		return fmt.Errorf("--epsilon must be positive.")
-	}
-	if *populationFlag < 0 {
-		return fmt.Errorf("--population must be positive.")
-	}
-	return nil
-}
-
-func overrideReport(report *config.ReportDefinition) *config.ReportDefinition {
-	if *minValueFlag != 0 {
-		report.MinValue = int64(*minValueFlag)
-	}
-	if *maxValueFlag != 0 {
-		report.MaxValue = int64(*maxValueFlag)
-	}
-	if *maxCountFlag != 0 {
-		report.MaxCount = uint64(*maxCountFlag)
-	}
-	return report
-}
-
-func generateSingleEstimate(report *config.ReportDefinition, metric *config.MetricDefinition, paramsCalc *privacy.PrivacyEncodingParamsCalculator, errorCalc *privacy.ErrorCalculator) (float64, error) {
-	epsilon := paramsCalc.Constants.EpsilonForPrivacyLevel[report.PrivacyLevel]
-	if *epsilonFlag != 0 {
-		epsilon = *epsilonFlag
-	}
-	value, err := errorCalc.Estimate(metric, report, epsilon, uint64(*populationFlag), uint64(*minDenominatorEstimateFlag))
-	if err != nil {
-		return 0, err
-	}
-	return value, nil
-}
-
-func generateAllEstimates(report *config.ReportDefinition, metric *config.MetricDefinition, paramsCalc *privacy.PrivacyEncodingParamsCalculator, errorCalc *privacy.ErrorCalculator) ([]errorEstimate, error) {
-	configs := []errorEstimationConfig{}
-	for _, l := range privacyLevels {
-		configs = append(configs, errorEstimationConfig{
-			label:          l.String(),
-			epsilon:        paramsCalc.Constants.EpsilonForPrivacyLevel[l],
-			population:     uint64(*populationFlag),
-			minDemonimator: uint64(*minDenominatorEstimateFlag),
-		})
-	}
-	if *epsilonFlag != 0 {
-		configs = append(configs, errorEstimationConfig{
-			label:          "CUSTOM",
-			epsilon:        *epsilonFlag,
-			population:     uint64(*populationFlag),
-			minDemonimator: uint64(*minDenominatorEstimateFlag),
-		})
-	}
-
-	values := []errorEstimate{}
-	for _, c := range configs {
-		absoluteError, err := errorCalc.Estimate(metric, report, c.epsilon, c.population, c.minDemonimator)
-		relativeError := absoluteError / float64(c.population)
-		if err != nil {
-			return nil, err
-		}
-		values = append(values, errorEstimate{config: c, absoluteError: absoluteError, relativeError: relativeError})
-	}
-	return values, nil
-}
-
-func main() {
-	err := validateFlags()
-	if err != nil {
-		glog.Exit(err)
-	}
-
-	ids := []uint64{}
-	for i := 0; i < 4; i++ {
-		id, err := strconv.ParseUint(flag.Arg(i), 10, 64)
-		if err != nil {
-			glog.Exitf("Failure converting id '%v' to integer.", flag.Arg(i))
-		}
-		ids = append(ids, id)
-	}
-	customerId := uint32(ids[0])
-	projectId := uint32(ids[1])
-	metricId := uint32(ids[2])
-	reportId := uint32(ids[3])
-
-	registryUtil, err := registry_util.NewRegistryUtil(*registryProto)
-	if err != nil {
-		glog.Exit(err)
-	}
-	metric, err := registryUtil.FindMetric(customerId, projectId, metricId)
-	if err != nil {
-		glog.Exit(err)
-	}
-	report, err := registryUtil.FindReport(customerId, projectId, metricId, reportId)
-	if err != nil {
-		glog.Exit(err)
-	}
-	report = overrideReport(report)
-
-	paramsCalc, err := privacy.NewPrivacyEncodingParamsCalculator(*privacyParams)
-	errorCalc := privacy.NewErrorCalculator(*paramsCalc)
-	if err != nil {
-		glog.Exit(err)
-	}
-
-	if *simpleFlag {
-		estimate, err := generateSingleEstimate(report, metric, paramsCalc, errorCalc)
-		if err != nil {
-			glog.Exit(err)
-		}
-		fmt.Println(estimate)
-	} else {
-		estimates, err := generateAllEstimates(report, metric, paramsCalc, errorCalc)
-		if err != nil {
-			glog.Exit(err)
-		}
-		prettyPrint(estimates, report)
-	}
-	os.Exit(0)
-}
diff --git a/src/lib/clearcut/BUILD.gn b/src/lib/clearcut/BUILD.gn
index 8959a1a..55b2660 100644
--- a/src/lib/clearcut/BUILD.gn
+++ b/src/lib/clearcut/BUILD.gn
@@ -7,10 +7,16 @@
 import("//third_party/protobuf/proto_library.gni")
 
 proto_library("clearcut_proto") {
-  sources = [ "clearcut.proto" ]
+  sources = [
+    "clearcut.proto",
+    "clearcut_log_source.proto",
+  ]
+  import_dirs = [ "$cobalt_root" ]
+  proto_in_dir = "$cobalt_root"
   import_protobuf_full = true
   generate_python = false
   cc_generator_options = "lite"
+  extra_configs = [ "$cobalt_root:cobalt_config" ]
 }
 
 source_set("clearcut") {
diff --git a/src/lib/clearcut/clearcut.proto b/src/lib/clearcut/clearcut.proto
index 892028c..6e3e884 100644
--- a/src/lib/clearcut/clearcut.proto
+++ b/src/lib/clearcut/clearcut.proto
@@ -4,21 +4,26 @@
 
 syntax = "proto2";
 
-package cobalt.lib.clearcut;
+package cobalt.clearcut_protos;
+
+import "src/lib/clearcut/clearcut_log_source.proto";
 
 message ClientInfo {
-  optional int32 client_type = 1;
+  enum ClientType {
+    FUCHSIA = 17;
+  }
+  optional ClientType client_type = 1;
 }
 
 message LogEvent {
   optional int32 event_code = 11;
 
-  extensions 1000 to max;
+  optional bytes source_extension = 6;
 }
 
 message LogRequest {
   optional ClientInfo client_info = 1;
-  optional int32 log_source = 2 [default = -1];
+  optional LogSourceEnum.LogSource log_source = 2 [default = UNKNOWN];
   repeated LogEvent log_event = 3;
 }
 
diff --git a/src/lib/clearcut/clearcut_log_source.proto b/src/lib/clearcut/clearcut_log_source.proto
new file mode 100644
index 0000000..43d410a
--- /dev/null
+++ b/src/lib/clearcut/clearcut_log_source.proto
@@ -0,0 +1,9 @@
+syntax = "proto2";
+
+package cobalt.clearcut_protos.LogSourceEnum;
+
+enum LogSource {
+  UNKNOWN = -1;
+  TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL = 844;
+  TURQUOISE_COBALT_SHUFFLER_INPUT_PROD = 1176;
+}
diff --git a/src/lib/clearcut/curl_handle.cc b/src/lib/clearcut/curl_handle.cc
index 736ee79..0a2e4a1 100644
--- a/src/lib/clearcut/curl_handle.cc
+++ b/src/lib/clearcut/curl_handle.cc
@@ -76,7 +76,7 @@
   return Status(StatusCode::INTERNAL, curl_easy_strerror(code), details);
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 StatusOr<HTTPResponse> CurlHandle::Post(const std::string &url, std::string body) {
   CB_RETURN_IF_ERROR(Setopt(CURLOPT_URL, url.c_str()));
   CB_RETURN_IF_ERROR(Setopt(CURLOPT_POSTFIELDSIZE, body.size()));
diff --git a/src/lib/clearcut/uploader.cc b/src/lib/clearcut/uploader.cc
index f7500ea..b7e4519 100644
--- a/src/lib/clearcut/uploader.cc
+++ b/src/lib/clearcut/uploader.cc
@@ -16,12 +16,15 @@
 
 using cobalt::Status;
 using cobalt::StatusCode;
+using cobalt::clearcut_protos::ClientInfo;
+using cobalt::clearcut_protos::LogRequest;
+using cobalt::clearcut_protos::LogResponse;
 using cobalt::util::SleeperInterface;
 using cobalt::util::SteadyClockInterface;
 
 ClearcutUploader::ClearcutUploader(
     std::string url, util::NotNullUniquePtr<HTTPClient> client,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     int64_t upload_timeout_millis, int64_t initial_backoff_millis,
     util::NotNullUniquePtr<SteadyClockInterface> steady_clock,
     util::NotNullUniquePtr<cobalt::util::SleeperInterface> sleeper)
@@ -85,7 +88,7 @@
                   "clearcut server");
   }
 
-  log_request->mutable_client_info()->set_client_type(kFuchsiaClientType);
+  log_request->mutable_client_info()->set_client_type(ClientInfo::FUCHSIA);
   HTTPResponse response;
   // Because we will be moving the request body into the Post() method, it will not be available to
   // us later. Here we keep an escaped copy of the request body just in case we need to use it
@@ -167,7 +170,7 @@
 
   LogResponse log_response;
   if (!log_response.ParseFromString(response.response)) {
-    // TODO(fxbug.dev/45751): add metric to capture how often this happens.
+    // TODO(https://fxbug.dev/42122310): add metric to capture how often this happens.
     LOG(ERROR) << "Unable to parse response from clearcut server";
   } else {
     if (log_response.next_request_wait_millis() >= 0) {
diff --git a/src/lib/clearcut/uploader.h b/src/lib/clearcut/uploader.h
index f15f9d5..5786fbf 100644
--- a/src/lib/clearcut/uploader.h
+++ b/src/lib/clearcut/uploader.h
@@ -21,7 +21,6 @@
 
 namespace cobalt::lib::clearcut {
 
-static const int32_t kFuchsiaClientType = 17;
 static const int32_t kMaxRetries = 8;  // Wait between tries: 0.25s 0.5s 1s 2s 4s 8s 16s
 static const int64_t kInitialBackoffMillis = 250;
 
@@ -31,10 +30,13 @@
   virtual ~ClearcutUploaderInterface() = default;
 
   // Uploads the |log_request| with retries.
-  virtual Status UploadEvents(LogRequest *log_request, int32_t max_retries) = 0;
+  virtual Status UploadEvents(cobalt::clearcut_protos::LogRequest *log_request,
+                              int32_t max_retries) = 0;
 
   // Uploads the |log_request| with kMaxRetries retries.
-  Status UploadEvents(LogRequest *log_request) { return UploadEvents(log_request, kMaxRetries); }
+  Status UploadEvents(cobalt::clearcut_protos::LogRequest *log_request) {
+    return UploadEvents(log_request, kMaxRetries);
+  }
 
   // Resets the internal metrics to use the provided logger.
   void ResetInternalMetrics(logger::InternalMetrics *internal_metrics = nullptr) {
@@ -77,11 +79,13 @@
                    util::NotNullUniquePtr<cobalt::util::SleeperInterface> sleeper =
                        util::MakeNotNullUniquePtr<cobalt::util::Sleeper>());
 
-  Status UploadEvents(LogRequest *log_request, int32_t max_retries) override;
+  Status UploadEvents(cobalt::clearcut_protos::LogRequest *log_request,
+                      int32_t max_retries) override;
 
  private:
   // Tries once to upload |log_request|.
-  Status TryUploadEvents(LogRequest *log_request, std::chrono::steady_clock::time_point deadline);
+  Status TryUploadEvents(cobalt::clearcut_protos::LogRequest *log_request,
+                         std::chrono::steady_clock::time_point deadline);
 
   const std::string url_;
   const util::PinnedUniquePtr<HTTPClient> client_;
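
The retry constants above imply a doubling backoff schedule: starting at kInitialBackoffMillis (250 ms) and doubling after each failed try yields the seven waits, 0.25s through 16s, listed next to kMaxRetries. A minimal Go sketch of that schedule (assuming plain doubling with no jitter, which the header alone does not confirm):

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoffSchedule reproduces the wait times implied by the uploader's
    // constants: an initial backoff that doubles after every failed try.
    // With initialMillis = 250 and maxRetries = 8 this yields seven waits,
    // 250ms 500ms 1s 2s 4s 8s 16s.
    func backoffSchedule(initialMillis int64, maxRetries int) []time.Duration {
    	waits := make([]time.Duration, 0, maxRetries-1)
    	backoff := time.Duration(initialMillis) * time.Millisecond
    	for i := 0; i < maxRetries-1; i++ {
    		waits = append(waits, backoff)
    		backoff *= 2
    	}
    	return waits
    }

    func main() {
    	fmt.Println(backoffSchedule(250, 8))
    }
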
diff --git a/src/lib/clearcut/uploader_test.cc b/src/lib/clearcut/uploader_test.cc
index f6042c1..c896013 100644
--- a/src/lib/clearcut/uploader_test.cc
+++ b/src/lib/clearcut/uploader_test.cc
@@ -11,6 +11,7 @@
 #include <gtest/gtest.h>
 
 #include "src/lib/clearcut/clearcut.pb.h"
+#include "src/lib/clearcut/clearcut_log_source.pb.h"
 #include "src/lib/util/clock.h"
 #include "src/lib/util/not_null.h"
 #include "src/lib/util/sleeper.h"
@@ -19,6 +20,10 @@
 namespace cobalt::lib::clearcut {
 
 using cobalt::StatusCode;
+using cobalt::clearcut_protos::LogEvent;
+using cobalt::clearcut_protos::LogRequest;
+using cobalt::clearcut_protos::LogResponse;
+using cobalt::clearcut_protos::LogSourceEnum::TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL;
 using cobalt::util::FakeSleeper;
 using cobalt::util::IncrementingSteadyClock;
 using std::chrono::steady_clock;
@@ -99,8 +104,7 @@
  public:
   [[nodiscard]] Status UploadClearcutDemoEvent(int32_t event_code, int32_t max_retries = 1) const {
     LogRequest request;
-    constexpr int32_t kClearcutDemoSource = 12345;
-    request.set_log_source(kClearcutDemoSource);
+    request.set_log_source(TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL);
     request.add_log_event()->set_event_code(event_code);
     auto status = uploader->UploadEvents(&request, max_retries);
     return status;
diff --git a/src/lib/client/cpp/buckets_config.cc b/src/lib/client/cpp/buckets_config.cc
index 3223071..c4a1933 100644
--- a/src/lib/client/cpp/buckets_config.cc
+++ b/src/lib/client/cpp/buckets_config.cc
@@ -14,7 +14,7 @@
     return 0;
   }
 
-  // TODO(b/278918086): Maybe switch to binary search?
+  // TODO(https://fxbug.dev/278918086): Maybe switch to binary search?
   for (uint32_t i = 1; i < floors_.size(); i++) {
     if (val >= floors_[i - 1] && val < floors_[i]) {
       return i;
@@ -51,7 +51,7 @@
   return floor;
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 std::unique_ptr<IntegerBucketConfig> IntegerBucketConfig::CreateLinear(int64_t floor,
                                                                        uint32_t num_buckets,
                                                                        uint32_t step_size) {
@@ -74,7 +74,7 @@
 }
 
 std::unique_ptr<IntegerBucketConfig> IntegerBucketConfig::CreateExponential(
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     int64_t floor, uint32_t num_buckets, uint32_t initial_step, uint32_t step_multiplier) {
   if (num_buckets == 0) {
     LOG(ERROR) << "ExponentialIntegerBucket with 0 buckets.";
diff --git a/src/lib/crypto_util/BUILD.gn b/src/lib/crypto_util/BUILD.gn
index 7a61415..53dcaca 100644
--- a/src/lib/crypto_util/BUILD.gn
+++ b/src/lib/crypto_util/BUILD.gn
@@ -8,8 +8,6 @@
   sources = [
     "errors.cc",
     "errors.h",
-    "hash.cc",
-    "hash.h",
     "random.cc",
     "random.h",
     "types.h",
@@ -27,10 +25,7 @@
 
 source_set("tests") {
   testonly = true
-  sources = [
-    "hash_test.cc",
-    "random_test.cc",
-  ]
+  sources = [ "random_test.cc" ]
 
   deps = [
     ":crypto_util",
diff --git a/src/lib/crypto_util/hash.cc b/src/lib/crypto_util/hash.cc
deleted file mode 100644
index 3e932e8..0000000
--- a/src/lib/crypto_util/hash.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/lib/crypto_util/hash.h"
-
-#include <string>
-
-#include <openssl/sha.h>
-
-namespace cobalt::crypto::hash {
-
-bool Hash(const std::string &data, std::string &out) {
-  out.resize(SHA256_DIGEST_LENGTH);
-  return Hash(data, reinterpret_cast<byte *>(&out.front()));
-}
-
-bool Hash(const std::string &data, byte out[SHA256_DIGEST_LENGTH]) {
-  SHA256_CTX sha256;
-  if (SHA256_Init(&sha256) != 1) {
-    return false;
-  }
-  if (SHA256_Update(&sha256, data.c_str(), data.size()) != 1) {
-    return false;
-  }
-  return SHA256_Final(out, &sha256) == 1;
-}
-
-}  // namespace cobalt::crypto::hash
diff --git a/src/lib/crypto_util/hash.h b/src/lib/crypto_util/hash.h
deleted file mode 100644
index 85e2383..0000000
--- a/src/lib/crypto_util/hash.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef COBALT_SRC_LIB_CRYPTO_UTIL_HASH_H_
-#define COBALT_SRC_LIB_CRYPTO_UTIL_HASH_H_
-
-#include <cstddef>
-#include <string>
-
-#include <openssl/sha.h>
-
-#include "src/lib/crypto_util/types.h"
-
-namespace cobalt::crypto::hash {
-
-static const size_t DIGEST_SIZE = SHA256_DIGEST_LENGTH;  // SHA-256 outputs 32 bytes.
-
-// Computes the SHA256 digest of |data| and writes the result to |out| which
-// will be automatically resized to |DIGEST_SIZE|.
-//
-// Returns true for success or false for failure.
-bool Hash(const std::string &data, std::string &out);
-
-// Computes the SHA256 digest of |data| and writes the result to |out| which
-// must have length |DIGEST_SIZE|.
-//
-// Returns true for success or false for failure.
-bool Hash(const std::string &data, byte out[DIGEST_SIZE]);
-
-}  // namespace cobalt::crypto::hash
-
-#endif  // COBALT_SRC_LIB_CRYPTO_UTIL_HASH_H_
diff --git a/src/lib/crypto_util/hash_test.cc b/src/lib/crypto_util/hash_test.cc
deleted file mode 100644
index a2894c1..0000000
--- a/src/lib/crypto_util/hash_test.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The Fuchsia Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/lib/crypto_util/hash.h"
-
-#include <string>
-
-#include <gtest/gtest.h>
-
-namespace cobalt::crypto::hash {
-
-TEST(HashTest, TestHash) {
-  std::string data =
-      "The algorithms were first published in 2001 in the draft FIPS PUB "
-      "180-2, at which time public review and comments were accepted. In "
-      "August 2002, FIPS PUB 180-2 became the new Secure Hash Standard, "
-      "replacing FIPS PUB 180-1, which was released in April 1995. The updated "
-      "standard included the original SHA-1 algorithm, with updated technical "
-      "notation consistent with that describing the inner workings of the "
-      "SHA-2 family.[9]";
-
-  // Hash the data into digest.
-  byte digest[DIGEST_SIZE];
-  EXPECT_TRUE(Hash(data, digest));
-
-  // Generate a human-readable string representing the bytes of digest.
-  std::ostringstream stream;
-  stream << std::hex << std::setfill('0');
-  for (unsigned char ch : digest) {
-    stream << std::setw(2) << static_cast<int>(ch);
-  }
-
-  // Compare this to an expected result.
-  EXPECT_EQ(std::string("fc11f3cbffea99f65944e50e72e5bfc09674eed67bcebcd76ec0f9dc90faef05"),
-            stream.str());
-}
-
-TEST(HashTest, TestHashWithStrings) {
-  std::string data =
-      "The algorithms were first published in 2001 in the draft FIPS PUB "
-      "180-2, at which time public review and comments were accepted. In "
-      "August 2002, FIPS PUB 180-2 became the new Secure Hash Standard, "
-      "replacing FIPS PUB 180-1, which was released in April 1995. The updated "
-      "standard included the original SHA-1 algorithm, with updated technical "
-      "notation consistent with that describing the inner workings of the "
-      "SHA-2 family.[9]";
-  std::string digest;
-  EXPECT_TRUE(Hash(data, digest));
-  EXPECT_EQ(DIGEST_SIZE, digest.size());
-}
-
-}  // namespace cobalt::crypto::hash
diff --git a/src/lib/privacy/private_index_decoding.cc b/src/lib/privacy/private_index_decoding.cc
index 026401f..06dde53 100644
--- a/src/lib/privacy/private_index_decoding.cc
+++ b/src/lib/privacy/private_index_decoding.cc
@@ -75,7 +75,7 @@
 cobalt::Status DecodePrivateIndexAsSumOrCount(
     uint64_t index,
     const google::protobuf::RepeatedPtrField<MetricDefinition::MetricDimension>& metric_dimensions,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     int64_t min_value, int64_t max_value, uint64_t max_count, uint64_t num_index_points,
     std::vector<uint32_t>* event_vector, SumOrCount* sum_or_count) {
   uint64_t event_vector_index = 0;
@@ -135,7 +135,7 @@
 Status DecodePrivateIndexAsHistogramBucketIndexAndCount(
     uint64_t index,
     const google::protobuf::RepeatedPtrField<MetricDefinition::MetricDimension>& metric_dimensions,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     uint32_t max_bucket_index, uint64_t max_count, uint64_t num_index_points,
     std::vector<uint32_t>* event_vector, uint32_t* bucket_index, double* bucket_count) {
   uint64_t event_vector_index = 0;
diff --git a/src/lib/util/BUILD.gn b/src/lib/util/BUILD.gn
index 48979b8..59d6687 100644
--- a/src/lib/util/BUILD.gn
+++ b/src/lib/util/BUILD.gn
@@ -303,6 +303,16 @@
   deps = [ "//third_party/github.com/google/farmhash" ]
 }
 
+source_set("hash_test") {
+  testonly = true
+  sources = [ "hash_test.cc" ]
+  configs += [ "$cobalt_root:cobalt_config" ]
+  deps = [
+    ":hash",
+    "//third_party/googletest:gtest",
+  ]
+}
+
 source_set("named_type") {
   sources = [ "named_type.h" ]
 }
@@ -322,8 +332,17 @@
     ":datetime_util_test",
     ":encrypted_message_util_test",
     ":file_util_test",
+    ":hash_test",
     ":protected_fields_test",
     ":sleeper_test",
     ":status_builder_test",
   ]
 }
+
+source_set("thread") {
+  sources = [
+    "thread.cc",
+    "thread.h",
+  ]
+  configs += [ "$cobalt_root:cobalt_config" ]
+}
diff --git a/src/lib/util/clock.h b/src/lib/util/clock.h
index f197191..4b75e8c 100644
--- a/src/lib/util/clock.h
+++ b/src/lib/util/clock.h
@@ -23,7 +23,7 @@
 // The SystemClockInterface used in the constructor must remain valid for the entire lifetime of
 // this class.
 //
-// TODO(fxbug.dev/87103): Remove this once it is no longer used.
+// TODO(https://fxbug.dev/42168188): Remove this once it is no longer used.
 class SystemClockRef : public SystemClockInterface {
  public:
   explicit SystemClockRef(SystemClockInterface* ref) : ref_(ref) {}
diff --git a/src/lib/util/file_util.h b/src/lib/util/file_util.h
index 421abce..2cf6ea1 100644
--- a/src/lib/util/file_util.h
+++ b/src/lib/util/file_util.h
@@ -21,7 +21,7 @@
 lib::statusor::StatusOr<std::string> ReadHexFile(const Path& file_path);
 
 std::string ReadHexFileOrDefault(const Path& file_path, const std::string& default_string);
-// DEPRECATED: TODO(b/278930401): Remove once not used in Fuchsia
+// DEPRECATED: TODO(https://fxbug.dev/278930401): Remove once not used in Fuchsia
 std::string ReadHexFileOrDefault(const std::string& file_path, const std::string& default_string);
 
 template <>
diff --git a/src/lib/util/hash.cc b/src/lib/util/hash.cc
index 80ca8b0..77ed1d6 100644
--- a/src/lib/util/hash.cc
+++ b/src/lib/util/hash.cc
@@ -6,29 +6,47 @@
 
 #include <farmhash.h>
 
-namespace cobalt::util {
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <string>
 
+namespace cobalt::util {
 namespace {
 
-const uint32_t kBytesPerInt64 = 8;
-const uint32_t kSingleByteMask = 0xFF;
+constexpr uint32_t kBytesPerInt64 = 8;
+constexpr uint32_t kSingleByteMask = 0xFF;
 
-void AppendBytesLittleEndian(uint64_t value, std::string *bytes) {
-  for (uint32_t i = 0; i < kBytesPerInt64; i++) {
-    bytes->push_back(static_cast<char>((value >> (kBytesPerInt64 * i)) & kSingleByteMask));
+char GetByte(const uint64_t value, const size_t idx) {
+  return static_cast<char>(value >> (kBytesPerInt64 * idx) & kSingleByteMask);
+}
+
+std::string LittleEndianBytes(const std::initializer_list<uint64_t> values) {
+  std::string bytes(kBytesPerInt64 * values.size(), '\0');
+  size_t vi = 0;
+  for (const uint64_t value : values) {
+    for (size_t i = 0; i < kBytesPerInt64; ++i) {
+      bytes[vi + i] = GetByte(value, i);
+    }
+    vi += kBytesPerInt64;
   }
+  return bytes;
 }
 
 }  // namespace
 
 std::string FarmhashFingerprint(const std::string &data) {
-  farmhash::uint128_t fingerprint = farmhash::Fingerprint128(data);
+  const farmhash::uint128_t fingerprint = farmhash::Fingerprint128(data);
+  return LittleEndianBytes({
+      farmhash::Uint128Low64(fingerprint),
+      farmhash::Uint128High64(fingerprint),
+  });
+}
 
-  std::string bytes;
-  AppendBytesLittleEndian(farmhash::Uint128Low64(fingerprint), &bytes);
-  AppendBytesLittleEndian(farmhash::Uint128High64(fingerprint), &bytes);
-
-  return bytes;
+std::string FarmhashFingerprint64(const std::string &data) {
+  return LittleEndianBytes({
+      farmhash::Fingerprint64(data),
+  });
 }
 
 uint64_t Farmhash64(const std::string &data) { return farmhash::Fingerprint64(data); }
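
The little-endian layout above serializes each uint64_t least-significant byte first, so a 128-bit fingerprint becomes the low 64 bits followed by the high 64 bits. A minimal standalone sketch of the same byte layout (the names here are illustrative, not part of hash.cc; note the shift is 8 bits per byte, which happens to equal kBytesPerInt64 numerically):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

// Sketch: serialize a uint64_t least-significant byte first, mirroring
// LittleEndianBytes in src/lib/util/hash.cc.
std::string ToLittleEndian(uint64_t value) {
  std::string bytes(8, '\0');
  for (size_t i = 0; i < 8; ++i) {
    bytes[i] = static_cast<char>((value >> (8 * i)) & 0xFF);
  }
  return bytes;
}

int main() {
  // Prints "ef cd ab 89 67 45 23 01": least-significant byte first.
  for (unsigned char c : ToLittleEndian(0x0123456789abcdefULL)) {
    std::printf("%02x ", c);
  }
  std::printf("\n");
}
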
diff --git a/src/lib/util/hash.h b/src/lib/util/hash.h
index 771fa37..5698004 100644
--- a/src/lib/util/hash.h
+++ b/src/lib/util/hash.h
@@ -5,6 +5,7 @@
 #ifndef COBALT_SRC_LIB_UTIL_HASH_H_
 #define COBALT_SRC_LIB_UTIL_HASH_H_
 
+#include <cstdint>
 #include <string>
 
 namespace cobalt::util {
@@ -12,6 +13,9 @@
 // Computes the farmhash::Fingerprint128 of the input data.
 std::string FarmhashFingerprint(const std::string& data);
 
+// Computes the farmhash::Fingerprint64 of the input data.
+std::string FarmhashFingerprint64(const std::string& data);
+
 uint64_t Farmhash64(const std::string& data);
 
 }  // namespace cobalt::util
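
From the definitions in hash.cc, FarmhashFingerprint64(data) is the 8-byte little-endian serialization of the same 64-bit fingerprint that Farmhash64(data) returns as an integer. A hedged sketch of that relationship (assumes the cobalt source tree and build setup; CheckFingerprintAgreement is an illustrative name):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>

#include "src/lib/util/hash.h"

// Byte i of FarmhashFingerprint64's output should be byte i (little-endian)
// of the Farmhash64 integer fingerprint.
void CheckFingerprintAgreement(const std::string& data) {
  const uint64_t h = cobalt::util::Farmhash64(data);
  const std::string bytes = cobalt::util::FarmhashFingerprint64(data);
  assert(bytes.size() == 8);
  for (size_t i = 0; i < 8; ++i) {
    assert(static_cast<unsigned char>(bytes[i]) == ((h >> (8 * i)) & 0xFF));
  }
}

int main() { CheckFingerprintAgreement("cobalt"); }
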
diff --git a/src/lib/util/hash_test.cc b/src/lib/util/hash_test.cc
new file mode 100644
index 0000000..96d96ef
--- /dev/null
+++ b/src/lib/util/hash_test.cc
@@ -0,0 +1,51 @@
+// Copyright 2024 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/lib/util/hash.h"
+
+#include <string>
+
+#include <gtest/gtest.h>
+
+namespace cobalt::util {
+namespace {
+
+char Byte(const unsigned char c) { return static_cast<char>(c); }
+
+TEST(HashTest, FarmhashFingerprint) {
+  EXPECT_EQ(FarmhashFingerprint("cobalt"), std::string({
+                                               Byte(0xf1),
+                                               Byte(0xc1),
+                                               Byte(0xef),
+                                               Byte(0x24),
+                                               Byte(0x81),
+                                               Byte(0x79),
+                                               Byte(0xa1),
+                                               Byte(0x91),
+                                               Byte(0x02),
+                                               Byte(0x04),
+                                               Byte(0xdb),
+                                               Byte(0x03),
+                                               Byte(0xf9),
+                                               Byte(0x8f),
+                                               Byte(0x2c),
+                                               Byte(0xd7),
+                                           }));
+}
+
+TEST(HashTest, FarmhashFingerprint64) {
+  EXPECT_EQ(FarmhashFingerprint64("cobalt"), std::string({
+                                                 Byte(0x8b),
+                                                 Byte(0x46),
+                                                 Byte(0x65),
+                                                 Byte(0xd5),
+                                                 Byte(0x07),
+                                                 Byte(0x36),
+                                                 Byte(0x74),
+                                                 Byte(0x2e),
+                                             }));
+}
+
+}  // namespace
+}  // namespace cobalt::util
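
Read little-endian, the expected FarmhashFingerprint64 bytes in the test above correspond to the single integer 0x2e743607d565468b. A small standalone sketch of that decoding (names are illustrative):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

// Fold little-endian bytes back into the uint64_t fingerprint they encode.
uint64_t FromLittleEndian(const std::string& bytes) {
  uint64_t value = 0;
  for (size_t i = 0; i < bytes.size() && i < 8; ++i) {
    value |= static_cast<uint64_t>(static_cast<unsigned char>(bytes[i])) << (8 * i);
  }
  return value;
}

int main() {
  const std::string bytes({'\x8b', '\x46', '\x65', '\xd5', '\x07', '\x36', '\x74', '\x2e'});
  // Prints 2e743607d565468b, the integer form of the FF64 test vector above.
  std::printf("%016llx\n", static_cast<unsigned long long>(FromLittleEndian(bytes)));
}
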
diff --git a/src/lib/util/hybrid_tink_encrypted_message_maker.cc b/src/lib/util/hybrid_tink_encrypted_message_maker.cc
index d13c88e..1734244 100644
--- a/src/lib/util/hybrid_tink_encrypted_message_maker.cc
+++ b/src/lib/util/hybrid_tink_encrypted_message_maker.cc
@@ -22,7 +22,7 @@
 // Make a HybridTinkEncryptedMessageMaker from a serialized encoded keyset.
 lib::statusor::StatusOr<util::NotNullUniquePtr<EncryptedMessageMaker>>
 MakeHybridTinkEncryptedMessageMaker(
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     const std::string& public_keyset_bytes, const std::string& context_info, uint32_t key_index) {
   auto status = ::crypto::tink::HybridConfig::Register();
   if (!status.ok()) {
diff --git a/src/lib/util/thread.cc b/src/lib/util/thread.cc
new file mode 100644
index 0000000..8ef63af
--- /dev/null
+++ b/src/lib/util/thread.cc
@@ -0,0 +1,32 @@
+// Copyright 2023 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/lib/util/thread.h"
+
+#include <string>
+#include <thread>
+
+#ifdef __Fuchsia__
+#include <zircon/syscalls.h>
+#include <zircon/syscalls/object.h>
+#include <zircon/threads.h>
+
+namespace cobalt::util {
+
+void NameThread(const std::string& name, std::thread& thread) {
+  zx_handle_t thread_handle = native_thread_get_zx_handle(thread.native_handle());
+  zx_object_set_property(thread_handle, ZX_PROP_NAME, name.c_str(), name.size());
+}
+
+}  // namespace cobalt::util
+
+#else
+
+namespace cobalt::util {
+
+void NameThread(const std::string& name, std::thread& thread) {}
+
+}  // namespace cobalt::util
+
+#endif  // __Fuchsia__
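
A hedged usage sketch of NameThread (the worker body is hypothetical; on Fuchsia the call sets ZX_PROP_NAME on the backing zircon thread, and on other hosts it is a no-op per the #else branch above):

#include <chrono>
#include <thread>

#include "src/lib/util/thread.h"

int main() {
  // Start the thread first: NameThread operates on the running thread's
  // native handle.
  std::thread worker([] { std::this_thread::sleep_for(std::chrono::milliseconds(10)); });
  cobalt::util::NameThread("example-worker", worker);
  worker.join();
}
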
diff --git a/src/lib/util/thread.h b/src/lib/util/thread.h
new file mode 100644
index 0000000..500cca4
--- /dev/null
+++ b/src/lib/util/thread.h
@@ -0,0 +1,17 @@
+// Copyright 2023 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef COBALT_SRC_LIB_UTIL_THREAD_H_
+#define COBALT_SRC_LIB_UTIL_THREAD_H_
+
+#include <string>
+#include <thread>
+
+namespace cobalt::util {
+
+void NameThread(const std::string& name, std::thread& thread);
+
+}  // namespace cobalt::util
+
+#endif  // COBALT_SRC_LIB_UTIL_THREAD_H_
diff --git a/src/local_aggregation/BUILD.gn b/src/local_aggregation/BUILD.gn
index 08edbec..4fdbab3 100644
--- a/src/local_aggregation/BUILD.gn
+++ b/src/local_aggregation/BUILD.gn
@@ -143,6 +143,7 @@
     "$cobalt_root/src/lib/util:clock",
     "$cobalt_root/src/lib/util:datetime_util",
     "$cobalt_root/src/lib/util:not_null",
+    "$cobalt_root/src/lib/util:thread",
     "$cobalt_root/src/logger:project_context_factory",
     "$cobalt_root/src/public/lib/statusor",
     "aggregation_procedures",
diff --git a/src/local_aggregation/aggregation_procedures/aggregation_procedure.cc b/src/local_aggregation/aggregation_procedures/aggregation_procedure.cc
index 72876bc..01c3fca 100644
--- a/src/local_aggregation/aggregation_procedures/aggregation_procedure.cc
+++ b/src/local_aggregation/aggregation_procedures/aggregation_procedure.cc
@@ -329,6 +329,23 @@
   return current_time_info;
 }
 
+namespace {
+
+// Creates the aggregate data needed to generate a single string histogram observation for an
+// aggregation period bucket. Uses legacy hashes if any are present, and Farmhash Fingerprint 64
+// hashes otherwise.
+//
+// TODO(https://fxbug.dev/322409910): Delete usage of legacy hash after clients no longer
+// store them.
+AggregateDataToGenerate GetAggregateDataToGenerateForFF64Migration(AggregationPeriodBucket *agg) {
+  if (!agg->string_hashes().empty()) {
+    return AggregateDataToGenerate(agg->string_hashes(), /*use_legacy_hash=*/true);
+  }
+  return AggregateDataToGenerate(agg->string_hashes_ff64(), /*use_legacy_hash=*/false);
+}
+
+}  // namespace
+
 std::map<uint64_t, std::vector<AggregateDataToGenerate>>
 AggregationProcedure::GetAggregateDataToGenerate(const util::TimeInfo &time_info,
                                                  ReportAggregate &aggregate) const {
@@ -348,7 +365,7 @@
           continue;
         }
         AggregationPeriodBucket *agg = &(*aggregate.mutable_daily()->mutable_by_day_index())[i];
-        AggregateDataToGenerate agg_to_generate(agg->string_hashes());
+        AggregateDataToGenerate agg_to_generate = GetAggregateDataToGenerateForFF64Migration(agg);
         for (SystemProfileAggregate &system_profile_aggregate :
              *agg->mutable_system_profile_aggregates()) {
           // For SELECT_FIRST and SELECT_LAST there should only be one SystemProfileAggregate, but
@@ -383,7 +400,7 @@
         AggregationPeriodBucket *agg = &(*aggregate.mutable_daily()->mutable_by_day_index())[i];
         for (SystemProfileAggregate &system_profile_aggregate :
              *agg->mutable_system_profile_aggregates()) {
-          AggregateDataToGenerate agg_to_generate(agg->string_hashes());
+          AggregateDataToGenerate agg_to_generate = GetAggregateDataToGenerateForFF64Migration(agg);
           for (EventCodesAggregateData &data : *system_profile_aggregate.mutable_by_event_code()) {
             agg_to_generate.aggregate_data.push_back(data);
           }
@@ -398,7 +415,7 @@
           &(*aggregate.mutable_hourly()->mutable_by_hour_id())[start_time_info.hour_id];
       for (SystemProfileAggregate &system_profile_aggregate :
            *agg->mutable_system_profile_aggregates()) {
-        AggregateDataToGenerate agg_to_generate(agg->string_hashes());
+        AggregateDataToGenerate agg_to_generate = GetAggregateDataToGenerateForFF64Migration(agg);
         for (EventCodesAggregateData &data : *system_profile_aggregate.mutable_by_event_code()) {
           agg_to_generate.aggregate_data.push_back(data);
         }
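
The selection rule in GetAggregateDataToGenerateForFF64Migration is "legacy wins per bucket": a bucket that already holds legacy hashes keeps producing legacy-hash aggregates, and only a bucket with no legacy hashes uses string_hashes_ff64. A minimal sketch of that per-bucket rule with a stand-in bucket type (FakeBucket and UseLegacyHash are illustrative, not the proto API):

#include <cassert>
#include <string>
#include <vector>

// Stand-in for AggregationPeriodBucket's two hash lists.
struct FakeBucket {
  std::vector<std::string> string_hashes;       // legacy 128-bit fingerprints
  std::vector<std::string> string_hashes_ff64;  // Farmhash Fingerprint 64
};

// Mirrors the per-bucket rule: prefer legacy hashes whenever any are stored.
bool UseLegacyHash(const FakeBucket& bucket) { return !bucket.string_hashes.empty(); }

int main() {
  FakeBucket fresh;  // no legacy hashes yet -> FF64
  fresh.string_hashes_ff64 = {"\x01"};
  assert(!UseLegacyHash(fresh));

  FakeBucket carried_over;  // already holds legacy hashes -> keep legacy
  carried_over.string_hashes = {"\x02"};
  assert(UseLegacyHash(carried_over));
}
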
diff --git a/src/local_aggregation/aggregation_procedures/aggregation_procedure.h b/src/local_aggregation/aggregation_procedures/aggregation_procedure.h
index a094368..23b6c0b 100644
--- a/src/local_aggregation/aggregation_procedures/aggregation_procedure.h
+++ b/src/local_aggregation/aggregation_procedures/aggregation_procedure.h
@@ -33,13 +33,17 @@
 // For multi-day reports, multiple of these objects are needed to generate the observation. When
 // generating observations, all AggregateDataToGenerate and all the aggregate_data they contain must
 // be for the same system profile.
+//
+// TODO(https://fxbug.dev/322409910): Delete |use_legacy_hash| once clients no longer store
+// legacy hashes.
 struct AggregateDataToGenerate {
   std::vector<std::reference_wrapper<EventCodesAggregateData>> aggregate_data;
   const google::protobuf::RepeatedPtrField<std::string> &string_hashes;
+  bool use_legacy_hash;
 
   explicit AggregateDataToGenerate(
-      const google::protobuf::RepeatedPtrField<std::string> &string_hashes)
-      : string_hashes(string_hashes) {}
+      const google::protobuf::RepeatedPtrField<std::string> &string_hashes, bool use_legacy_hash)
+      : string_hashes(string_hashes), use_legacy_hash(use_legacy_hash) {}
 
   // Make the struct move only
   AggregateDataToGenerate(AggregateDataToGenerate const &) = delete;
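
AggregateDataToGenerate stores a reference into the owning bucket's repeated string_hashes field, which is why copying is deleted: a copy would silently alias the same storage. A stand-in sketch of the same move-only shape (FakeAggregateData is illustrative, with plain containers instead of protobuf types):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

// Stand-in for AggregateDataToGenerate: holds a reference into the owning
// bucket's hash list, so copies are deleted and only moves are allowed.
struct FakeAggregateData {
  const std::vector<std::string>& string_hashes;
  bool use_legacy_hash;

  FakeAggregateData(const std::vector<std::string>& hashes, bool legacy)
      : string_hashes(hashes), use_legacy_hash(legacy) {}

  FakeAggregateData(const FakeAggregateData&) = delete;
  FakeAggregateData& operator=(const FakeAggregateData&) = delete;
  FakeAggregateData(FakeAggregateData&&) = default;
};

int main() {
  const std::vector<std::string> hashes = {"aa"};
  FakeAggregateData data(hashes, /*legacy=*/false);
  FakeAggregateData moved = std::move(data);  // moving is allowed; copying is not
  assert(!moved.use_legacy_hash);
}
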
diff --git a/src/local_aggregation/aggregation_procedures/at_least_once_aggregation_procedure.cc b/src/local_aggregation/aggregation_procedures/at_least_once_aggregation_procedure.cc
index 4e8919c..9deb18d 100644
--- a/src/local_aggregation/aggregation_procedures/at_least_once_aggregation_procedure.cc
+++ b/src/local_aggregation/aggregation_procedures/at_least_once_aggregation_procedure.cc
@@ -18,7 +18,7 @@
 void AtLeastOnceAggregationProcedure::UpdateAggregateData(
     const logger::EventRecord & /*event_record*/, AggregateData &aggregate_data,
     AggregationPeriodBucket & /*bucket*/) {
-  // TODO(b/278938040): Handle the case where event_record is malformed.
+  // TODO(https://fxbug.dev/278938040): Handle the case where event_record is malformed.
   aggregate_data.mutable_at_least_once()->set_at_least_once(true);
 }
 
diff --git a/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure.cc b/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure.cc
index 28fc6f4..8da595f 100644
--- a/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure.cc
+++ b/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure.cc
@@ -30,14 +30,22 @@
   Map<uint32_t, UniqueString> *unique_strings =
       aggregate_data.mutable_unique_strings()->mutable_unique_strings();
 
+  // TODO(https://fxbug.dev/322409910): Delete usage of legacy hashes after clients no longer
+  // store them. Continue to use legacy hashes if they're already stored in the current
+  // aggregation period bucket. Use Farmhash Fingerprint 64 hashes otherwise.
+  bool use_legacy_hash = !bucket.string_hashes().empty();
   std::string bytes =
-      util::FarmhashFingerprint(event_record.event()->string_event().string_value());
+      use_legacy_hash
+          ? util::FarmhashFingerprint(event_record.event()->string_event().string_value())
+          : util::FarmhashFingerprint64(event_record.event()->string_event().string_value());
+  const google::protobuf::RepeatedPtrField<std::string> &string_hashes =
+      use_legacy_hash ? bucket.string_hashes() : bucket.string_hashes_ff64();
 
   // Check if the current string event value's byte representation has appeared before in
   // the string hashes of the current period bucket, if so, then initialize a UniqueString message
   // if the index of the string hash doesn't exist in the current UniqueString mapping.
-  for (int i = 0; i < bucket.string_hashes_size(); i++) {
-    if (bucket.string_hashes(i) == bytes) {
+  for (int i = 0; i < string_hashes.size(); i++) {
+    if (string_hashes.at(i) == bytes) {
       if (!unique_strings->contains(i)) {
         (*unique_strings)[i] = UniqueString();
       }
@@ -45,10 +53,14 @@
     }
   }
 
-  if (bucket.string_hashes_size() < string_buffer_max_) {
+  if (string_hashes.size() < string_buffer_max_) {
     // Add new entry
-    (*unique_strings)[bucket.string_hashes_size()] = UniqueString();
-    bucket.add_string_hashes(bytes);
+    (*unique_strings)[string_hashes.size()] = UniqueString();
+    if (use_legacy_hash) {
+      bucket.add_string_hashes(bytes);
+    } else {
+      bucket.add_string_hashes_ff64(bytes);
+    }
   }
 }
 
@@ -92,7 +104,27 @@
   // seen_hashes is a mapping from a string hash to its hash index, which correlates to the index
   // of string hashes in the hashes vector above.
   std::map<std::string, uint32_t> seen_hashes;
+
+  // Observation generation should use Farmhash Fingerprint 64 if a multi-day report has a mix of
+  // legacy and FF64 hashes across multiple days. Use legacy hashes only if |buckets| is non-empty
+  // and every bucket stores legacy hashes.
+  //
+  // TODO(https://fxbug.dev/322409910): Delete usage of legacy hash after clients no longer
+  // store them.
+  const bool generate_observation_use_legacy_hash =
+      !buckets.empty() && std::all_of(buckets.begin(), buckets.end(), [](const auto &b) {
+        return static_cast<bool>(b.use_legacy_hash);
+      });
+
   for (const AggregateDataToGenerate &bucket : buckets) {
+    // Drop aggregated data for any bucket that doesn't match the chosen string hash kind.
+    // Note: buckets using FF64 string hashes are never expected to be dropped, because FF64
+    // takes precedence over the legacy hash when determining
+    // `generate_observation_use_legacy_hash`.
+    if (generate_observation_use_legacy_hash != bucket.use_legacy_hash) {
+      continue;
+    }
+
     for (const EventCodesAggregateData &aggregate_data : bucket.aggregate_data) {
       std::vector<uint32_t> event_vector(aggregate_data.event_codes().begin(),
                                          aggregate_data.event_codes().end());
@@ -139,7 +171,8 @@
     return {nullptr};
   }
 
-  return logger::encoder::EncodeStringHistogramObservation(hashes, data);
+  return logger::encoder::EncodeStringHistogramObservation(hashes, data,
+                                                           generate_observation_use_legacy_hash);
 }
 
 void AtLeastOnceStringAggregationProcedure::ObservationsCommitted(
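
Across a multi-day report the decision inverts: observation generation uses legacy hashes only when every contributing bucket is legacy, so any FF64 bucket (or an empty bucket list) forces FF64, and mismatched legacy buckets are then dropped. A compact sketch of that predicate, using plain bools in place of AggregateDataToGenerate::use_legacy_hash:

#include <algorithm>
#include <cassert>
#include <vector>

// Mirrors generate_observation_use_legacy_hash: true only for a non-empty,
// all-legacy set of buckets.
bool GenerateWithLegacyHash(const std::vector<bool>& use_legacy_per_bucket) {
  return !use_legacy_per_bucket.empty() &&
         std::all_of(use_legacy_per_bucket.begin(), use_legacy_per_bucket.end(),
                     [](bool legacy) { return legacy; });
}

int main() {
  assert(!GenerateWithLegacyHash({}));                   // no buckets -> FF64
  assert(!GenerateWithLegacyHash({true, false, true}));  // mixed -> FF64; legacy buckets dropped
  assert(GenerateWithLegacyHash({true, true}));          // all legacy -> legacy
}
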
diff --git a/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure_test.cc b/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure_test.cc
index d6bd1bb..ae887c5 100644
--- a/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure_test.cc
+++ b/src/local_aggregation/aggregation_procedures/at_least_once_string_aggregation_procedure_test.cc
@@ -49,7 +49,72 @@
   }
 };
 
-TEST_F(AtLeastOnceStringAggregationProcedureTest, UpdateAggregate1DayReport) {
+// Test that local aggregation continues to use the legacy hash when a legacy hash is already
+// stored.
+//
+// TODO(https://fxbug.dev/322409910): Delete this test after clients stop storing legacy hash.
+TEST_F(AtLeastOnceStringAggregationProcedureTest, UpdateAggregate1DayReportLegacy) {
+  uint32_t metric_id = kStringMetricMetricId;
+  int report_index = kStringMetricUniqueDeviceStringCountsReport1DayReportIndex;
+  util::PinnedUniquePtr<AggregationProcedure> procedure(GetProcedureFor(metric_id, report_index));
+
+  ReportAggregate report_aggregate;
+  const uint32_t kDayIndex = 10000;
+  const uint64_t system_profile_hash = uint64_t{2222};
+  const std::vector<std::string> kTestStrings = {
+      "Nunc dictum justo ac arcu.",
+      "Suspendisse ullamcorper mi vel pulvinar dictum.",
+  };
+  const std::vector<std::string> kTestStrings1 = {
+      kTestStrings.at(0),
+  };
+  const std::vector<std::string> kTestStrings2 = {
+      kTestStrings.at(0),
+      kTestStrings.at(1),
+  };
+  const std::map<uint32_t, std::vector<std::string>> events_to_strings = {
+      {0, kTestStrings1},
+      {2, kTestStrings2},
+  };
+  ASSERT_GE(GetReportDef(metric_id, report_index).event_vector_buffer_max(),
+            events_to_strings.size());
+
+  // Mock a stored legacy hash in the current aggregation period bucket. The legacy hash should
+  // then be used for local aggregation.
+  AggregationPeriodBucket bucket;
+  bucket.add_string_hashes(util::FarmhashFingerprint(kTestStrings[0]));
+  report_aggregate.mutable_daily()->mutable_by_day_index()->insert({kDayIndex, bucket});
+
+  AddStringEventsForDay(kDayIndex, events_to_strings, system_profile_hash, *procedure,
+                        report_aggregate);
+
+  std::vector<std::string> expected_hashes;
+  expected_hashes.reserve(kTestStrings.size());
+  for (const std::string& string : kTestStrings) {
+    expected_hashes.push_back(util::FarmhashFingerprint(string));
+  }
+
+  ASSERT_TRUE(report_aggregate.daily().by_day_index().contains(kDayIndex));
+  EXPECT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_size(),
+            kTestStrings.size());
+  EXPECT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_ff64_size(), 0u);
+  EXPECT_THAT(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes(),
+              UnorderedElementsAreArray(expected_hashes));
+  ASSERT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).system_profile_aggregates_size(),
+            1u);
+  const SystemProfileAggregate& system_profile_agg =
+      report_aggregate.daily().by_day_index().at(kDayIndex).system_profile_aggregates(0);
+  ASSERT_EQ(system_profile_agg.system_profile_hash(), system_profile_hash);
+
+  for (int i = 0; i < events_to_strings.size(); i++) {
+    std::vector<std::string> test_strings =
+        events_to_strings.at(system_profile_agg.by_event_code(i).event_codes(0));
+    ASSERT_EQ(system_profile_agg.by_event_code(i).data().unique_strings().unique_strings().size(),
+              test_strings.size());
+  }
+}
+
+TEST_F(AtLeastOnceStringAggregationProcedureTest, UpdateAggregate1DayReportFF64) {
   uint32_t metric_id = kStringMetricMetricId;
   int report_index = kStringMetricUniqueDeviceStringCountsReport1DayReportIndex;
   util::PinnedUniquePtr<AggregationProcedure> procedure(GetProcedureFor(metric_id, report_index));
@@ -81,13 +146,14 @@
   std::vector<std::string> expected_hashes;
   expected_hashes.reserve(kTestStrings.size());
   for (const std::string& string : kTestStrings) {
-    expected_hashes.push_back(util::FarmhashFingerprint(string));
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
   }
 
   ASSERT_TRUE(report_aggregate.daily().by_day_index().contains(kDayIndex));
-  EXPECT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_size(),
+  EXPECT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_size(), 0u);
+  EXPECT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_ff64_size(),
             kTestStrings.size());
-  EXPECT_THAT(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes(),
+  EXPECT_THAT(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_ff64(),
               UnorderedElementsAreArray(expected_hashes));
   ASSERT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).system_profile_aggregates_size(),
             1u);
@@ -178,7 +244,113 @@
   EXPECT_EQ(merged_data.unique_strings().unique_strings().at(2).last_day_index(), 100);
 }
 
-TEST_F(AtLeastOnceStringAggregationProcedureTest, GenerateObservation1DayReport) {
+TEST_F(AtLeastOnceStringAggregationProcedureTest, GenerateObservation1DayReportFF64) {
+  uint32_t metric_id = kStringMetricMetricId;
+  int report_index = kStringMetricUniqueDeviceStringCountsReport1DayReportIndex;
+  util::PinnedUniquePtr<AggregationProcedure> procedure(GetProcedureFor(metric_id, report_index));
+
+  const uint64_t system_profile_hash = uint64_t{2222};
+  const uint32_t kDayIndex = 10000;
+  util::TimeInfo time_info;
+  time_info.day_index = kDayIndex;
+
+  ReportAggregate report_aggregate;
+  const std::vector<std::string> kTestStrings = {
+      "Nunc dictum justo ac arcu.",
+      "Suspendisse ullamcorper mi vel pulvinar dictum.",
+      "Aenean feugiat consectetur vestibulum.",
+  };
+  const std::vector<std::string> kTestStrings1 = {
+      kTestStrings.at(0),
+  };
+  const std::vector<std::string> kTestStrings2 = {
+      kTestStrings.at(1),
+      kTestStrings.at(2),
+  };
+  const std::vector<std::string> kTestStrings3 = {
+      kTestStrings.at(2),
+  };
+  const std::map<uint32_t, std::vector<std::string>> events_to_strings = {
+      {0, kTestStrings1},
+      {2, kTestStrings2},
+      {5, kTestStrings3},
+  };
+  ASSERT_GE(GetReportDef(metric_id, report_index).event_vector_buffer_max(),
+            events_to_strings.size());
+
+  const std::vector<std::string> kTestHashes1 = {
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+  };
+  const std::vector<std::string> kTestHashes2 = {
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(2)),
+  };
+  const std::vector<std::string> kTestHashes3 = {
+      util::FarmhashFingerprint64(kTestStrings.at(2)),
+  };
+  const std::map<std::vector<std::string>, std::vector<std::string>> strings_to_hashes = {
+      {kTestStrings1, kTestHashes1},
+      {kTestStrings2, kTestHashes2},
+      {kTestStrings3, kTestHashes3},
+  };
+  AddStringEventsForDay(kDayIndex, events_to_strings, system_profile_hash, *procedure,
+                        report_aggregate);
+
+  lib::statusor::StatusOr<std::vector<ObservationAndSystemProfile>> observations_or =
+      procedure->GenerateObservations(time_info, report_aggregate);
+  ASSERT_EQ(observations_or.status().error_code(), StatusCode::OK);
+  std::vector<ObservationAndSystemProfile> observations = std::move(observations_or).value();
+
+  ASSERT_EQ(observations.size(), 1u);
+  EXPECT_EQ(observations[0].system_profile_hash, system_profile_hash);
+  ASSERT_TRUE(observations[0].observation->has_string_histogram());
+  const StringHistogramObservation& histogram = observations[0].observation->string_histogram();
+  ASSERT_EQ(histogram.string_histograms_size(), events_to_strings.size());
+
+  std::vector<std::string> expected_hashes;
+  expected_hashes.reserve(kTestStrings.size());
+  for (const std::string& string : kTestStrings) {
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
+  }
+  EXPECT_THAT(histogram.string_hashes_ff64(), UnorderedElementsAreArray(expected_hashes));
+
+  for (const IndexHistogram& value : histogram.string_histograms()) {
+    // These string vectors represent the expected (test) vectors of strings and hashes that the
+    // current event vector histogram should have.
+    const std::vector<std::string>& test_strings = events_to_strings.at(value.event_codes(0));
+    const std::vector<std::string>& test_hashes = strings_to_hashes.at(test_strings);
+
+    // This creates a vector of string hashes by fetching the string hashes that correspond to each
+    // bucket index found in the current event vector histogram.
+    std::vector<std::string> actualHashes;
+    actualHashes.reserve(test_hashes.size());
+    for (const uint32_t index : value.bucket_indices()) {
+      actualHashes.push_back(histogram.string_hashes_ff64(static_cast<int>(index)));
+    }
+
+    // Assert that the created (actual) string hash vector has all of the same string hashes as the
+    // expected (test) string hash vector.
+    ASSERT_THAT(actualHashes, UnorderedElementsAreArray(test_hashes));
+  }
+
+  // Check that obsolete aggregates get cleaned up.
+  procedure->ObservationsCommitted(report_aggregate, time_info, system_profile_hash);
+  ASSERT_EQ(report_aggregate.daily().by_day_index_size(), 0);
+  EXPECT_FALSE(report_aggregate.daily().by_day_index().contains(kDayIndex));
+
+  // Check that calling observation generation the next day generates no observation.
+  time_info.day_index++;
+  observations_or = procedure->GenerateObservations(time_info, report_aggregate);
+  ASSERT_EQ(observations_or.status().error_code(), StatusCode::OK);
+  observations = std::move(observations_or).value();
+  EXPECT_EQ(observations.size(), 0u);
+}
+
+// Test that the legacy hash is used for 1-day report observation generation when a legacy hash
+// is stored for the local aggregation period.
+//
+// TODO(https://fxbug.dev/322409910): Delete this test after clients stop storing legacy hash.
+TEST_F(AtLeastOnceStringAggregationProcedureTest, GenerateObservation1DayReportLegacy) {
   uint32_t metric_id = kStringMetricMetricId;
   int report_index = kStringMetricUniqueDeviceStringCountsReport1DayReportIndex;
   util::PinnedUniquePtr<AggregationProcedure> procedure(GetProcedureFor(metric_id, report_index));
@@ -227,6 +399,13 @@
       {kTestStrings2, kTestHashes2},
       {kTestStrings3, kTestHashes3},
   };
+
+  // Mock a stored legacy hash in the current aggregation period bucket. The legacy hash should
+  // then be used for local aggregation.
+  AggregationPeriodBucket bucket;
+  bucket.add_string_hashes(util::FarmhashFingerprint(kTestStrings[0]));
+  report_aggregate.mutable_daily()->mutable_by_day_index()->insert({kDayIndex, bucket});
+
   AddStringEventsForDay(kDayIndex, events_to_strings, system_profile_hash, *procedure,
                         report_aggregate);
 
@@ -310,11 +489,11 @@
             events_to_strings.size());
 
   const std::vector<std::string> kTestHashes1 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
   };
   const std::vector<std::string> kTestHashes2 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
-      util::FarmhashFingerprint(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
   };
   const std::map<std::vector<std::string>, std::vector<std::string>> strings_to_hashes = {
       {kTestStrings1, kTestHashes1},
@@ -337,9 +516,9 @@
   std::vector<std::string> expected_hashes;
   expected_hashes.reserve(kTestStrings.size());
   for (const std::string& string : kTestStrings) {
-    expected_hashes.push_back(util::FarmhashFingerprint(string));
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
   }
-  EXPECT_THAT(histogram.string_hashes(), UnorderedElementsAreArray(expected_hashes));
+  EXPECT_THAT(histogram.string_hashes_ff64(), UnorderedElementsAreArray(expected_hashes));
 
   for (const IndexHistogram& value : histogram.string_histograms()) {
     // These string vectors represent the expected (test) vectors of strings and hashes that the
@@ -352,7 +531,7 @@
     std::vector<std::string> actualHashes;
     actualHashes.reserve(test_hashes.size());
     for (const uint32_t index : value.bucket_indices()) {
-      actualHashes.push_back(histogram.string_hashes(static_cast<int>(index)));
+      actualHashes.push_back(histogram.string_hashes_ff64(static_cast<int>(index)));
     }
 
     // Assert that the created (actual) string hash vector has all of the same string hashes as the
@@ -397,6 +576,123 @@
   EXPECT_FALSE(report_aggregate.daily().by_day_index().contains(kDayIndex));
 }
 
+// Test that aggregated data with legacy string hashes is dropped when generating an observation
+// for a 7-day report.
+//
+// TODO(https://fxbug.dev/322409910): Delete this test after clients stop storing legacy hash.
+TEST_F(AtLeastOnceStringAggregationProcedureTest, GenerateObservation7DaysReportDropLegacy) {
+  uint32_t metric_id = kStringMetricMetricId;
+  int report_index = kStringMetricUniqueDeviceStringCountsReport7DaysReportIndex;
+  util::PinnedUniquePtr<AggregationProcedure> procedure(GetProcedureFor(metric_id, report_index));
+
+  const uint64_t system_profile_hash = uint64_t{2222};
+
+  ReportAggregate report_aggregate;
+  const std::vector<std::string> kTestStrings = {
+      "Nunc dictum justo ac arcu.",
+      "Suspendisse ullamcorper mi vel pulvinar dictum.",
+      "Integer a ullamcorper dolor.",
+  };
+  const std::vector<std::string> kTestStrings1 = {
+      kTestStrings.at(0),
+  };
+  const std::vector<std::string> kTestStrings2 = {
+      kTestStrings.at(1),
+      kTestStrings.at(2),
+  };
+  const std::vector<std::string> kTestHashes1 = {
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+  };
+  const std::vector<std::string> kTestHashes2 = {
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(2)),
+  };
+  const std::map<std::vector<std::string>, std::vector<std::string>> strings_to_hashes = {
+      {kTestStrings1, kTestHashes1},
+      {kTestStrings2, kTestHashes2},
+  };
+
+  const uint32_t kDayIndexDay1 = 10000;
+
+  const std::map<uint32_t, std::vector<std::string>> events_to_strings_day1 = {
+      {1, kTestStrings1},
+  };
+
+  // Mock a stored legacy hash in the aggregation period bucket for Day 1. The legacy hash should
+  // then be used for Day 1 local aggregation.
+  AggregationPeriodBucket bucket;
+  bucket.add_string_hashes(util::FarmhashFingerprint(kTestStrings[0]));
+  report_aggregate.mutable_daily()->mutable_by_day_index()->insert({kDayIndexDay1, bucket});
+  AddStringEventsForDay(kDayIndexDay1, events_to_strings_day1, system_profile_hash, *procedure,
+                        report_aggregate);
+
+  const uint32_t kDayIndexDay2 = kDayIndexDay1 + 1;
+  const std::map<uint32_t, std::vector<std::string>> events_to_strings_day2 = {
+      {0, kTestStrings1},
+      {2, kTestStrings2},
+  };
+  AddStringEventsForDay(kDayIndexDay2, events_to_strings_day2, system_profile_hash, *procedure,
+                        report_aggregate);
+
+  // The observation should only contain data from Day 2.
+  const std::map<uint32_t, std::vector<std::string>> events_to_hashes = {
+      {0, kTestHashes1},
+      {2, kTestHashes2},
+  };
+
+  std::vector<std::string> expected_hashes;
+  expected_hashes.reserve(kTestStrings.size());
+  for (const std::string& string : kTestStrings) {
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
+  }
+
+  util::TimeInfo time_info;
+  time_info.day_index = kDayIndexDay2;
+  lib::statusor::StatusOr<std::vector<ObservationAndSystemProfile>> observations_or =
+      procedure->GenerateObservations(time_info, report_aggregate);
+  ASSERT_EQ(observations_or.status().error_code(), StatusCode::OK);
+  std::vector<ObservationAndSystemProfile> observations = std::move(observations_or).value();
+
+  ASSERT_EQ(observations.size(), 1u);
+  EXPECT_EQ(observations[0].system_profile_hash, system_profile_hash);
+  ASSERT_TRUE(observations[0].observation->has_string_histogram());
+  const StringHistogramObservation& histogram = observations[0].observation->string_histogram();
+
+  // Check that the number of string histograms for the second day index equals the number of
+  // unique events on the second day: the first day uses the legacy hash, so its data is
+  // dropped.
+  ASSERT_EQ(histogram.string_histograms_size(), events_to_hashes.size());
+  EXPECT_THAT(histogram.string_hashes_ff64(), UnorderedElementsAreArray(expected_hashes));
+
+  for (const IndexHistogram& value : histogram.string_histograms()) {
+    // These string vectors represent the expected (test) vectors of string hashes that the
+    // current event vector histogram should have.
+    const std::vector<std::string>& test_hashes = events_to_hashes.at(value.event_codes(0));
+
+    // This creates a vector of string hashes by fetching the string hashes that correspond to
+    // each bucket index found in the current event vector histogram.
+    std::vector<std::string> actualHashes;
+    actualHashes.reserve(test_hashes.size());
+    for (const uint32_t index : value.bucket_indices()) {
+      actualHashes.push_back(histogram.string_hashes_ff64(static_cast<int>(index)));
+    }
+
+    // Assert that the created (actual) string hash vector has all of the same string hashes as
+    // the expected (test) string hash vector.
+    ASSERT_THAT(actualHashes, UnorderedElementsAreArray(test_hashes));
+  }
+
+  // Commit observation
+  procedure->ObservationsCommitted(report_aggregate, time_info, system_profile_hash);
+
+  // After 7 days the observation is no longer generated.
+  time_info.day_index = kDayIndexDay2 + 7;
+  observations_or = procedure->GenerateObservations(time_info, report_aggregate);
+  ASSERT_EQ(observations_or.status().error_code(), StatusCode::OK);
+  observations = std::move(observations_or).value();
+  EXPECT_EQ(observations.size(), 0u);
+}
+
 TEST_F(AtLeastOnceStringAggregationProcedureTest, GenerateObservation7DaysReport) {
   uint32_t metric_id = kStringMetricMetricId;
   int report_index = kStringMetricUniqueDeviceStringCountsReport7DaysReportIndex;
@@ -426,11 +722,11 @@
             events_to_strings.size());
 
   const std::vector<std::string> kTestHashes1 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
   };
   const std::vector<std::string> kTestHashes2 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
-      util::FarmhashFingerprint(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
   };
   const std::map<std::vector<std::string>, std::vector<std::string>> strings_to_hashes = {
       {kTestStrings1, kTestHashes1},
@@ -442,7 +738,7 @@
   std::vector<std::string> expected_hashes;
   expected_hashes.reserve(kTestStrings.size());
   for (const std::string& string : kTestStrings) {
-    expected_hashes.push_back(util::FarmhashFingerprint(string));
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
   }
 
   for (int i = 0; i < 7; i++) {
@@ -457,7 +753,7 @@
     ASSERT_TRUE(observations[0].observation->has_string_histogram());
     const StringHistogramObservation& histogram = observations[0].observation->string_histogram();
     ASSERT_EQ(histogram.string_histograms_size(), events_to_strings.size());
-    EXPECT_THAT(histogram.string_hashes(), UnorderedElementsAreArray(expected_hashes));
+    EXPECT_THAT(histogram.string_hashes_ff64(), UnorderedElementsAreArray(expected_hashes));
 
     for (const IndexHistogram& value : histogram.string_histograms()) {
       // These string vectors represent the expected (test) vectors of strings and hashes that the
@@ -470,7 +766,7 @@
       std::vector<std::string> actualHashes;
       actualHashes.reserve(test_hashes.size());
       for (const uint32_t index : value.bucket_indices()) {
-        actualHashes.push_back(histogram.string_hashes(static_cast<int>(index)));
+        actualHashes.push_back(histogram.string_hashes_ff64(static_cast<int>(index)));
       }
 
       // Assert that the created (actual) string hash vector has all of the same string hashes as
@@ -545,21 +841,21 @@
       kTestStrings.at(2),
   };
   const std::vector<std::string> kTestHashes1 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
   };
   const std::vector<std::string> kTestHashes2 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
-      util::FarmhashFingerprint(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
   };
   const std::vector<std::string> kTestHashes3 = {
-      util::FarmhashFingerprint(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
   };
   const std::vector<std::string> kTestHashes4 = {
-      util::FarmhashFingerprint(kTestStrings.at(1)),
-      util::FarmhashFingerprint(kTestStrings.at(2)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(2)),
   };
   const std::vector<std::string> kTestHashes5 = {
-      util::FarmhashFingerprint(kTestStrings.at(2)),
+      util::FarmhashFingerprint64(kTestStrings.at(2)),
   };
   const std::map<std::vector<std::string>, std::vector<std::string>> strings_to_hashes = {
       {kTestStrings1, kTestHashes1}, {kTestStrings2, kTestHashes2}, {kTestStrings3, kTestHashes3},
@@ -586,9 +882,9 @@
   // This a vector string hashes that is a combination of hashes across 2 days for a single event
   // vector.
   std::vector<std::string> kTestMultiDayHash = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
-      util::FarmhashFingerprint(kTestStrings.at(1)),
-      util::FarmhashFingerprint(kTestStrings.at(2)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(2)),
   };
   const std::map<uint32_t, std::vector<std::string>> events_to_hashes = {
       {0, kTestHashes1},
@@ -600,7 +896,7 @@
   std::vector<std::string> expected_hashes;
   expected_hashes.reserve(kTestStrings.size());
   for (const std::string& string : kTestStrings) {
-    expected_hashes.push_back(util::FarmhashFingerprint(string));
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
   }
 
   util::TimeInfo time_info;
@@ -621,7 +917,7 @@
   ASSERT_EQ(histogram.string_histograms_size(), events_to_hashes.size());
 
   // Check that the string hashes from the histogram are the same as the expected string hashes.
-  EXPECT_THAT(histogram.string_hashes(), UnorderedElementsAreArray(expected_hashes));
+  EXPECT_THAT(histogram.string_hashes_ff64(), UnorderedElementsAreArray(expected_hashes));
 
   std::set<uint32_t> seen_event_vectors;
   for (const IndexHistogram& value : histogram.string_histograms()) {
@@ -641,7 +937,7 @@
     std::set<std::string> seen_string_hashes;
     for (const uint32_t index : value.bucket_indices()) {
       // Check that each string hash is unique for each event vector.
-      std::string string_hash = histogram.string_hashes(static_cast<int>(index));
+      std::string string_hash = histogram.string_hashes_ff64(static_cast<int>(index));
       ASSERT_FALSE(seen_string_hashes.count(string_hash));
       seen_string_hashes.insert(string_hash);
 
@@ -693,11 +989,11 @@
             events_to_strings.size());
 
   const std::vector<std::string> kTestHashes1 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
   };
   const std::vector<std::string> kTestHashes2 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
-      util::FarmhashFingerprint(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
   };
   const std::map<std::vector<std::string>, std::vector<std::string>> strings_to_hashes = {
       {kTestStrings1, kTestHashes1},
@@ -709,7 +1005,7 @@
   std::vector<std::string> expected_hashes;
   expected_hashes.reserve(kTestStrings.size());
   for (const std::string& string : kTestStrings) {
-    expected_hashes.push_back(util::FarmhashFingerprint(string));
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
   }
 
   for (int i = 0; i < 7; i++) {
@@ -724,7 +1020,7 @@
     ASSERT_TRUE(observations[0].observation->has_string_histogram());
     const StringHistogramObservation& histogram = observations[0].observation->string_histogram();
     ASSERT_EQ(histogram.string_histograms_size(), events_to_strings.size());
-    EXPECT_THAT(histogram.string_hashes(), UnorderedElementsAreArray(expected_hashes));
+    EXPECT_THAT(histogram.string_hashes_ff64(), UnorderedElementsAreArray(expected_hashes));
 
     for (const IndexHistogram& value : histogram.string_histograms()) {
       // These string vectors represent the expected (test) vectors of strings and hashes that the
@@ -737,7 +1033,7 @@
       std::vector<std::string> actualHashes;
       actualHashes.reserve(test_hashes.size());
       for (const uint32_t index : value.bucket_indices()) {
-        actualHashes.push_back(histogram.string_hashes(static_cast<int>(index)));
+        actualHashes.push_back(histogram.string_hashes_ff64(static_cast<int>(index)));
       }
 
       // Assert that the created (actual) string hash vector has all of the same string hashes as
@@ -826,7 +1122,7 @@
                         report_aggregate);
 
   EXPECT_LT(string_buffer_max, kTestStrings.size());
-  EXPECT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_size(),
+  EXPECT_EQ(report_aggregate.daily().by_day_index().at(kDayIndex).string_hashes_ff64_size(),
             string_buffer_max);
 
   const SystemProfileAggregate& system_profile_agg =
@@ -867,14 +1163,14 @@
       kTestStrings.at(5),
   };
   const std::vector<std::string> kTestHashes1 = {
-      util::FarmhashFingerprint(kTestStrings.at(0)),
-      util::FarmhashFingerprint(kTestStrings.at(1)),
-      util::FarmhashFingerprint(kTestStrings.at(2)),
+      util::FarmhashFingerprint64(kTestStrings.at(0)),
+      util::FarmhashFingerprint64(kTestStrings.at(1)),
+      util::FarmhashFingerprint64(kTestStrings.at(2)),
   };
   const std::vector<std::string> kTestHashes2 = {
-      util::FarmhashFingerprint(kTestStrings.at(3)),
-      util::FarmhashFingerprint(kTestStrings.at(4)),
-      util::FarmhashFingerprint(kTestStrings.at(5)),
+      util::FarmhashFingerprint64(kTestStrings.at(3)),
+      util::FarmhashFingerprint64(kTestStrings.at(4)),
+      util::FarmhashFingerprint64(kTestStrings.at(5)),
   };
 
   const uint32_t kDayIndexDay1 = 10000;
@@ -898,8 +1194,8 @@
 
   // The total number of unique strings should be 6, which should be greater than the string buffer
   // max value of 5.
-  EXPECT_GT(report_aggregate.daily().by_day_index().at(kDayIndexDay1).string_hashes_size() +
-                report_aggregate.daily().by_day_index().at(kDayIndexDay2).string_hashes_size(),
+  EXPECT_GT(report_aggregate.daily().by_day_index().at(kDayIndexDay1).string_hashes_ff64_size() +
+                report_aggregate.daily().by_day_index().at(kDayIndexDay2).string_hashes_ff64_size(),
             string_buffer_max);
 
   // Generate the observation on the second day to generate an observation for the last 7 days,
@@ -920,7 +1216,7 @@
 
   // The observation should only have a max string hash size equal to or less than the string buffer
   // max. So check that the string hash size is equal to the string buffer max.
-  ASSERT_EQ(histogram.string_hashes_size(), string_buffer_max);
+  ASSERT_EQ(histogram.string_hashes_ff64_size(), string_buffer_max);
 
   for (const IndexHistogram& value : histogram.string_histograms()) {
     // This string vector represent the expected (test) vector of string hashes that the
@@ -932,7 +1228,7 @@
     std::vector<std::string> actualHashes;
     actualHashes.reserve(test_hashes.size());
     for (const uint32_t index : value.bucket_indices()) {
-      actualHashes.push_back(histogram.string_hashes(static_cast<int>(index)));
+      actualHashes.push_back(histogram.string_hashes_ff64(static_cast<int>(index)));
     }
 
     // Assert that the created (actual) string hash vector is a subset of the string hashes within
diff --git a/src/local_aggregation/aggregation_procedures/select_most_common_aggregation_procedure.cc b/src/local_aggregation/aggregation_procedures/select_most_common_aggregation_procedure.cc
index 19c6a9c..478e4c6 100644
--- a/src/local_aggregation/aggregation_procedures/select_most_common_aggregation_procedure.cc
+++ b/src/local_aggregation/aggregation_procedures/select_most_common_aggregation_procedure.cc
@@ -16,7 +16,7 @@
 void SelectMostCommonAggregationProcedure::UpdateAggregateData(
     const logger::EventRecord &event_record, AggregateData &aggregate_data,
     AggregationPeriodBucket & /*bucket*/) {
-  // TODO(b/278938040): Handle the case where event_record is malformed.
+  // TODO(https://fxbug.dev/278938040): Handle the case where event_record is malformed.
   aggregate_data.set_count(aggregate_data.count() +
                            event_record.event()->occurrence_event().count());
 }
diff --git a/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure.cc b/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure.cc
index 27176b6..c40bf32 100644
--- a/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure.cc
+++ b/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure.cc
@@ -20,20 +20,32 @@
     AggregationPeriodBucket &bucket) {
   StringHistogram *histogram = aggregate_data.mutable_string_histogram();
 
+  // TODO(https://fxbug.dev/322409910): Delete usage of legacy hash after clients no longer store
+  // them. Continue to use legacy hashes if they're already stored in the current aggregation period
+  // bucket. Use Farmhash Fingerprint 64 hashes otherwise.
+  bool use_legacy_hash = !bucket.string_hashes().empty();
   std::string bytes =
-      util::FarmhashFingerprint(event_record.event()->string_event().string_value());
+      use_legacy_hash
+          ? util::FarmhashFingerprint(event_record.event()->string_event().string_value())
+          : util::FarmhashFingerprint64(event_record.event()->string_event().string_value());
+  const google::protobuf::RepeatedPtrField<std::string> &string_hashes =
+      use_legacy_hash ? bucket.string_hashes() : bucket.string_hashes_ff64();
 
-  for (int i = 0; i < bucket.string_hashes_size(); i++) {
-    if (bucket.string_hashes(i) == bytes) {
+  for (int i = 0; i < string_hashes.size(); i++) {
+    if (string_hashes.at(i) == bytes) {
       (*histogram->mutable_histogram())[i] += 1;
       return;
     }
   }
 
-  if (bucket.string_hashes_size() < string_buffer_max_) {
+  if (string_hashes.size() < string_buffer_max_) {
     // Add new entry
-    (*histogram->mutable_histogram())[bucket.string_hashes_size()] += 1;
-    bucket.add_string_hashes(bytes);
+    (*histogram->mutable_histogram())[string_hashes.size()] += 1;
+    if (use_legacy_hash) {
+      bucket.add_string_hashes(bytes);
+    } else {
+      bucket.add_string_hashes_ff64(bytes);
+    }
   }
 }
 
@@ -75,7 +87,7 @@
     hashes.push_back(hash);
   }
 
-  return logger::encoder::EncodeStringHistogramObservation(hashes, data);
+  return logger::encoder::EncodeStringHistogramObservation(hashes, data, bucket.use_legacy_hash);
 }
 
 std::string StringHistogramAggregationProcedure::DebugString() const { return "STRING_HISTOGRAM"; }
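
The update path above is a bounded find-or-append over the bucket's stored hashes: an existing hash's index gets its count bumped, a new hash is appended only while the buffer is under string_buffer_max_, and anything past the cap is silently dropped. A self-contained sketch of that logic with plain containers (names are illustrative, not the production types):

#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Mirrors the shape of StringHistogramAggregationProcedure::UpdateAggregateData:
// find the hash's index and increment, or append a new hash if there is room.
void UpdateHistogram(const std::string& hash_bytes, size_t buffer_max,
                     std::vector<std::string>& hashes, std::map<size_t, int>& histogram) {
  for (size_t i = 0; i < hashes.size(); ++i) {
    if (hashes[i] == hash_bytes) {
      histogram[i] += 1;
      return;
    }
  }
  if (hashes.size() < buffer_max) {
    histogram[hashes.size()] += 1;
    hashes.push_back(hash_bytes);
  }
  // Otherwise the event is dropped: the buffer is full and the hash is new.
}

int main() {
  std::vector<std::string> hashes;
  std::map<size_t, int> histogram;
  UpdateHistogram("aa", /*buffer_max=*/2, hashes, histogram);
  UpdateHistogram("bb", 2, hashes, histogram);
  UpdateHistogram("aa", 2, hashes, histogram);
  UpdateHistogram("cc", 2, hashes, histogram);  // dropped: buffer full
  assert(hashes.size() == 2);
  assert(histogram[0] == 2 && histogram[1] == 1);
}
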
diff --git a/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure_test.cc b/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure_test.cc
index 953eb99..8d2097e 100644
--- a/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure_test.cc
+++ b/src/local_aggregation/aggregation_procedures/string_histogram_aggregation_procedure_test.cc
@@ -45,7 +45,7 @@
   }
 };
 
-TEST_F(StringHistogramAggregationProcedureTest, UpdateAggregateWorks) {
+TEST_F(StringHistogramAggregationProcedureTest, UpdateAggregateFF64Works) {
   util::PinnedUniquePtr<AggregationProcedure> procedure(
       GetProcedureFor(kStringMetricMetricId, kStringMetricStringCountsReportIndex));
 
@@ -65,6 +65,46 @@
 
   ASSERT_EQ(aggregate.hourly().by_hour_id_size(), 1);
   ASSERT_EQ(aggregate.hourly().by_hour_id().at(kHourId).system_profile_aggregates_size(), 1u);
+  ASSERT_EQ(aggregate.hourly().by_hour_id().at(kHourId).string_hashes_size(), 0u);
+  ASSERT_EQ(aggregate.hourly().by_hour_id().at(kHourId).string_hashes_ff64_size(), 5u);
+  const SystemProfileAggregate& system_profile_agg =
+      aggregate.hourly().by_hour_id().at(kHourId).system_profile_aggregates(0);
+  EXPECT_EQ(system_profile_agg.system_profile_hash(), system_profile_hash);
+  ASSERT_EQ(system_profile_agg.by_event_code_size(), kNumEventCodes);
+}
+
+// Test that the aggregation period bucket continues to use the legacy hash when a legacy hash
+// is already stored.
+//
+// TODO(https://fxbug.dev/322409910): Delete this test after clients stop storing legacy hash.
+TEST_F(StringHistogramAggregationProcedureTest, UpdateAggregateLegacyWorks) {
+  util::PinnedUniquePtr<AggregationProcedure> procedure(
+      GetProcedureFor(kStringMetricMetricId, kStringMetricStringCountsReportIndex));
+
+  ReportAggregate aggregate;
+  const uint32_t kNumEventCodes = 100;
+  const uint32_t kHourId = 1;
+  const uint64_t system_profile_hash = uint64_t{111111};
+  const std::vector<std::string> kTestStrings = {
+      "Nunc dictum justo ac arcu.",
+      "Suspendisse ullamcorper mi vel pulvinar dictum.",
+      "Aenean feugiat consectetur vestibulum.",
+      "Integer a ullamcorper dolor.",
+      "Praesent vel nulla quis metus consectetur aliquam sed ut felis.",
+  };
+
+  // Mock that a legacy hash is already stored in the current aggregation period bucket.
+  AggregationPeriodBucket bucket;
+  bucket.add_string_hashes(util::FarmhashFingerprint(kTestStrings[0]));
+  aggregate.mutable_hourly()->mutable_by_hour_id()->insert({kHourId, bucket});
+
+  LogStringEvents(kHourId, kNumEventCodes, kTestStrings, system_profile_hash, *procedure,
+                  aggregate);
+
+  ASSERT_EQ(aggregate.hourly().by_hour_id_size(), 1);
+  ASSERT_EQ(aggregate.hourly().by_hour_id().at(kHourId).system_profile_aggregates_size(), 1u);
+  ASSERT_EQ(aggregate.hourly().by_hour_id().at(kHourId).string_hashes_size(), 5u);
+  ASSERT_EQ(aggregate.hourly().by_hour_id().at(kHourId).string_hashes_ff64_size(), 0u);
   const SystemProfileAggregate& system_profile_agg =
       aggregate.hourly().by_hour_id().at(kHourId).system_profile_aggregates(0);
   EXPECT_EQ(system_profile_agg.system_profile_hash(), system_profile_hash);
@@ -138,7 +178,7 @@
   EXPECT_EQ(merged_data.string_histogram().histogram().at(2), 40);
 }
 
-TEST_F(StringHistogramAggregationProcedureTest, GenerateObservationWorks) {
+TEST_F(StringHistogramAggregationProcedureTest, GenerateObservationFF64Works) {
   util::PinnedUniquePtr<AggregationProcedure> procedure(
       GetProcedureFor(kStringMetricMetricId, kStringMetricStringCountsReportIndex));
 
@@ -175,6 +215,66 @@
   std::vector<std::string> expected_hashes;
   expected_hashes.reserve(kTestStrings.size());
   for (const std::string& string : kTestStrings) {
+    expected_hashes.push_back(util::FarmhashFingerprint64(string));
+  }
+
+  for (const IndexHistogram& value : histogram.string_histograms()) {
+    for (int i = 0; i < value.bucket_indices_size(); i++) {
+      ASSERT_EQ(value.bucket_counts(i), 1);
+
+      ASSERT_THAT(expected_hashes, Contains(histogram.string_hashes_ff64(value.bucket_indices(i))));
+    }
+  }
+  // Check that obsolete aggregates get cleaned up.
+  procedure->ObservationsCommitted(aggregate, util::TimeInfo::FromHourId(kEndHourId),
+                                   system_profile_hash);
+  ASSERT_EQ(aggregate.hourly().by_hour_id_size(), 0);
+}
+
+// Test that observations are generated using the legacy hash.
+TEST_F(StringHistogramAggregationProcedureTest, GenerateObservationLegacyWorks) {
+  util::PinnedUniquePtr<AggregationProcedure> procedure(
+      GetProcedureFor(kStringMetricMetricId, kStringMetricStringCountsReportIndex));
+
+  ReportAggregate aggregate;
+  const uint32_t kNumEventCodes = 10;
+  const uint32_t kEndHourId = 11;
+  const uint64_t system_profile_hash = uint64_t{111111};
+  const std::vector<std::string> kTestStrings = {
+      "Nunc dictum justo ac arcu.",
+      "Suspendisse ullamcorper mi vel pulvinar dictum.",
+      "Aenean feugiat consectetur vestibulum.",
+      "Integer a ullamcorper dolor.",
+      "Praesent vel nulla quis metus consectetur aliquam sed ut felis.",
+  };
+
+  // Simulate a legacy hash already being present in the kEndHourId aggregation period bucket.
+  AggregationPeriodBucket bucket;
+  bucket.add_string_hashes(util::FarmhashFingerprint(kTestStrings[0]));
+  aggregate.mutable_hourly()->mutable_by_hour_id()->insert({kEndHourId, bucket});
+
+  for (int hour_id = 1; hour_id <= kEndHourId; hour_id += 2) {
+    LogStringEvents(hour_id, kNumEventCodes, kTestStrings, system_profile_hash, *procedure,
+                    aggregate);
+  }
+
+  lib::statusor::StatusOr<std::vector<ObservationAndSystemProfile>> observations_or =
+      procedure->GenerateObservations(util::TimeInfo::FromHourId(kEndHourId), aggregate);
+  ASSERT_EQ(observations_or.status().error_code(), StatusCode::OK);
+  std::vector<ObservationAndSystemProfile> observations = std::move(observations_or).value();
+
+  // Should only generate for kEndHourId
+  ASSERT_EQ(observations.size(), 1u);
+  EXPECT_EQ(observations[0].system_profile_hash, system_profile_hash);
+
+  ASSERT_EQ(observations[0].observation->string_histogram().string_histograms_size(),
+            kNumEventCodes);
+
+  const StringHistogramObservation& histogram = observations[0].observation->string_histogram();
+
+  std::vector<std::string> expected_hashes;
+  expected_hashes.reserve(kTestStrings.size());
+  for (const std::string& string : kTestStrings) {
     expected_hashes.push_back(util::FarmhashFingerprint(string));
   }
 
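The two legacy-hash tests above pin down the compatibility rule for aggregation-period buckets: once a bucket already holds entries in the deprecated string_hashes list, later events logged into that bucket must keep using the legacy hash, because histogram bucket indices point into a single append-only list. A minimal sketch of that decision rule (UseLegacyHashForBucket is a hypothetical helper; the actual AggregationProcedure internals are not part of this change):

bool UseLegacyHashForBucket(const AggregationPeriodBucket& bucket) {
  // Buckets that already contain Farmhash Fingerprint 128 ("legacy") hashes
  // stay legacy for the rest of the aggregation period; empty or FF64-only
  // buckets use the new string_hashes_ff64 list instead.
  return bucket.string_hashes_size() > 0;
}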
diff --git a/src/local_aggregation/local_aggregate_storage/BUILD.gn b/src/local_aggregation/local_aggregate_storage/BUILD.gn
index b1c2f43..2641692 100644
--- a/src/local_aggregation/local_aggregate_storage/BUILD.gn
+++ b/src/local_aggregation/local_aggregate_storage/BUILD.gn
@@ -24,6 +24,7 @@
     "$cobalt_root/src/lib/util:hash",
     "$cobalt_root/src/lib/util:not_null",
     "$cobalt_root/src/lib/util:protected_fields",
+    "$cobalt_root/src/lib/util:thread",
     "$cobalt_root/src/local_aggregation:proto",
     "$cobalt_root/src/local_aggregation/aggregation_procedures",
     "$cobalt_root/src/logger:internal_metrics",
diff --git a/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.cc b/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.cc
index b8b45a3..34652d2 100644
--- a/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.cc
+++ b/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.cc
@@ -8,11 +8,13 @@
 #include <set>
 
 #include "src/lib/util/file_system.h"
+#include "src/lib/util/thread.h"
 #include "src/local_aggregation/local_aggregate_storage/local_aggregate_storage.h"
 #include "src/local_aggregation/local_aggregation.pb.h"
 #include "src/logger/internal_metrics.h"
 #include "src/logger/project_context.h"
 #include "src/logger/project_context_factory.h"
+#include "src/logging.h"
 #include "src/public/lib/registry_identifiers.h"
 #include "src/public/lib/status.h"
 #include "src/public/lib/statusor/statusor.h"
@@ -42,6 +44,7 @@
 
   std::thread t([this] { this->Run(); });
   writeback_thread_ = std::move(t);
+  util::NameThread("writeback-thread", writeback_thread_);
 }
 
 DelayedLocalAggregateStorage::~DelayedLocalAggregateStorage() {
@@ -77,7 +80,6 @@
   aggregates_.clear_metric_aggregates();
   aggregates_.clear_filtered_system_profiles();
   state_.lock()->data_modified = true;
-  WaitUntilSave(writeback_frequency_ * 2);
 }
 
 void DelayedLocalAggregateStorage::MigrateStoreStructure() {
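util::NameThread, added above, labels the writeback worker so it is identifiable in debuggers and crash reports. The implementation in src/lib/util/thread.h is not part of this diff; as an assumption, a Linux-host version of such a helper would look roughly like this (on Fuchsia the zx thread APIs would be used instead):

#include <pthread.h>

#include <thread>

namespace util {

// Best-effort: assigns |name| to |thread| for debugging purposes.
inline void NameThread(const char* name, std::thread& thread) {
  // On Linux, pthread_setname_np rejects names longer than 15 characters
  // (plus the terminating NUL) with ERANGE; the result is ignored here.
  pthread_setname_np(thread.native_handle(), name);
}

}  // namespace util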
diff --git a/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.h b/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.h
index d86ff4a..7265ddf 100644
--- a/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.h
+++ b/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage.h
@@ -100,8 +100,8 @@
   // DeleteOutdatedMetrics walks the filesystem from the |base_directory_| down and deletes
   // MetricAggregate files, and project directories that do not exist in the CobaltRegistry.
   //
-  // TODO(fxbug.dev/51390): Customers that are not present in the registry should be deleted
-  // too.
+  // TODO(https://fxbug.dev/42128576): Customers that are not present in the registry should be
+  // deleted too.
   void DeleteOutdatedMetrics();
 
   // MigrateStoredData updates the data in the aggregate storage to reflect changes that have
diff --git a/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage_test.cc b/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage_test.cc
index 222a2be..8211e87 100644
--- a/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage_test.cc
+++ b/src/local_aggregation/local_aggregate_storage/delayed_local_aggregate_storage_test.cc
@@ -16,6 +16,7 @@
 #include "src/lib/util/not_null.h"
 #include "src/lib/util/testing/test_with_files.h"
 #include "src/local_aggregation/local_aggregate_storage/local_aggregate_storage.h"
+#include "src/local_aggregation/local_aggregation.pb.h"
 #include "src/local_aggregation/testing/test_registry.cb.h"
 #include "src/logger/project_context_factory.h"
 #include "src/logging.h"
@@ -504,6 +505,51 @@
   ASSERT_FALSE(has_data);
 }
 
+TEST_F(DelayedLocalAggregateStorageTest, DeleteDataIsFast) {
+  std::promise<bool> complete;
+
+  // Run the test in a thread in case it hangs.
+  std::thread thread = std::thread([this, &complete]() {
+    GlobalAggregates aggregates;
+    MetricAggregateEntry* entry = aggregates.add_metric_aggregates();
+    entry->set_customer_id(123);
+    entry->set_project_id(100);
+    entry->set_metric_id(1);
+
+    // Write the updated local aggregate storage file and recreate the storage so it reads the file.
+    Status status = fs().Write(local_aggregation_store_path(), aggregates);
+    ASSERT_EQ(status.error_code(), StatusCode::OK);
+
+    InitStorage();
+    storage_->DeleteData();
+    storage_->WaitUntilSave(kMaxWait);
+
+    // Load the written global aggregate file.
+    GlobalAggregates post_deletion_aggregates;
+    status = fs().Read(local_aggregation_store_path(), &post_deletion_aggregates);
+    ASSERT_EQ(status.error_code(), StatusCode::OK);
+
+    bool has_data = false;
+    for (MetricAggregateEntry& entry : *post_deletion_aggregates.mutable_metric_aggregates()) {
+      if (entry.customer_id() == 123 && entry.project_id() == 100 && entry.metric_id() == 1) {
+        has_data = true;
+      }
+    }
+    ASSERT_FALSE(has_data);
+
+    complete.set_value(true);
+  });
+
+  // If the promise isn't resolved after 10 seconds, a deadlock has likely occurred.
+  if (complete.get_future().wait_for(std::chrono::seconds(10)) == std::future_status::timeout) {
+    ADD_FAILURE() << "Probable deadlock: DeleteData did not complete within 10 seconds";
+    thread.detach();
+    return;
+  }
+
+  thread.join();
+}
+
 TEST_F(DelayedLocalAggregateStorageTest, ShutDownIsFast) {
   // Construct a storage with an extremely long writeback frequency.
   InitStorage(GetRegistry(), 0, std::chrono::hours(99999));
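DeleteDataIsFast above, like ShutDownIsFast after it, guards against reintroducing a blocking wait by running the test body on its own thread and failing if a promise is not fulfilled before a deadline. The pattern generalizes; here is a sketch of a reusable helper under that assumption (ExpectFinishesWithin is hypothetical, not part of this change):

#include <chrono>
#include <future>
#include <thread>

#include <gtest/gtest.h>

// Runs |body| on a worker thread and fails the current test if it does not
// finish within |deadline|. On timeout the worker is detached so the rest of
// the test binary can proceed, at the cost of leaking the stuck thread.
// (As in the test above, a stuck body that later resumes would touch freed
// locals; tolerable in a test binary that is about to be torn down.)
template <typename Body>
void ExpectFinishesWithin(std::chrono::seconds deadline, Body body) {
  std::promise<void> done;
  std::thread worker([&body, &done] {
    body();
    done.set_value();
  });
  if (done.get_future().wait_for(deadline) == std::future_status::timeout) {
    ADD_FAILURE() << "Probable deadlock: body did not finish within deadline";
    worker.detach();
    return;
  }
  worker.join();
}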
diff --git a/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.cc b/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.cc
index 574b15c..99c0922 100644
--- a/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.cc
+++ b/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.cc
@@ -32,7 +32,7 @@
   if (fs_.FileExists(filtered_system_profiles_file)) {
     auto status = proto_store_.Read(filtered_system_profiles_file, &filtered_system_profiles_);
     if (!status.ok()) {
-      // TODO(b/278914567): record metric for tracking file system read errors.
+      // TODO(https://fxbug.dev/278914567): record metric for tracking file system read errors.
       LOG(ERROR) << "Failed to load the stored FilteredSystemProfiles from file '"
                  << filtered_system_profiles_file << "':" << status;
       filtered_system_profiles_.clear_by_system_profile_hash();
diff --git a/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.h b/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.h
index 8ae1d0a..f60c6a2 100644
--- a/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.h
+++ b/src/local_aggregation/local_aggregate_storage/immediate_local_aggregate_storage.h
@@ -85,8 +85,8 @@
   // DeleteOutdatedMetrics walks the filesystem from the |base_directory_| down and deletes
   // MetricAggregate files, and project directories that do not exist in the CobaltRegistry.
   //
-  // TODO(fxbug.dev/51390): Customers that are not present in the registry should be deleted
-  // too.
+  // TODO(https://fxbug.dev/42128576): Customers that are not present in the registry should be
+  // deleted too.
   void DeleteOutdatedMetrics();
 
   // MigrateStoredData updates the data in the aggregate storage to reflect changes that have
diff --git a/src/local_aggregation/local_aggregate_storage/local_aggregate_storage.cc b/src/local_aggregation/local_aggregate_storage/local_aggregate_storage.cc
index c90fe1d..196f2e5 100644
--- a/src/local_aggregation/local_aggregate_storage/local_aggregate_storage.cc
+++ b/src/local_aggregation/local_aggregate_storage/local_aggregate_storage.cc
@@ -118,7 +118,7 @@
   bool changed = false;
 
   if (metric.deprecated_by_system_profile_size() > 0) {
-    // NOTE(fxbug.dev/87271): This is needed to clean up excess by_system_profiles in the aggregate
+    // NOTE(https://fxbug.dev/42168374): This is needed to clean up excess by_system_profiles in the aggregate
     // store. Do not delete unless you are *certain* there are no devices in the wild that still
     // need this cleanup
     metric.clear_deprecated_by_system_profile();
diff --git a/src/local_aggregation/local_aggregation.proto b/src/local_aggregation/local_aggregation.proto
index c43c4f1..1a621ca 100644
--- a/src/local_aggregation/local_aggregation.proto
+++ b/src/local_aggregation/local_aggregation.proto
@@ -102,6 +102,10 @@
   // DEPRECATED: use by_event_code in the appropriate SystemProfileAggregate.
   repeated EventCodesAggregateData deprecated_by_event_code = 4;
 
+  // TODO(https://fxbug.dev/322409910): Delete string_hashes after clients
+  // no longer store the field.
+  repeated bytes string_hashes = 2 [deprecated = true];
+
   // Used for metrics of type STRING only. The list is append only, as the
   // indexes into the list appear below in the StringHistogram.histogram map.
   // This list should not exceed string_buffer_max entries per aggregation
@@ -110,7 +114,12 @@
   // entries may be stored in different buckets for days that make up the
   // aggregation period. Apply string_buffer_max limits to the combination of
   // all the string_hashes found in all the aggregation period buckets.
-  repeated bytes string_hashes = 2;
+  //
+  // Only one of `string_hashes` or `string_hashes_ff64` should be used.
+  //
+  // Note: Cobalt local aggregation does not yet support this field, so it should not
+  // be used yet.
+  repeated bytes string_hashes_ff64 = 6;
 
   // Aggregates for this report, with the filtered system profile they apply
   // to. SELECT_FIRST and SELECT_LAST reports will have only a single entry.
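string_hashes_ff64 above inherits the legacy field's contract: the list is append-only, and the indices stored in StringHistogram.histogram refer to positions in it. A worked sketch of that contract, assuming only util::FarmhashFingerprint64 (already used elsewhere in this change) and the generated proto API (IndexForString is a hypothetical helper, not the real aggregation code):

// Returns the bucket index for |value|, appending its FF64 hash if absent.
int IndexForString(AggregationPeriodBucket& bucket, const std::string& value) {
  const std::string hash = util::FarmhashFingerprint64(value);
  for (int i = 0; i < bucket.string_hashes_ff64_size(); ++i) {
    if (bucket.string_hashes_ff64(i) == hash) {
      // Existing indices stay valid precisely because the list is append-only.
      return i;
    }
  }
  bucket.add_string_hashes_ff64(hash);
  return bucket.string_hashes_ff64_size() - 1;
}

A real implementation would additionally enforce the string_buffer_max limit across all buckets of the aggregation period, as the comment above requires.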
diff --git a/src/local_aggregation/observation_generator.cc b/src/local_aggregation/observation_generator.cc
index 6bb0715..8510153 100644
--- a/src/local_aggregation/observation_generator.cc
+++ b/src/local_aggregation/observation_generator.cc
@@ -13,10 +13,12 @@
 #include "src/lib/util/clock.h"
 #include "src/lib/util/datetime_util.h"
 #include "src/lib/util/not_null.h"
+#include "src/lib/util/thread.h"
 #include "src/local_aggregation/aggregation_procedures/aggregation_procedure.h"
 #include "src/local_aggregation/backfill_manager.h"
 #include "src/local_aggregation/local_aggregate_storage/local_aggregate_storage.h"
 #include "src/logger/observation_writer.h"
+#include "src/logging.h"
 #include "src/pb/observation_batch.pb.h"
 #include "src/public/lib/status_codes.h"
 #include "src/public/lib/statusor/status_macros.h"
@@ -61,6 +63,7 @@
   locked->shut_down = false;
   LOG(INFO) << "Starting ObservationGenerator Worker Thread";
   worker_thread_ = std::thread([this, clock]() mutable { this->Run(clock); });
+  util::NameThread("observation-generation-thread", worker_thread_);
 }
 
 void ObservationGenerator::ShutDown() {
@@ -233,7 +236,8 @@
                           privacy_encoder_->MaybeMakePrivateObservations(nullptr, metric, report));
 
       // Use the current system profile if any fabricated private observations are created.
-      // TODO(b/278930562): choose plausible SystemProfiles for fabricated observations.
+      // TODO(https://fxbug.dev/278930562): choose plausible SystemProfiles for fabricated
+      // observations.
       CB_RETURN_IF_ERROR(WriteObservations(report_aggregate, procedure, time_info, metric_ref,
                                            report, private_observations,
                                            current_filtered_system_profile, std::nullopt));
diff --git a/src/local_aggregation/testing/test_registry/CustomerA/ProjectA1/metrics.yaml b/src/local_aggregation/testing/test_registry/CustomerA/ProjectA1/metrics.yaml
index df84ac7..dbb59e5 100644
--- a/src/local_aggregation/testing/test_registry/CustomerA/ProjectA1/metrics.yaml
+++ b/src/local_aggregation/testing/test_registry/CustomerA/ProjectA1/metrics.yaml
@@ -14,7 +14,7 @@
         privacy_level: NO_ADDED_PRIVACY
         event_vector_buffer_max: 100
         system_profile_field: [OS, SYSTEM_VERSION]
-        # TODO(fxbug.dev/85440): This is an invalid configuration.
+        # TODO(https://fxbug.dev/42166340): This is an invalid configuration.
         # system_profile_selection should not be set for
         # FLEETWIDE_OCCURRENCE_COUNTS metrics, and requires setting
         # skip_validation = true in BUILD.gn
diff --git a/src/logger/BUILD.gn b/src/logger/BUILD.gn
index 2b69b08..1fd1368 100644
--- a/src/logger/BUILD.gn
+++ b/src/logger/BUILD.gn
@@ -55,6 +55,7 @@
   deps = [
     "$cobalt_root/src/lib/util:status_builder",
     "$cobalt_root/src/public/lib:registry_identifiers",
+    "//third_party/abseil-cpp/absl/strings",
   ]
   public_deps = [
     "$cobalt_root/src:logging",
@@ -455,6 +456,7 @@
     "$cobalt_root/src/lib/util:hash",
     "$cobalt_root/src/public/lib/statusor",
     "$cobalt_root/src/registry:cobalt_registry_proto",
+    "//third_party/googletest:gmock",
     "//third_party/googletest:gtest",
   ]
 }
diff --git a/src/logger/encoder.cc b/src/logger/encoder.cc
index 68d28dd..410f233 100644
--- a/src/logger/encoder.cc
+++ b/src/logger/encoder.cc
@@ -73,14 +73,18 @@
 
 lib::statusor::StatusOr<std::unique_ptr<Observation>> EncodeStringHistogramObservation(
     const std::vector<std::string>& hashes,
-    const std::vector<std::tuple<EventCodes, Histogram>>& data) {
+    const std::vector<std::tuple<EventCodes, Histogram>>& data, bool use_legacy_hash) {
   auto observation = std::make_unique<Observation>();
 
   StringHistogramObservation* string_histogram_observation =
       observation->mutable_string_histogram();
 
   for (const std::string& hash : hashes) {
-    string_histogram_observation->add_string_hashes(hash);
+    if (use_legacy_hash) {
+      string_histogram_observation->add_string_hashes(hash);
+    } else {
+      string_histogram_observation->add_string_hashes_ff64(hash);
+    }
   }
 
   for (const auto& [event_codes, histogram] : data) {
diff --git a/src/logger/encoder.h b/src/logger/encoder.h
index 1f99f1c..530bdfa 100644
--- a/src/logger/encoder.h
+++ b/src/logger/encoder.h
@@ -49,9 +49,15 @@
 // data: A vector of (event_codes, histogram vector of (index, count)) pairs that will be used to
 // encode the Observation. The bucket with index i in the histogram contains the count for the
 // string whose hash is in position i in the hashes vector.
+// use_legacy_hash: a boolean indicating whether the legacy hash function (Farmhash
+// Fingerprint 128) is used to produce the string hashes.
+//
+// TODO(https://fxbug.dev/322409910): Delete usage of the legacy hash after clients stop sending
+// the field.
 [[nodiscard]] lib::statusor::StatusOr<std::unique_ptr<Observation>>
 EncodeStringHistogramObservation(const std::vector<std::string>& hashes,
-                                 const std::vector<std::tuple<EventCodes, Histogram>>& data);
+                                 const std::vector<std::tuple<EventCodes, Histogram>>& data,
+                                 bool use_legacy_hash);
 
 }  // namespace cobalt::logger::encoder
 
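The hashes/data correspondence described above is easiest to see in a minimal call (arbitrary values, in the same style as the tests in encoder_test.cc below):

// One hash, so bucket index 0 refers to it; one (event_codes, histogram)
// pair with event vector {1, 2} and a single bucket (index 0, count 7).
lib::statusor::StatusOr<std::unique_ptr<Observation>> obs_or =
    EncodeStringHistogramObservation({"h0"}, {{{1, 2}, {{0, 7}}}},
                                     /*use_legacy_hash=*/false);
// On success, obs_or.value()->string_histogram() has string_hashes_ff64(0) ==
// "h0", and string_histograms(0) with bucket_indices == {0} and
// bucket_counts == {7}.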
diff --git a/src/logger/encoder_test.cc b/src/logger/encoder_test.cc
index bbf3228..7ed882f 100644
--- a/src/logger/encoder_test.cc
+++ b/src/logger/encoder_test.cc
@@ -105,13 +105,15 @@
   }
 }
 
-TEST(Encoder, EncodeStringHistogramObservationSingleValue) {
+// TODO(https://fxbug.dev/322409910): Delete this test after clients stop using the legacy hash.
+TEST(Encoder, EncodeStringHistogramObservationSingleValueLegacy) {
   StatusOr<std::unique_ptr<Observation>> obs_or =
-      EncodeStringHistogramObservation({"4"}, {{{1, 2, 3}, {{4, 5}}}});
+      EncodeStringHistogramObservation({"4"}, {{{1, 2, 3}, {{4, 5}}}}, /*use_legacy_hash=*/true);
   ASSERT_EQ(obs_or.status(), Status::OkStatus());
   std::unique_ptr<Observation> obs = std::move(obs_or.value());
   ASSERT_TRUE(obs->has_string_histogram());
   ASSERT_EQ(obs->string_histogram().string_hashes_size(), 1);
+  ASSERT_EQ(obs->string_histogram().string_hashes_ff64_size(), 0);
   ASSERT_EQ(obs->string_histogram().string_hashes(0), "4");
   ASSERT_EQ(obs->string_histogram().string_histograms_size(), 1);
   EXPECT_EQ(obs->string_histogram().string_histograms(0).event_codes_size(), 3);
@@ -121,17 +123,37 @@
   EXPECT_EQ(obs->string_histogram().string_histograms(0).bucket_counts(0), 5);
 }
 
-TEST(Encoder, EncodeStringHistogramObservationMultipleValues) {
+TEST(Encoder, EncodeStringHistogramObservationSingleValueFF64) {
+  StatusOr<std::unique_ptr<Observation>> obs_or =
+      EncodeStringHistogramObservation({"4"}, {{{1, 2, 3}, {{4, 5}}}}, /*use_legacy_hash=*/false);
+  ASSERT_EQ(obs_or.status(), Status::OkStatus());
+  std::unique_ptr<Observation> obs = std::move(obs_or.value());
+  ASSERT_TRUE(obs->has_string_histogram());
+  ASSERT_EQ(obs->string_histogram().string_hashes_size(), 0);
+  ASSERT_EQ(obs->string_histogram().string_hashes_ff64_size(), 1);
+  ASSERT_EQ(obs->string_histogram().string_hashes_ff64(0), "4");
+  ASSERT_EQ(obs->string_histogram().string_histograms_size(), 1);
+  EXPECT_EQ(obs->string_histogram().string_histograms(0).event_codes_size(), 3);
+  ASSERT_EQ(obs->string_histogram().string_histograms(0).bucket_indices_size(), 1);
+  EXPECT_EQ(obs->string_histogram().string_histograms(0).bucket_indices(0), 4u);
+  ASSERT_EQ(obs->string_histogram().string_histograms(0).bucket_counts_size(), 1);
+  EXPECT_EQ(obs->string_histogram().string_histograms(0).bucket_counts(0), 5);
+}
+
+// TODO(https://fxbug.dev/322409910): Delete this test after clients stop using the legacy hash.
+TEST(Encoder, EncodeStringHistogramObservationMultipleValuesLegacy) {
   StatusOr<std::unique_ptr<Observation>> obs_or =
       EncodeStringHistogramObservation({"1", "2", "3", "4", "5", "6", "7", "8", "10"},
                                        {{{}, {{1, 2}}},
                                         {{1}, {{2, 3}, {4, 5}}},
                                         {{1, 2}, {{3, 4}, {5, 6}, {7, 8}}},
-                                        {{1, 2, 3}, {{4, 5}, {6, 7}, {8, 9}, {10, 11}}}});
+                                        {{1, 2, 3}, {{4, 5}, {6, 7}, {8, 9}, {10, 11}}}},
+                                       /*use_legacy_hash=*/true);
   ASSERT_EQ(obs_or.status(), Status::OkStatus());
   std::unique_ptr<Observation> obs = std::move(obs_or.value());
   ASSERT_TRUE(obs->has_string_histogram());
   ASSERT_EQ(obs->string_histogram().string_hashes_size(), 9);
+  ASSERT_EQ(obs->string_histogram().string_hashes_ff64_size(), 0);
   ASSERT_EQ(obs->string_histogram().string_hashes(0), "1");
   ASSERT_EQ(obs->string_histogram().string_histograms_size(), 4);
   for (int i = 0; i < 4; i++) {
@@ -146,4 +168,31 @@
   }
 }
 
+TEST(Encoder, EncodeStringHistogramObservationMultipleValuesFF64) {
+  StatusOr<std::unique_ptr<Observation>> obs_or =
+      EncodeStringHistogramObservation({"1", "2", "3", "4", "5", "6", "7", "8", "10"},
+                                       {{{}, {{1, 2}}},
+                                        {{1}, {{2, 3}, {4, 5}}},
+                                        {{1, 2}, {{3, 4}, {5, 6}, {7, 8}}},
+                                        {{1, 2, 3}, {{4, 5}, {6, 7}, {8, 9}, {10, 11}}}},
+                                       /*use_legacy_hash=*/false);
+  ASSERT_EQ(obs_or.status(), Status::OkStatus());
+  std::unique_ptr<Observation> obs = std::move(obs_or.value());
+  ASSERT_TRUE(obs->has_string_histogram());
+  ASSERT_EQ(obs->string_histogram().string_hashes_size(), 0);
+  ASSERT_EQ(obs->string_histogram().string_hashes_ff64_size(), 9);
+  ASSERT_EQ(obs->string_histogram().string_hashes_ff64(0), "1");
+  ASSERT_EQ(obs->string_histogram().string_histograms_size(), 4);
+  for (int i = 0; i < 4; i++) {
+    EXPECT_EQ(obs->string_histogram().string_histograms(i).event_codes_size(), i);
+    ASSERT_EQ(obs->string_histogram().string_histograms(i).bucket_indices_size(), i + 1);
+    ASSERT_EQ(obs->string_histogram().string_histograms(i).bucket_counts_size(), i + 1);
+    for (int j = 0; j < i + 1; j++) {
+      EXPECT_EQ(obs->string_histogram().string_histograms(i).bucket_indices(j),
+                static_cast<uint32_t>(i + j * 2 + 1));
+      EXPECT_EQ(obs->string_histogram().string_histograms(i).bucket_counts(j), i + j * 2 + 2);
+    }
+  }
+}
+
 }  // namespace cobalt::logger::encoder
diff --git a/src/logger/fake_logger.cc b/src/logger/fake_logger.cc
index 1d8edd5..fc96138 100644
--- a/src/logger/fake_logger.cc
+++ b/src/logger/fake_logger.cc
@@ -38,7 +38,7 @@
 
 }  // namespace
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 Status FakeLogger::LogOccurrence(uint32_t metric_id, uint64_t count,
                                  const std::vector<uint32_t>& event_codes) {
   call_count_ += 1;
@@ -53,7 +53,7 @@
   return Status::OkStatus();
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 Status FakeLogger::LogInteger(uint32_t metric_id, int64_t value,
                               const std::vector<uint32_t>& event_codes) {
   call_count_ += 1;
diff --git a/src/logger/internal_metrics.h b/src/logger/internal_metrics.h
index e570da6..fa84e6b 100644
--- a/src/logger/internal_metrics.h
+++ b/src/logger/internal_metrics.h
@@ -161,7 +161,7 @@
   void SetSoftwareDistributionInfoCalled(
       SetSoftwareDistributionInfoCalledMigratedEventCodes event_codes) override {}
 
-  // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+  // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
   void TrackDiskUsage(StorageClass storage_class, size_t bytes, int64_t max_bytes) override {}
 
   void LocalAggregationQuotaEvent(const lib::ProjectIdentifier& project_identifier,
diff --git a/src/logger/logger.cc b/src/logger/logger.cc
index 84027a2..fe99ee3 100644
--- a/src/logger/logger.cc
+++ b/src/logger/logger.cc
@@ -70,7 +70,7 @@
   is_internal_logger_ = !internal_metrics;
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 Status Logger::LogOccurrence(uint32_t metric_id, uint64_t count,
                              const std::vector<uint32_t>& event_codes) {
   InternalMetrics::InternalMetricsFlusher flusher = internal_metrics_.Flusher();
@@ -82,7 +82,7 @@
   return Log(metric_id, MetricDefinition::OCCURRENCE, std::move(event_record));
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 Status Logger::LogInteger(uint32_t metric_id, int64_t value,
                           const std::vector<uint32_t>& event_codes) {
   InternalMetrics::InternalMetricsFlusher flusher = internal_metrics_.Flusher();
@@ -117,7 +117,7 @@
   return Log(metric_id, MetricDefinition::STRING, std::move(event_record));
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 Status Logger::LogOccurrence(uint32_t metric_id, uint64_t count,
                              const std::vector<uint32_t>& event_codes,
                              const std::chrono::system_clock::time_point& event_timestamp) {
@@ -131,7 +131,7 @@
                         event_timestamp);
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 Status Logger::LogInteger(uint32_t metric_id, int64_t value,
                           const std::vector<uint32_t>& event_codes,
                           const std::chrono::system_clock::time_point& event_timestamp) {
diff --git a/src/logger/logger_test.cc b/src/logger/logger_test.cc
index 9095e6b..3a870cd 100644
--- a/src/logger/logger_test.cc
+++ b/src/logger/logger_test.cc
@@ -223,7 +223,7 @@
                 ->LogInteger(testing::all_report_types::kNewLoginModuleFrameRateMetricId, 5123,
                              std::vector<uint32_t>({}))
                 .error_code());
-  // TODO(b/278924711): once local_aggregation supports testing, enable this.
+  // TODO(https://fxbug.dev/278924711): once local_aggregation supports testing, enable this.
   // EXPECT_TRUE(CheckNumericEventObservations(expected_report_ids, 0u, "", 5123,
   //                                           *observation_store_, update_recipient_.get()));
   ResetObservationStore();
@@ -241,7 +241,7 @@
                 ->LogInteger(testing::all_report_types::kNewLoginModuleFrameRateMetricId, 5123,
                              std::vector<uint32_t>({45}))
                 .error_code());
-  // TODO(b/278924711): once local_aggregation supports testing, enable this.
+  // TODO(https://fxbug.dev/278924711): once local_aggregation supports testing, enable this.
   // EXPECT_TRUE(CheckNumericEventObservations(expected_report_ids, 45u, "", 5123,
   //                                          *observation_store_, update_recipient_.get()));
 }
@@ -255,7 +255,7 @@
                 ->LogIntegerHistogram(testing::all_report_types::kNewFileSystemWriteTimesMetricId,
                                       std::move(histogram), std::vector<uint32_t>({47}))
                 .error_code());
-  // TODO(b/278924711): once local_aggregation supports testing, enable this.
+  // TODO(https://fxbug.dev/278924711): once local_aggregation supports testing, enable this.
   // Observation observation;
   // uint32_t expected_report_id =
   //     testing::all_report_types::kNewFileSystemWriteTimesNewFileSystemWriteTimesHistogramReportId;
@@ -300,7 +300,7 @@
                                             "component4", std::vector<uint32_t>({}))
                                 .error_code());
 
-  // TODO(b/278924711): once local_aggregation supports testing, enable this.
+  // TODO(https://fxbug.dev/278924711): once local_aggregation supports testing, enable this.
   // EXPECT_TRUE(CheckNumericEventObservations(expected_report_ids, 0u, "component4", 4004,
   //                                           *observation_store_, update_recipient_.get()));
 }
diff --git a/src/logger/logger_test_utils.h b/src/logger/logger_test_utils.h
index 6b35642..b925f1d 100644
--- a/src/logger/logger_test_utils.h
+++ b/src/logger/logger_test_utils.h
@@ -33,7 +33,7 @@
 using ExpectedReportParticipationObservations = std::set<std::pair<MetricReportId, uint32_t>>;
 
 // A mock ObservationStore.
-// TODO(b/278925674): Move this to a more appropriate location (Perhaps in
+// TODO(https://fxbug.dev/278925674): Move this to a more appropriate location (Perhaps in
 // src/observation_store)
 class FakeObservationStore : public ::cobalt::observation_store::ObservationStoreWriterInterface {
  public:
diff --git a/src/logger/privacy_encoder.cc b/src/logger/privacy_encoder.cc
index de113b8..8754311 100644
--- a/src/logger/privacy_encoder.cc
+++ b/src/logger/privacy_encoder.cc
@@ -9,14 +9,9 @@
 #include "src/pb/observation.pb.h"
 #include "src/public/lib/statusor/status_macros.h"
 
+using google::protobuf::RepeatedPtrField;
+
 namespace cobalt::logger {
-namespace {
-
-// The dimensions of a CountMin sketch for a report of type StringCounts.
-const size_t kNumCountMinCellsPerHash = 10;
-const size_t kNumCountMinHashes = 5;
-
-}  // namespace
 
 PrivacyEncoder::PrivacyEncoder(std::unique_ptr<SecureBitGeneratorInterface<uint32_t>> secure_gen,
                                std::unique_ptr<BitGeneratorInterface<uint32_t>> gen)
@@ -145,7 +140,7 @@
   switch (report_def.report_type()) {
     case ReportDefinition::STRING_COUNTS:
     case ReportDefinition::UNIQUE_DEVICE_STRING_COUNTS: {
-      return kNumCountMinCellsPerHash;
+      return report_def.string_sketch_params().num_cells_per_hash();
     }
     default: {
       return Status(StatusCode::INVALID_ARGUMENT,
@@ -159,7 +154,7 @@
   switch (report_def.report_type()) {
     case ReportDefinition::STRING_COUNTS:
     case ReportDefinition::UNIQUE_DEVICE_STRING_COUNTS: {
-      return kNumCountMinHashes;
+      return report_def.string_sketch_params().num_hashes();
     }
     default: {
       return Status(StatusCode::INVALID_ARGUMENT,
@@ -393,10 +388,15 @@
                                       string_histogram.event_codes().end());
     CB_ASSIGN_OR_RETURN(auto event_vector_index, EventVectorToIndex(event_codes, metric_def));
 
+    // TODO(https://fxbug.dev/322409910): Delete usage of legacy hashes after clients no longer
+    // store them. Use legacy hashes when present; otherwise use Farmhash Fingerprint 64 hashes.
+    const RepeatedPtrField<std::string> &string_hashes =
+        observation.string_histogram().string_hashes().empty()
+            ? observation.string_histogram().string_hashes_ff64()
+            : observation.string_histogram().string_hashes();
     CB_ASSIGN_OR_RETURN(
         CountMin<uint64_t> count_min,
-        MakeCountMinSketch(string_histogram, observation.string_histogram().string_hashes(),
-                           num_cells_per_hash, num_hashes));
+        MakeCountMinSketch(string_histogram, string_hashes, num_cells_per_hash, num_hashes));
 
     for (size_t cell_index = 0; cell_index < count_min.size(); ++cell_index) {
       CB_ASSIGN_OR_RETURN(uint64_t cell_value, count_min.GetCellValue(cell_index));
@@ -429,10 +429,15 @@
                                       string_histogram.event_codes().end());
     CB_ASSIGN_OR_RETURN(auto event_vector_index, EventVectorToIndex(event_codes, metric_def));
 
+    // TODO(https://fxbug.dev/322409910): Delete usage of legacy hashes after clients no longer
+    // store them. Use legacy hashes when present; otherwise use Farmhash Fingerprint 64 hashes.
+    const RepeatedPtrField<std::string> &string_hashes =
+        observation.string_histogram().string_hashes().empty()
+            ? observation.string_histogram().string_hashes_ff64()
+            : observation.string_histogram().string_hashes();
     CB_ASSIGN_OR_RETURN(
         CountMin<uint64_t> count_min,
-        MakeCountMinSketch(string_histogram, observation.string_histogram().string_hashes(),
-                           num_cells_per_hash, num_hashes));
+        MakeCountMinSketch(string_histogram, string_hashes, num_cells_per_hash, num_hashes));
 
     for (size_t cell_index = 0; cell_index < count_min.size(); ++cell_index) {
       CB_ASSIGN_OR_RETURN(uint64_t cell_value, count_min.GetCellValue(cell_index));
@@ -464,9 +469,8 @@
 }
 
 lib::statusor::StatusOr<CountMin<uint64_t>> PrivacyEncoder::MakeCountMinSketch(
-    const IndexHistogram &string_histogram,
-    const google::protobuf::RepeatedPtrField<std::string> &string_hashes, size_t num_cells_per_hash,
-    size_t num_hashes) {
+    const IndexHistogram &string_histogram, const RepeatedPtrField<std::string> &string_hashes,
+    size_t num_cells_per_hash, size_t num_hashes) {
   auto count_min = CountMin<uint64_t>::MakeSketch(num_cells_per_hash, num_hashes);
   for (int i = 0; i < string_histogram.bucket_indices_size(); ++i) {
     const std::string &string_hash =
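Both hunks above apply the same fallback when choosing which hash list feeds the CountMin sketch. Written as a standalone helper, the rule reads as follows (a sketch only; the change itself inlines the ternary at each call site):

// Prefer the deprecated legacy hashes while an observation still carries
// them; otherwise use the Farmhash Fingerprint 64 list. Only one of the two
// fields is expected to be populated.
const google::protobuf::RepeatedPtrField<std::string>& ChooseStringHashes(
    const StringHistogramObservation& obs) {
  return obs.string_hashes().empty() ? obs.string_hashes_ff64()
                                     : obs.string_hashes();
}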
diff --git a/src/logger/privacy_encoder_test.cc b/src/logger/privacy_encoder_test.cc
index 992a280..fb61c66 100644
--- a/src/logger/privacy_encoder_test.cc
+++ b/src/logger/privacy_encoder_test.cc
@@ -2,6 +2,7 @@
 
 #include <algorithm>
 
+#include <gmock/gmock.h>
 #include <gtest/gtest.h>
 
 #include "src/algorithms/privacy/count_min.h"
@@ -104,6 +105,10 @@
   std::unique_ptr<PrivacyEncoder> privacy_encoder_;
 };
 
+namespace {
+
+using testing::UnorderedElementsAreArray;
+
 TEST_F(PrivacyEncoderTest, MaybeMakePrivateObservationsNoAddedPrivacyReport) {
   MetricDefinition metric_def;
   ReportDefinition report_def;
@@ -509,11 +514,27 @@
   EXPECT_EQ(indices.value(), expected_indices);
 }
 
+namespace {
+
+// Increments cell at index |index| in |sketch| by |count|.
+void IncrementCellBy(std::unordered_map<size_t, uint64_t> &sketch, size_t index, uint64_t count) {
+  auto iter = sketch.find(index);
+  if (iter == sketch.end()) {
+    sketch[index] = count;
+  } else {
+    iter->second += count;
+  }
+}
+
+}  // namespace
+
+// TODO(https://fxbug.dev/322409910): Delete this test after clients no longer store legacy string
+// hashes.
 TEST_F(PrivacyEncoderTest, StringCounts) {
   size_t num_cells_per_hash = 2;
   size_t num_hashes = 2;
   uint32_t max_event_code = 1;
-  uint32_t max_count = 2;
+  uint64_t max_count = 2;
   uint32_t num_index_points = max_count + 1;
 
   // |metric_def| has 2 valid event vectors.
@@ -552,34 +573,135 @@
 
   // The general formula for an expected index is:
   // (count + num_index_points * sketch cell index) * (num_event_vectors) + event_vector_index.
-  // Cells with count 0 are omitted.
   auto count_min = CountMin<uint64_t>::MakeSketch(num_cells_per_hash, num_hashes);
-  std::vector<size_t> blobfs_indices =
-      count_min.GetCellIndices(string_histogram_obs->string_hashes(0));
-  std::vector<size_t> thinfs_indices =
-      count_min.GetCellIndices(string_histogram_obs->string_hashes(1));
+  std::unordered_map<size_t, uint64_t> sketch_0;
+  std::unordered_map<size_t, uint64_t> sketch_1;
 
-  std::vector<uint64_t> expected_private_indices;
-  expected_private_indices.reserve(6);
-
-  for (size_t index : blobfs_indices) {
-    // Expected private indices for a count of 1 for "blobfs" with event vector {0}
-    expected_private_indices.push_back((1 + num_index_points * index) * (max_event_code + 1) + 0);
+  // Increment cells for "blobfs" with event vector {0} by 1
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes(0))) {
+    IncrementCellBy(sketch_0, index, /*count=*/1);
+  }
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes(1))) {
+    // Increment cells for "thinfs" with event vector {0} by 2
+    IncrementCellBy(sketch_0, index, /*count=*/2);
+    // Increment cells for "thinfs" with event vector {1} by 2
+    IncrementCellBy(sketch_1, index, /*count=*/2);
   }
 
-  for (size_t index : thinfs_indices) {
-    // Expected private indices for a count of 2 for "thinfs" with event vector {0}
-    expected_private_indices.push_back((2 + num_index_points * index) * (max_event_code + 1) + 0);
-    // Expected private indices for a count of 2 for "thinfs" with event vector {1}
-    expected_private_indices.push_back((2 + num_index_points * index) * (max_event_code + 1) + 1);
+  std::vector<uint64_t> expected_private_indices;
+  for (const auto &[cell_index, cell_value] : sketch_0) {
+    if (cell_value == 0) {
+      continue;
+    }
+    const uint64_t clipped_count = std::clamp(cell_value, 0ul, max_count);
+
+    // Expected private indices for event vector {0}
+    expected_private_indices.push_back(
+        (clipped_count + num_index_points * cell_index) * (max_event_code + 1) + 0 /*event_code*/);
+  }
+
+  for (const auto &[cell_index, cell_value] : sketch_1) {
+    if (cell_value == 0) {
+      continue;
+    }
+    const uint64_t clipped_count = std::clamp(cell_value, 0ul, max_count);
+
+    // Expected private indices for event vector {1}
+    expected_private_indices.push_back(
+        (clipped_count + num_index_points * cell_index) * (max_event_code + 1) + 1 /*event_code*/);
   }
 
   CB_ASSERT_OK_AND_ASSIGN(std::vector<uint64_t> indices,
                           PrepareIndexVectorForStringCountsReport(
                               observation, metric_def, report_def, num_cells_per_hash, num_hashes));
-  std::sort(indices.begin(), indices.end());
-  std::sort(expected_private_indices.begin(), expected_private_indices.end());
-  EXPECT_EQ(indices, expected_private_indices);
+  EXPECT_THAT(indices, UnorderedElementsAreArray(expected_private_indices));
+}
+
+TEST_F(PrivacyEncoderTest, StringCountsFF64) {
+  size_t num_cells_per_hash = 2;
+  size_t num_hashes = 2;
+  uint32_t max_event_code = 1;
+  uint64_t max_count = 2;
+  uint32_t num_index_points = max_count + 1;
+
+  // |metric_def| has 2 valid event vectors.
+  MetricDefinition metric_def;
+  metric_def.set_metric_type(MetricDefinition::STRING);
+  MetricDefinition::MetricDimension *metric_dim = metric_def.add_metric_dimensions();
+  metric_dim->set_dimension("dimension 0");
+  metric_dim->set_max_event_code(max_event_code);
+
+  // |report_def| has 3 valid count values: {0, 1, 2}.
+  ReportDefinition report_def;
+  report_def.set_max_count(max_count);
+  report_def.set_num_index_points(num_index_points);
+  report_def.set_string_buffer_max(10);
+
+  // Prepare a StringHistogramObservation with 2 IndexHistograms:
+  // - with event vector {0}: ("blobfs", count = 1), ("thinfs", count = 2)
+  // - with event vector {1}: ("thinfs", count = 2)
+  Observation observation;
+  StringHistogramObservation *string_histogram_obs = observation.mutable_string_histogram();
+
+  string_histogram_obs->add_string_hashes_ff64(util::FarmhashFingerprint64("blobfs"));
+  string_histogram_obs->add_string_hashes_ff64(util::FarmhashFingerprint64("thinfs"));
+
+  IndexHistogram *histogram_1 = string_histogram_obs->add_string_histograms();
+  histogram_1->add_event_codes(0u);
+  histogram_1->add_bucket_indices(0u);
+  histogram_1->add_bucket_counts(1u);
+  histogram_1->add_bucket_indices(1u);
+  histogram_1->add_bucket_counts(2u);
+
+  IndexHistogram *histogram_2 = string_histogram_obs->add_string_histograms();
+  histogram_2->add_event_codes(1u);
+  histogram_2->add_bucket_indices(1u);
+  histogram_2->add_bucket_counts(2u);
+
+  // The general formula for an expected index is:
+  // (count + num_index_points * sketch cell index) * (num_event_vectors) + event_vector_index
+  auto count_min = CountMin<uint64_t>::MakeSketch(num_cells_per_hash, num_hashes);
+  std::unordered_map<size_t, uint64_t> sketch_0;
+  std::unordered_map<size_t, uint64_t> sketch_1;
+
+  // Increment cells for "blobfs" with event vector {0} by 1
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes_ff64(0))) {
+    IncrementCellBy(sketch_0, index, /*count=*/1);
+  }
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes_ff64(1))) {
+    // Increment cells for "thinfs" with event vector {0} by 2
+    IncrementCellBy(sketch_0, index, /*count=*/2);
+    // Increment cells for "thinfs" with event vector {1} by 2
+    IncrementCellBy(sketch_1, index, /*count=*/2);
+  }
+
+  std::vector<uint64_t> expected_private_indices;
+  for (const auto &[cell_index, cell_value] : sketch_0) {
+    if (cell_value == 0) {
+      continue;
+    }
+    const uint64_t clipped_count = std::clamp(cell_value, 0ul, max_count);
+
+    // Expected private indices for event vector {0}
+    expected_private_indices.push_back(
+        (clipped_count + num_index_points * cell_index) * (max_event_code + 1) + 0 /*event_code*/);
+  }
+
+  for (const auto &[cell_index, cell_value] : sketch_1) {
+    if (cell_value == 0) {
+      continue;
+    }
+    const uint64_t clipped_count = std::clamp(cell_value, 0ul, max_count);
+
+    // Expected private indices for event vector {1}
+    expected_private_indices.push_back(
+        (clipped_count + num_index_points * cell_index) * (max_event_code + 1) + 1 /*event_code*/);
+  }
+
+  CB_ASSERT_OK_AND_ASSIGN(std::vector<uint64_t> indices,
+                          PrepareIndexVectorForStringCountsReport(
+                              observation, metric_def, report_def, num_cells_per_hash, num_hashes));
+  EXPECT_THAT(indices, UnorderedElementsAreArray(expected_private_indices));
 }
 
 // Checks that the `max_count` bound of a StringCounts report is enforced on each
@@ -637,6 +759,8 @@
   EXPECT_EQ(indices, std::vector<uint64_t>({expected_index}));
 }
 
+// TODO(https://fxbug.dev/322409910): Delete this test after clients no longer store legacy string
+// hashes.
 TEST_F(PrivacyEncoderTest, UniqueDeviceStringCounts) {
   size_t num_cells_per_hash = 2;
   size_t num_hashes = 2;
@@ -675,34 +799,122 @@
 
   // The general formula for an expected index is:
   // sketch cell index * num_event_vectors + event_vector_index.
-  // Cells with count 0 are omitted.
-  CountMin<uint64_t> count_min = CountMin<uint64_t>::MakeSketch(num_cells_per_hash, num_hashes);
-  std::vector<size_t> blobfs_indices =
-      count_min.GetCellIndices(string_histogram_obs->string_hashes(0));
-  std::vector<size_t> thinfs_indices =
-      count_min.GetCellIndices(string_histogram_obs->string_hashes(1));
+  auto count_min = CountMin<uint64_t>::MakeSketch(num_cells_per_hash, num_hashes);
+  std::unordered_map<size_t, uint64_t> sketch_0;
+  std::unordered_map<size_t, uint64_t> sketch_1;
 
-  std::vector<uint64_t> expected_private_indices;
-  expected_private_indices.reserve(6);
-
-  for (size_t index : blobfs_indices) {
-    // Expected private indices for a count of 1 for "blobfs" with event vector {0}
-    expected_private_indices.push_back(index * (max_event_code + 1) + 0);
+  // Increment cells for "blobfs" with event vector {0} by 1
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes(0))) {
+    IncrementCellBy(sketch_0, index, /*count=*/1);
+  }
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes(1))) {
+    // Increment cells for "thinfs" with event vector {0} by 1
+    IncrementCellBy(sketch_0, index, /*count=*/1);
+    // Increment cells for "thinfs" with event vector {1} by 1
+    IncrementCellBy(sketch_1, index, /*count=*/1);
   }
 
-  for (size_t index : thinfs_indices) {
-    // Expected private indices for a count of 1 for "thinfs" with event vector {0}
-    expected_private_indices.push_back(index * (max_event_code + 1) + 0);
-    // Expected private indices for a count of 1 for "thinfs" with event vector {1}
-    expected_private_indices.push_back(index * (max_event_code + 1) + 1);
+  std::vector<uint64_t> expected_private_indices;
+  for (const auto &[cell_index, cell_value] : sketch_0) {
+    if (cell_value == 0) {
+      continue;
+    }
+
+    // Expected private indices for event vector {0}
+    expected_private_indices.push_back(cell_index * (max_event_code + 1) + 0 /*event_code*/);
+  }
+
+  for (const auto &[cell_index, cell_value] : sketch_1) {
+    if (cell_value == 0) {
+      continue;
+    }
+
+    // Expected private indices for event vector {1}
+    expected_private_indices.push_back(cell_index * (max_event_code + 1) + 1 /*event_code*/);
   }
 
   CB_ASSERT_OK_AND_ASSIGN(std::vector<uint64_t> indices,
                           PrepareIndexVectorForUniqueDeviceStringCountsReport(
                               observation, metric_def, report_def, num_cells_per_hash, num_hashes));
-  std::sort(indices.begin(), indices.end());
-  std::sort(expected_private_indices.begin(), expected_private_indices.end());
-  EXPECT_EQ(indices, expected_private_indices);
+  EXPECT_THAT(indices, UnorderedElementsAreArray(expected_private_indices));
+}
+
+TEST_F(PrivacyEncoderTest, UniqueDeviceStringCountsFF64) {
+  size_t num_cells_per_hash = 2;
+  size_t num_hashes = 2;
+  uint32_t max_event_code = 1;
+
+  // |metric_def| has 2 valid event vectors.
+  MetricDefinition metric_def;
+  metric_def.set_metric_type(MetricDefinition::STRING);
+  MetricDefinition::MetricDimension *metric_dim = metric_def.add_metric_dimensions();
+  metric_dim->set_dimension("dimension 0");
+  metric_dim->set_max_event_code(max_event_code);
+
+  ReportDefinition report_def;
+
+  // Prepare a StringHistogramObservation with 2 IndexHistograms with count 1 for each included
+  // string:
+  // - with event vector {0}: ("blobfs", count = 1), ("thinfs", count = 1)
+  // - with event vector {1}: ("thinfs", count = 1)
+  Observation observation;
+  StringHistogramObservation *string_histogram_obs = observation.mutable_string_histogram();
+
+  string_histogram_obs->add_string_hashes_ff64(util::FarmhashFingerprint64("blobfs"));
+  string_histogram_obs->add_string_hashes_ff64(util::FarmhashFingerprint64("thinfs"));
+
+  IndexHistogram *histogram_1 = string_histogram_obs->add_string_histograms();
+  histogram_1->add_event_codes(0u);
+  histogram_1->add_bucket_indices(0u);
+  histogram_1->add_bucket_counts(1u);
+  histogram_1->add_bucket_indices(1u);
+  histogram_1->add_bucket_counts(1u);
+
+  IndexHistogram *histogram_2 = string_histogram_obs->add_string_histograms();
+  histogram_2->add_event_codes(1u);
+  histogram_2->add_bucket_indices(1u);
+  histogram_2->add_bucket_counts(1u);
+
+  // The general formula for an expected index is:
+  // sketch cell index * num_event_vectors + event_vector_index.
+  auto count_min = CountMin<uint64_t>::MakeSketch(num_cells_per_hash, num_hashes);
+  std::unordered_map<size_t, uint64_t> sketch_0;
+  std::unordered_map<size_t, uint64_t> sketch_1;
+
+  // Increment cells for "blobfs" with event vector {0} by 1
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes_ff64(0))) {
+    IncrementCellBy(sketch_0, index, /*count=*/1);
+  }
+  for (const size_t index : count_min.GetCellIndices(string_histogram_obs->string_hashes_ff64(1))) {
+    // Increment cells for "thinfs" with event vector {0} by 1
+    IncrementCellBy(sketch_0, index, /*count=*/1);
+    // Increment cells for "thinfs" with event vector {1} by 1
+    IncrementCellBy(sketch_1, index, /*count=*/1);
+  }
+
+  std::vector<uint64_t> expected_private_indices;
+  for (const auto &[cell_index, cell_value] : sketch_0) {
+    if (cell_value == 0) {
+      continue;
+    }
+
+    // Expected private indices for event vector {0}
+    expected_private_indices.push_back(cell_index * (max_event_code + 1) + 0 /*event_code*/);
+  }
+
+  for (const auto &[cell_index, cell_value] : sketch_1) {
+    if (cell_value == 0) {
+      continue;
+    }
+
+    // Expected private indices for event vector {1}
+    expected_private_indices.push_back(cell_index * (max_event_code + 1) + 1 /*event_code*/);
+  }
+
+  CB_ASSERT_OK_AND_ASSIGN(std::vector<uint64_t> indices,
+                          PrepareIndexVectorForUniqueDeviceStringCountsReport(
+                              observation, metric_def, report_def, num_cells_per_hash, num_hashes));
+  EXPECT_THAT(indices, UnorderedElementsAreArray(expected_private_indices));
 }
 
 TEST_F(PrivacyEncoderTest, ObservationsFromIndicesNoIndices) {
@@ -934,6 +1146,8 @@
 TEST_F(PrivacyEncoderTest, MaxIndexForReportStringCounts) {
   uint64_t max_event_code = 9;
   uint32_t num_index_points = 6;
+  int32_t num_cells_per_hash = 15;
+  int32_t num_hashes = 11;
 
   // |metric_def| has 10 valid event vectors.
   MetricDefinition metric_def;
@@ -946,10 +1160,10 @@
   ReportDefinition report_def;
   report_def.set_report_type(ReportDefinition::STRING_COUNTS);
   report_def.set_num_index_points(num_index_points);
-
-  CB_ASSERT_OK_AND_ASSIGN(size_t num_cells_per_hash,
-                          PrivacyEncoder::GetNumCountMinCellsPerHash(report_def));
-  CB_ASSERT_OK_AND_ASSIGN(size_t num_hashes, PrivacyEncoder::GetNumCountMinHashes(report_def));
+  StringSketchParameters sketch_params;
+  sketch_params.set_num_cells_per_hash(num_cells_per_hash);
+  sketch_params.set_num_hashes(num_hashes);
+  *report_def.mutable_string_sketch_params() = sketch_params;
 
   // The expected max index is:
   // (# of valid event vectors) * (# valid count values) * (size of count min sketch) - 1
@@ -964,6 +1178,8 @@
 
 TEST_F(PrivacyEncoderTest, MaxIndexForReportUniqueDeviceStringCounts) {
   uint64_t max_event_code = 9;
+  int32_t num_cells_per_hash = 15;
+  int32_t num_hashes = 11;
 
   // |metric_def| has 10 valid event vectors.
   MetricDefinition metric_def;
@@ -974,10 +1190,10 @@
 
   ReportDefinition report_def;
   report_def.set_report_type(ReportDefinition::UNIQUE_DEVICE_STRING_COUNTS);
-
-  CB_ASSERT_OK_AND_ASSIGN(size_t num_cells_per_hash,
-                          PrivacyEncoder::GetNumCountMinCellsPerHash(report_def));
-  CB_ASSERT_OK_AND_ASSIGN(size_t num_hashes, PrivacyEncoder::GetNumCountMinHashes(report_def));
+  StringSketchParameters sketch_params;
+  sketch_params.set_num_cells_per_hash(num_cells_per_hash);
+  sketch_params.set_num_hashes(num_hashes);
+  *report_def.mutable_string_sketch_params() = sketch_params;
 
   // The expected max index is: (# of valid event vectors) * (size of count min sketch) - 1 = 10 *
   // 165 - 1 = 1649.
@@ -1071,4 +1287,5 @@
   EXPECT_EQ(ClipCount(count, report_def), max_count);
 }
 
+}  // namespace
 }  // namespace cobalt::logger
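The expected-index arithmetic exercised by the StringCounts tests above, made concrete with the tests' own parameters (num_index_points = max_count + 1 = 3, and two valid event vectors since max_event_code = 1); the static_asserts below simply re-check the formula's arithmetic:

#include <cstdint>

// index = (clipped_count + num_index_points * cell_index) * num_event_vectors
//         + event_vector_index
constexpr uint64_t kNumIndexPoints = 3;
constexpr uint64_t kNumEventVectors = 2;

constexpr uint64_t PrivateIndex(uint64_t clipped_count, uint64_t cell_index,
                                uint64_t event_vector_index) {
  return (clipped_count + kNumIndexPoints * cell_index) * kNumEventVectors +
         event_vector_index;
}

static_assert(PrivateIndex(2, 1, 0) == 10);  // count 2, cell 1, event vector {0}
static_assert(PrivateIndex(2, 1, 1) == 11);  // same cell, event vector {1}

// For UNIQUE_DEVICE_STRING_COUNTS the count dimension drops out:
// index = cell_index * num_event_vectors + event_vector_index, so cell 1 with
// event vector {1} maps to 1 * 2 + 1 = 3.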
diff --git a/src/logger/project_context.cc b/src/logger/project_context.cc
index ba85b5f..84761db 100644
--- a/src/logger/project_context.cc
+++ b/src/logger/project_context.cc
@@ -6,6 +6,7 @@
 
 #include <sstream>
 
+#include "absl/strings/str_cat.h"
 #include "src/logging.h"
 #include "src/public/lib/registry_identifiers.h"
 #include "src/registry/metric_definition.pb.h"
@@ -14,7 +15,7 @@
 
 namespace {
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 void PopulateProject(uint32_t customer_id, uint32_t project_id, const std::string& customer_name,
                      const std::string& project_name, Project* project) {
   project->set_customer_id(customer_id);
@@ -34,7 +35,7 @@
 #ifdef PROTO_LITE
   return project_->project_name();
 #else
-  return project_->DebugString();
+  return absl::StrCat(*project_);
 #endif
 }
 
@@ -107,7 +108,7 @@
 #ifdef PROTO_LITE
   return project_.project_name();
 #else
-  return project_.DebugString();
+  return absl::StrCat(project_);
 #endif
 }
 
diff --git a/src/logging.h b/src/logging.h
index 0022482..2b714b3 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -19,7 +19,7 @@
 
 #define INIT_LOGGING(val)
 
-#define VLOG(verboselevel) FX_VLOGST(verboselevel, "core")
+#define VLOG(verboselevel) ((void)(verboselevel)), FX_LOGST(DEBUG, "core")
 #define LOG(level) FX_LOGST(level, "core")
 #define LOG_FIRST_N(verboselevel, n) FX_LOGST_FIRST_N(verboselevel, n, "core")
 
diff --git a/src/observation_store/envelope_maker.cc b/src/observation_store/envelope_maker.cc
index fc78514..fbbcc50 100644
--- a/src/observation_store/envelope_maker.cc
+++ b/src/observation_store/envelope_maker.cc
@@ -11,7 +11,7 @@
 
 namespace cobalt::observation_store {
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 EnvelopeMaker::EnvelopeMaker(size_t max_bytes_each_observation, size_t max_num_bytes)
     : max_bytes_each_observation_(max_bytes_each_observation), max_num_bytes_(max_num_bytes) {}
 
diff --git a/src/observation_store/envelope_maker_test.cc b/src/observation_store/envelope_maker_test.cc
index 4adf2b7..40871dd 100644
--- a/src/observation_store/envelope_maker_test.cc
+++ b/src/observation_store/envelope_maker_test.cc
@@ -260,7 +260,7 @@
 
     // Check each one of the observations.
     for (int i = 0; i < expected_num_observations; i++) {
-      // TODO(b/278913456): Extract the serialized observation.
+      // TODO(https://fxbug.dev/278913456): Extract the serialized observation.
     }
   }
 
diff --git a/src/observation_store/file_observation_store.cc b/src/observation_store/file_observation_store.cc
index f8bf83d..27fedde 100644
--- a/src/observation_store/file_observation_store.cc
+++ b/src/observation_store/file_observation_store.cc
@@ -51,7 +51,7 @@
   // Check if root_directory_ already exists.
   if (!fs_.ListFiles(root_directory_).ok()) {
     // If it doesn't exist, create it here.
-    // TODO(b/278922576): If MakeDirectory doesn't work, we should fail over to
+    // TODO(https://fxbug.dev/278922576): If MakeDirectory doesn't work, we should fail over to
     // MemoryObservationStore.
     if (!fs_.MakeDirectory(root_directory_)) {
       LOG(ERROR) << "Failed to create FileObservationStore directory: " << root_directory_;
diff --git a/src/observation_store/file_observation_store.h b/src/observation_store/file_observation_store.h
index 665499f..f7e06cd 100644
--- a/src/observation_store/file_observation_store.h
+++ b/src/observation_store/file_observation_store.h
@@ -54,9 +54,10 @@
     // (e.g. /system/data/cobalt_legacy)
     //
     // |file_name|. The file name for the file containing the observations.
-    FileEnvelopeHolder(util::FileSystem &fs, FileObservationStore &store,
-                       // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
-                       std::string root_directory, const std::string &file_name)
+    FileEnvelopeHolder(
+        util::FileSystem &fs, FileObservationStore &store,
+        // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+        std::string root_directory, const std::string &file_name)
         : fs_(fs),
           store_(store),
           root_directory_(std::move(root_directory)),
diff --git a/src/observation_store/file_observation_store_test.cc b/src/observation_store/file_observation_store_test.cc
index de37969..65c5936 100644
--- a/src/observation_store/file_observation_store_test.cc
+++ b/src/observation_store/file_observation_store_test.cc
@@ -372,7 +372,7 @@
 }
 
 #if __has_feature(address_sanitizer) && defined(__Fuchsia__)
-// Skip this test under ASAN in Fuchsia. See: fxbug.dev/85575
+// Skip this test under ASAN in Fuchsia. See: fxbug.dev/42166489
 #else
 TEST_F(FileObservationStoreTest, StressTest) {
   std::random_device rd;
diff --git a/src/observation_store/observation_store.cc b/src/observation_store/observation_store.cc
index c75df48..4a50ea7 100644
--- a/src/observation_store/observation_store.cc
+++ b/src/observation_store/observation_store.cc
@@ -17,7 +17,7 @@
 
 }
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 ObservationStore::ObservationStore(size_t max_bytes_per_observation, size_t max_bytes_per_envelope,
                                    size_t max_bytes_total)
     : max_bytes_per_observation_(max_bytes_per_observation),
diff --git a/src/observation_store/observation_store.h b/src/observation_store/observation_store.h
index 7d0e0f6..7010482 100644
--- a/src/observation_store/observation_store.h
+++ b/src/observation_store/observation_store.h
@@ -116,7 +116,8 @@
     // store with a different EncryptedMessageMaker, in this case, the envelope that is produced
     // will have observations encrypted with two (or more) different EncryptedMessageMakers.)
     //
-    // TODO(b/278924679): Make ObservationStore *only* store unencrypted observations.
+    // TODO(https://fxbug.dev/278924679): Make ObservationStore *only* store unencrypted
+    // observations.
     virtual const Envelope& GetEnvelope(util::EncryptedMessageMaker* encrypter) = 0;
 
     // Returns an estimated size on the wire of the resulting Envelope owned by
diff --git a/src/pb/BUILD.gn b/src/pb/BUILD.gn
index c803106..d97495a 100644
--- a/src/pb/BUILD.gn
+++ b/src/pb/BUILD.gn
@@ -8,7 +8,6 @@
 
 proto_library("pb") {
   sources = [
-    "clearcut_extensions.proto",
     "common.proto",
     "encrypted_message.proto",
     "envelope.proto",
@@ -23,7 +22,6 @@
   generate_python = false
   cc_generator_options = "lite"
   deps = [
-    "$cobalt_root/src/lib/clearcut:clearcut_proto",
     "$cobalt_root/src/registry:cobalt_registry_proto",
     "//third_party/boringssl",
   ]
diff --git a/src/pb/clearcut_extensions.proto b/src/pb/clearcut_extensions.proto
deleted file mode 100644
index 2fc4492..0000000
--- a/src/pb/clearcut_extensions.proto
+++ /dev/null
@@ -1,16 +0,0 @@
-syntax = "proto2";
-
-package cobalt.clearcut_extensions;
-
-import "src/pb/encrypted_message.proto";
-import "src/lib/clearcut/clearcut.proto";
-
-message LogEventExtension {
-  extend cobalt.lib.clearcut.LogEvent {
-    optional LogEventExtension ext = 66566376;
-  }
-
-  oneof value {
-    cobalt.EncryptedMessage cobalt_encrypted_envelope = 705;
-  }
-}
diff --git a/src/pb/common.proto b/src/pb/common.proto
index c6969dd..3dd94bd 100644
--- a/src/pb/common.proto
+++ b/src/pb/common.proto
@@ -7,6 +7,7 @@
 option java_multiple_files = true;
 option java_package = "com.google.cobalt";
 
+
 ////////////////////////////////////////////////////////////////////////////////
 // NOTE: This file is used by the Cobalt clients and the Cobalt servers.
 // The source-of-truth of this file is located in Google's internal code
diff --git a/src/pb/encrypted_message.proto b/src/pb/encrypted_message.proto
index 4a2af52..11f5e1a 100644
--- a/src/pb/encrypted_message.proto
+++ b/src/pb/encrypted_message.proto
@@ -4,9 +4,11 @@
 
 
 
+
 option java_multiple_files = true;
 option java_package = "com.google.cobalt";
 
+
 ////////////////////////////////////////////////////////////////////////////////
 // NOTE: This file is used by the Cobalt client and the Cobalt servers.
 // The source-of-truth of this file is located in Google's internal code
@@ -25,6 +27,7 @@
 // the EncryptedMessage proto to carry the ciphertext in both cases.
 //
 message EncryptedMessage {
+
   // The different schemes used in Cobalt to encrypt a message.
   enum EncryptionScheme {
     // The message is not encrypted. |ciphertext| contains plaintext bytes of a
diff --git a/src/pb/observation.proto b/src/pb/observation.proto
index 620f90c..299ae0c 100644
--- a/src/pb/observation.proto
+++ b/src/pb/observation.proto
@@ -137,11 +137,17 @@
 // STRING_COUNTS
 // UNIQUE_DEVICE_STRING_COUNTS
 message StringHistogramObservation {
-  // List of hashes of strings (hashed using Farmhash Fingerprint128).
+  // TODO(https://fxbug.dev/322409910): Delete string_hashes after clients stop sending
+  // the field.
+  repeated bytes string_hashes = 1 [deprecated = true];
+
+  // List of hashes of strings (hashed using Farmhash Fingerprint64).
   // The string that hashes to the bytes value in the ith position in
   // |string_hashes| corresponds to the bucket with index i in each of the
   // |bucket_indices| values in |string_histograms|.
-  repeated bytes string_hashes = 1;
+  //
+  // Only one of `string_hashes` or `string_hashes_ff64` should be used.
+  repeated bytes string_hashes_ff64 = 3;
 
   repeated IndexHistogram string_histograms = 2;
 }
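
To make the migration above concrete, here is a minimal C++ sketch of how a client could populate the new field. It assumes the FarmHash library's util::Fingerprint64 and an 8-byte little-endian encoding of the hash; both are assumptions, and the production encoder may use a different convention.

// Sketch only: assumes farmhash's util::Fingerprint64 and a little-endian
// byte encoding; the real Cobalt encoder may differ.
#include <cstdint>
#include <string>

#include "farmhash.h"  // assumed include path for the FarmHash library

// Returns the Fingerprint64 hash of |s| as an 8-byte string, suitable for a
// `bytes` proto field such as string_hashes_ff64.
std::string HashStringFF64(const std::string& s) {
  uint64_t hash = util::Fingerprint64(s.data(), s.size());
  std::string bytes(sizeof(hash), '\0');
  for (size_t i = 0; i < sizeof(hash); ++i) {
    bytes[i] = static_cast<char>((hash >> (8 * i)) & 0xff);
  }
  return bytes;
}

// Hypothetical usage:
//   observation.add_string_hashes_ff64(HashStringFF64(candidate_string));
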
@@ -153,11 +159,11 @@
 // privacy level will be transported from client to server in the form of
 // PrivateIndexObservations.
 //
-// The field |index| of a PrivateIndexObservation is an index into an enumeration
-// of the set of possible Observations of another type (after bucketing some
-// parameters, as specified in the ReportDefinition for which the Observation was
-// generated.) The details of this enumeration depend on the type of Observation
-// which was given to the PrivacyEncoder.
+// The field |index| of a PrivateIndexObservation is an index into an
+// enumeration of the set of possible Observations of another type (after
+// bucketing some parameters, as specified in the ReportDefinition for which the
+// Observation was generated.) The details of this enumeration depend on the
+// type of Observation which was given to the PrivacyEncoder.
 message PrivateIndexObservation {
   uint64 index = 1;
 }
@@ -165,18 +171,19 @@
 // A pair consisting of a bucket index and a count. Each bucket is
 // an integer range. The definition of the buckets and their indices
 // is given in the MetricDefinition.
-// TODO(b/262785064): don't use this for the public LoggerInterface.
+// TODO(https://fxbug.dev/262785064): don't use this for the public LoggerInterface.
 message HistogramBucket {
   // The index of one of the buckets.
   uint32 index = 1;
   // The count for that bucket.
   uint64 count = 2;
-};
+}
 
 // Observations of this type are used to signal that a given device was
 // collecting data for a given report, over some window of time. This
 // Observation type has no required fields.
 //
-// ReportParticipationObservations are produced by the PrivacyEncoder and consumed by the
-// ReportGenerator for reports that use local differential privacy.
+// ReportParticipationObservations are produced by the PrivacyEncoder and
+// consumed by the ReportGenerator for reports that use local differential
+// privacy.
 message ReportParticipationObservation {}
diff --git a/src/public/cobalt_config.h b/src/public/cobalt_config.h
index 9d0b39e..8d85f59 100644
--- a/src/public/cobalt_config.h
+++ b/src/public/cobalt_config.h
@@ -282,8 +282,8 @@
   // TimeZonePolicy. In the future, `civil_time_converter` will be required to create a
   // CobaltService.
   //
-  // TODO(b/278924073): Update this comment and CHECK in the CobaltService constructor
-  // when all client implementations provide a civil time converter.
+  // TODO(https://fxbug.dev/278924073): Update this comment and CHECK in the CobaltService
+  // constructor when all client implementations provide a civil time converter.
   std::unique_ptr<util::CivilTimeConverterInterface> civil_time_converter;
 
   // |diagnostics|: The implementation to send diagnostic information about the functioning of the
@@ -292,7 +292,7 @@
 
   // |enable_replacement_metrics|: DEPRECATED Determines whether or not replacement_metric_id should
   // be honored
-  // TODO(fxbug.dev/111165): Remove this once it is no longer referenced anywhere.
+  // TODO(https://fxbug.dev/42062466): Remove this once it is no longer referenced anywhere.
   bool enable_replacement_metrics = false;
 
   // |start_worker_threads|: Determines whether or not to start the worker threads.
diff --git a/src/public/diagnostics_interface.h b/src/public/diagnostics_interface.h
index 3ea5dc5..c9fa5fc 100644
--- a/src/public/diagnostics_interface.h
+++ b/src/public/diagnostics_interface.h
@@ -51,8 +51,8 @@
                             const std::string& project) = 0;
 
   // Used to track how much data is stored per-class on disk.
-  // TODO(b/278930401): use named types when InternalMetrics::TrackDiskUsage is fixed.
-  // NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+  // TODO(https://fxbug.dev/278930401): use named types when InternalMetrics::TrackDiskUsage is
+  // fixed. NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
   virtual void TrackDiskUsage(int storageClass, int64_t bytes, int64_t byte_limit) = 0;
 
   // Used to track what the quota state of each project on the system is.
@@ -60,7 +60,8 @@
   // |event_type|: tracks what the state is (1: Below quota, 2: Above quota, 3: Above quota and log
   //               rejected).
   //
-  // TODO(fxbug.dev/96540): Remove default implementation once this is implemented in Fuchsia.
+  // TODO(https://fxbug.dev/42178669): Remove default implementation once this is implemented in
+  // Fuchsia.
   virtual void LocalAggregationQuotaEvent(const lib::ProjectIdentifier& project, int event_type) {}
 };
 
diff --git a/src/public/lib/BUILD.gn b/src/public/lib/BUILD.gn
index 8942f3e..0eb605a 100644
--- a/src/public/lib/BUILD.gn
+++ b/src/public/lib/BUILD.gn
@@ -20,7 +20,10 @@
     "status_codes.cc",
     "status_codes.h",
   ]
-  configs += [ "$cobalt_root:cobalt_config" ]
+  configs += [
+    "$cobalt_root:cobalt_config",
+    "$cobalt_root:no_deprecated_pragma_config",
+  ]
 }
 
 source_set("registry_identifiers") {
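
For context on why these warning-suppression configs are needed: marking a proto field with deprecated = true (as done for string_hashes above) makes the C++ code generator annotate the field's accessors as deprecated, so any remaining caller trips a compiler diagnostic. A rough sketch of the effect, assuming a [[deprecated]]-style attribute (protoc actually emits its own macro):

// Rough sketch of generated-code behavior for `deprecated = true`; the
// attribute spelling here is an approximation.
#include <string>
#include <vector>

class StringHistogramObservationSketch {
 public:
  [[deprecated]] const std::vector<std::string>& string_hashes() const {
    return string_hashes_;
  }
  const std::vector<std::string>& string_hashes_ff64() const {
    return string_hashes_ff64_;
  }

 private:
  std::vector<std::string> string_hashes_;
  std::vector<std::string> string_hashes_ff64_;
};

// Any remaining caller of string_hashes() now triggers
// -Wdeprecated-declarations, which migrating code suppresses until the
// field is deleted.
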
diff --git a/src/registry/cobalt_registry.proto b/src/registry/cobalt_registry.proto
index 8c7cd52..8835f23 100644
--- a/src/registry/cobalt_registry.proto
+++ b/src/registry/cobalt_registry.proto
@@ -45,6 +45,11 @@
   repeated MetricDefinition metrics = 3;
   string project_contact = 4;
 
+  // Identifier of the app that is expected to log the metrics for this project.
+  // Depending on the platform, this may represent an "app" or "component", and
+  // the format of package identifiers may be different.
+  string app_package_identifier = 7;
+
   // Experiment namespaces supported for experiment ids in this project.
   repeated string experiments_namespaces = 5;
 
diff --git a/src/registry/metric_definition.proto b/src/registry/metric_definition.proto
index 59edcd7..7f4294f 100644
--- a/src/registry/metric_definition.proto
+++ b/src/registry/metric_definition.proto
@@ -32,9 +32,9 @@
 // Next ID: 27
 message MetricDefinition {
   reserved 6, 7, 9, 13, 14, 15, 17, 21, 23, 24;
-  reserved "event_codes", "event_code_buffer_max", "max_event_code", "parts", "proto_name",
-      "string_buffer_max", "replacement_metric_id", "no_replacement_metric", "customer_name",
-      "project_name";
+  reserved "event_codes", "event_code_buffer_max", "max_event_code", "parts",
+      "proto_name", "string_buffer_max", "replacement_metric_id",
+      "no_replacement_metric", "customer_name", "project_name";
 
   // Unique name for this Metric within its owning project.
   // The name must obey the syntax of a C variable name and must have length
@@ -54,8 +54,8 @@
   // Next ID: 12
   enum MetricType {
     reserved 1, 2, 3, 4, 5, 6, 7, 9999;
-    reserved "CUSTOM", "ELAPSED_TIME", "EVENT_COUNT", "EVENT_OCCURRED", "FRAME_RATE",
-        "INT_HISTOGRAM", "MEMORY_USAGE", "STRING_USED";
+    reserved "CUSTOM", "ELAPSED_TIME", "EVENT_COUNT", "EVENT_OCCURRED",
+        "FRAME_RATE", "INT_HISTOGRAM", "MEMORY_USAGE", "STRING_USED";
 
     UNSET = 0;
 
@@ -114,7 +114,7 @@
     // not recommended to associate the zero event code with a meaningful
     // label. Instead, omit the zero event code and your reports will contain
     // event code labels of `<code 0>` when the event code was not specified,
-    // or specify an explict zero value with a label of `Unknown` or `Unset`.
+    // or specify an explicit zero value with a label of `Unknown` or `Unset`.
     map<uint32, string> event_codes = 2;
 
     // max_event_code is the maximal value for any event in this dimension.
@@ -179,6 +179,10 @@
   repeated MetricSemantics metric_semantics = 20;
 
   // The path to a list of candidate strings for a metric of type STRING.
+  // The path should be relative to the root of the Cobalt registry, for
+  // instance "fuchsia/test_app2/application_names.txt". String candidate
+  // files should ideally be placed in the same registry and directory as the
+  // project that uses them.
   //
   // This is a required field for metrics of type STRING.
   string string_candidate_file = 22;
@@ -206,8 +210,8 @@
   // The TimeZonePolicy for this Metric (Optional. Defaults to UTC)
   TimeZonePolicy time_zone_policy = 10;
 
-  // An IANA time zone identifier (https://iana.org/time-zones). Should be set if
-  // and only if the metric's `time_zone_policy` is OTHER_TIME_ZONE.
+  // An IANA time zone identifier (https://iana.org/time-zones). Should be set
+  // if and only if the metric's `time_zone_policy` is OTHER_TIME_ZONE.
   string other_time_zone = 25;
 
   message Metadata {
diff --git a/src/registry/report_definition.proto b/src/registry/report_definition.proto
index 672da68..e0e86be 100644
--- a/src/registry/report_definition.proto
+++ b/src/registry/report_definition.proto
@@ -417,7 +417,7 @@
   // user population size. For now, it should be set manually in the Cobalt
   // registry in consultation with the Cobalt team.
   //
-  // TODO(b/295053509): update this comment once the field is populated by
+  // TODO(https://fxbug.dev/295053509): update this comment once the field is populated by
   // the registry parser.
   double poisson_mean = 30;
 
@@ -429,7 +429,7 @@
   // user population size. For now, it should be set manually in the Cobalt
   // registry in consultation with the Cobalt team.
   //
-  // TODO(b/278932979): update this comment once the field is populated by
+  // TODO(https://fxbug.dev/278932979): update this comment once the field is populated by
   // the registry parser.
   uint32 num_index_points = 22;
 
@@ -441,7 +441,7 @@
   // user population size. For now, it should be set manually in the Cobalt
   // registry in consultation with the Cobalt team.
   //
-  // TODO(b/278932979): update this comment once the field is populated by
+  // TODO(https://fxbug.dev/278932979): update this comment once the field is populated by
   // the registry parser.
   StringSketchParameters string_sketch_params = 27;
 
@@ -522,7 +522,7 @@
     // Numerical statistic aggregation procedures to be used with reports
     // of type UNIQUE_DEVICE_HISTOGRAMS, HOURLY_VALUE_HISTOGRAMS,
     // UNIQUE_DEVICE_NUMERIC_STATS and HOURLY_VALUE_NUMERIC_STATS.
-    // TODO(fxbug.dev/87151): Rename these to remove the '_PROCEDURE' suffix.
+    // TODO(https://fxbug.dev/42168241): Rename these to remove the '_PROCEDURE' suffix.
     SUM_PROCEDURE = 1;
     MIN_PROCEDURE = 2;
     MAX_PROCEDURE = 3;
@@ -608,7 +608,7 @@
   bool expedited_sending = 29;
 
   ///////////////////  Fields used by all report types ///////////////////
-  // Next id: 108
+  // Next id: 109
 
   // The list of SystemProfileFields to include in each row of the report.
   // Optional.
@@ -635,6 +635,14 @@
   // Maximum ReleaseStage for which this Report is allowed to be collected.
   ReleaseStage max_release_stage = 105;
 
+  // Report can be collected even if the user/device has not consented.
+  // This field can only be set to true on reports that use privacy mechanisms
+  // that include differential privacy (i.e. not DE_IDENTIFICATION). This
+  // field exists for collecting anonymized data whose collection is allowed
+  // even when consent has not been given. These use cases must be specially
+  // approved by privacy reviewers.
+  bool exempt_from_consent = 108;
+
   // New Privacy API
 
   // This enum identifies what privacy protection is applied to the report.
@@ -678,7 +686,7 @@
     // ShuffledDifferentialPrivacyConfig. For now, it should be set manually in
     // the Cobalt registry in consultation with the Cobalt team.
     //
-    // TODO(b/295053509): update this comment once the field is auto populated by
+    // TODO(https://fxbug.dev/295053509): update this comment once the field is auto-populated by
     // the registry parser.
     double poisson_mean = 4;
   }
diff --git a/src/registry/testing/testing.cc b/src/registry/testing/testing.cc
index f4c1e73..239e204 100644
--- a/src/registry/testing/testing.cc
+++ b/src/registry/testing/testing.cc
@@ -8,7 +8,7 @@
 
 std::unique_ptr<CobaltRegistry> MutateProject(
     std::unique_ptr<CobaltRegistry> registry,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     uint32_t customer_id, uint32_t project_id,
     const std::function<void(ProjectConfig* project)>& mutate_project) {
   for (auto& customer : *registry->mutable_customers()) {
diff --git a/src/system_data/BUILD.gn b/src/system_data/BUILD.gn
index 31781a3..8b880b2 100644
--- a/src/system_data/BUILD.gn
+++ b/src/system_data/BUILD.gn
@@ -10,6 +10,7 @@
     "configuration_data.h",
   ]
   configs += [ "$cobalt_root:cobalt_config" ]
+  deps = [ "$cobalt_root/src/lib/clearcut:clearcut_proto" ]
   visibility += [ "//src/cobalt/bin/app:lib" ]
 }
 
diff --git a/src/system_data/configuration_data.cc b/src/system_data/configuration_data.cc
index a71e822..0a4f540 100644
--- a/src/system_data/configuration_data.cc
+++ b/src/system_data/configuration_data.cc
@@ -4,14 +4,14 @@
 
 #include "src/system_data/configuration_data.h"
 
+#include "src/lib/clearcut/clearcut_log_source.pb.h"
+
 namespace cobalt::system_data {
 
-// IDs of the Clearcut log sources (Cobalt Shuffler Input) that Cobalt can write to.
-//
-// Can be used to write logs for Clearcut's demo application.
-// static const int32_t kClearcutDemoSource = 177;
-static const int32_t kLogSourceIdDevel = 844;
-static const int32_t kLogSourceIdProd = 1176;
+using cobalt::clearcut_protos::LogSourceEnum::LogSource;
+using cobalt::clearcut_protos::LogSourceEnum::TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL;
+using cobalt::clearcut_protos::LogSourceEnum::TURQUOISE_COBALT_SHUFFLER_INPUT_PROD;
+using cobalt::clearcut_protos::LogSourceEnum::UNKNOWN;
 
 const char* EnvironmentString(const Environment& environment) {
   switch (environment) {
@@ -28,14 +28,14 @@
   return os << EnvironmentString(environment);
 }
 
-int32_t ConfigurationData::GetLogSourceId() const {
+LogSource ConfigurationData::GetLogSourceId() const {
   switch (environment_) {
     case PROD:
-      return kLogSourceIdProd;
+      return TURQUOISE_COBALT_SHUFFLER_INPUT_PROD;
     case DEVEL:
-      return kLogSourceIdDevel;
+      return TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL;
     case LOCAL:
-      return 0;
+      return UNKNOWN;
   }
 }
 
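
With this change GetLogSourceId returns a named clearcut LogSource value instead of a bare int32_t. A minimal usage sketch, assuming ConfigurationData can be constructed from an Environment value (the real constructor may take more parameters):

// Sketch only: assumes ConfigurationData is constructible from an
// Environment; the actual constructor may differ.
#include <iostream>

#include "src/system_data/configuration_data.h"

int main() {
  cobalt::system_data::ConfigurationData config(cobalt::system_data::PROD);
  // The log source is now a named enum value (e.g.
  // TURQUOISE_COBALT_SHUFFLER_INPUT_PROD) rather than a magic number
  // like 1176.
  std::cout << config.GetEnvironmentString() << " -> "
            << config.GetLogSourceId() << '\n';  // enum prints as its int value
  return 0;
}
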
diff --git a/src/system_data/configuration_data.h b/src/system_data/configuration_data.h
index d7edf1c..9e61512 100644
--- a/src/system_data/configuration_data.h
+++ b/src/system_data/configuration_data.h
@@ -8,6 +8,8 @@
 #include <iostream>
 #include <string>
 
+#include "src/lib/clearcut/clearcut_log_source.pb.h"
+
 namespace cobalt::system_data {
 
 // The environment that the Cobalt system should talk to.
@@ -41,7 +43,7 @@
   [[nodiscard]] const char* GetEnvironmentString() const { return EnvironmentString(environment_); }
 
   // Get the Clearcut Log Source ID that Cobalt should write its logs to.
-  [[nodiscard]] int32_t GetLogSourceId() const;
+  [[nodiscard]] cobalt::clearcut_protos::LogSourceEnum::LogSource GetLogSourceId() const;
 
  private:
   const Environment environment_;
diff --git a/src/system_data/system_data.cc b/src/system_data/system_data.cc
index 98533f2..d89be00 100644
--- a/src/system_data/system_data.cc
+++ b/src/system_data/system_data.cc
@@ -97,7 +97,7 @@
 
 }  // namespace
 
-// TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+// TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
 SystemData::SystemData(const std::string& product_name, const std::string& board_name_suggestion,
                        ReleaseStage release_stage, const std::string& version,
                        SystemProfile::BuildType build_type,
diff --git a/src/uploader/BUILD.gn b/src/uploader/BUILD.gn
index 507a390..3e96ab9 100644
--- a/src/uploader/BUILD.gn
+++ b/src/uploader/BUILD.gn
@@ -15,7 +15,9 @@
     "$cobalt_root/src/lib/util:posix_file_system",
     "$cobalt_root/src/lib/util:protected_fields",
     "$cobalt_root/src/lib/util:proto_serialization",
+    "$cobalt_root/src/lib/util:thread",
     "$cobalt_root/src/logger:internal_metrics",
+    "$cobalt_root/src/pb",
     "$cobalt_root/src/public:cobalt_config",
     "$cobalt_root/src/public:diagnostics_interface",
     "$cobalt_root/src/public/lib:registry_identifiers",
@@ -25,6 +27,7 @@
   ]
   public_deps = [
     "$cobalt_root/src/lib/clearcut",
+    "$cobalt_root/src/lib/clearcut:clearcut_proto",
     "$cobalt_root/src/logger:logger_interface",
     "$cobalt_root/src/observation_store",
     "$cobalt_root/src/observation_store:observation_store_update_recipient",
diff --git a/src/uploader/shipping_manager.cc b/src/uploader/shipping_manager.cc
index 2ef0b5d..a3fc61e 100644
--- a/src/uploader/shipping_manager.cc
+++ b/src/uploader/shipping_manager.cc
@@ -7,11 +7,14 @@
 #include <mutex>
 #include <utility>
 
+#include "src/lib/clearcut/clearcut.pb.h"
+#include "src/lib/clearcut/clearcut_log_source.pb.h"
 #include "src/lib/util/protected_fields.h"
+#include "src/lib/util/thread.h"
 #include "src/logger/internal_metrics_config.cb.h"
 #include "src/logger/logger_interface.h"
 #include "src/logging.h"
-#include "src/pb/clearcut_extensions.pb.h"
+#include "src/pb/encrypted_message.pb.h"
 #include "src/public/lib/registry_identifiers.h"
 #include "src/uploader/upload_scheduler.h"
 #include "third_party/protobuf/src/google/protobuf/util/delimited_message_util.h"
@@ -20,7 +23,8 @@
 
 using observation_store::ObservationStore;
 using EnvelopeHolder = ObservationStore::EnvelopeHolder;
-using cobalt::clearcut_extensions::LogEventExtension;
+using cobalt::clearcut_protos::LogRequest;
+using cobalt::clearcut_protos::LogSourceEnum::LogSource;
 
 namespace {
 
@@ -71,6 +75,7 @@
 
   std::thread t([this] { this->Run(); });
   worker_thread_ = std::move(t);
+  util::NameThread("upload-thread", worker_thread_);
 }
 
 void ShippingManager::NotifyObservationsAdded() {
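
The util::NameThread call added above labels the uploader's worker thread so it is identifiable in debuggers and profilers. A plausible sketch of such a helper, under the assumption that it wraps pthread_setname_np (the real helper in src/lib/util:thread may behave differently):

// Sketch only: assumes a pthreads-based implementation; the actual
// util::NameThread may differ.
#include <pthread.h>

#include <string>
#include <thread>

namespace util {

// Linux limits thread names to 15 characters plus the terminator, so
// "upload-thread" (13 characters) fits within the limit.
inline void NameThread(const std::string& name, std::thread& thread) {
  pthread_setname_np(thread.native_handle(), name.c_str());
}

}  // namespace util
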
@@ -313,13 +318,13 @@
 
 ClearcutV1ShippingManager::ClearcutV1ShippingManager(
     const UploadScheduler& upload_scheduler, ObservationStore& observation_store,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
     util::EncryptedMessageMaker* encrypt_to_shuffler,
     util::EncryptedMessageMaker* encrypt_to_analyzer,
     std::unique_ptr<lib::clearcut::ClearcutUploaderInterface> clearcut,
     DiagnosticsInterface* diagnostics,
-    // TODO(b/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
-    int32_t log_source_id, size_t max_attempts_per_upload, std::string api_key)
+    // TODO(https://fxbug.dev/278930401): NOLINTNEXTLINE(bugprone-easily-swappable-parameters)
+    LogSource log_source_id, size_t max_attempts_per_upload, std::string api_key)
     : ShippingManager(upload_scheduler, observation_store, encrypt_to_analyzer),
       max_attempts_per_upload_(max_attempts_per_upload),
       clearcut_(std::move(clearcut)),
@@ -352,10 +357,9 @@
 
 Status ClearcutV1ShippingManager::SendEnvelopeToClearcutDestination(const Envelope& envelope,
                                                                     size_t envelope_size) {
-  auto log_extension = std::make_unique<LogEventExtension>();
+  EncryptedMessage encrypted_message;
 
-  if (!encrypt_to_shuffler_->Encrypt(envelope,
-                                     log_extension->mutable_cobalt_encrypted_envelope())) {
+  if (!encrypt_to_shuffler_->Encrypt(envelope, &encrypted_message)) {
     LOG_FIRST_N(ERROR, 10) << "Failed to encrypt an envelope to the shuffler. Dropping envelope.";
     return Status::OkStatus();
   }
@@ -372,9 +376,9 @@
   VLOG(5) << name() << " worker: Sending Envelope of size " << envelope_size
           << " bytes to clearcut.";
 
-  lib::clearcut::LogRequest request;
+  LogRequest request;
   request.set_log_source(log_source_id_);
-  request.add_log_event()->SetAllocatedExtension(LogEventExtension::ext, log_extension.release());
+  request.add_log_event()->set_source_extension(encrypted_message.SerializeAsString());
 
   Status status;
   {
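
Taken together, the upload-path changes in this file replace the proto2 LogEventExtension with an EncryptedMessage serialized into the LogEvent's generic source_extension bytes field. A minimal round-trip sketch of the new encoding (type and field names are taken from this diff; the helper functions are illustrative scaffolding):

// Sketch of the new wire encoding: the encrypted envelope travels as
// serialized bytes in LogEvent.source_extension, not a proto2 extension.
#include "src/lib/clearcut/clearcut.pb.h"
#include "src/lib/clearcut/clearcut_log_source.pb.h"
#include "src/pb/encrypted_message.pb.h"

using cobalt::clearcut_protos::LogRequest;
using cobalt::clearcut_protos::LogSourceEnum::TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL;

// Send side, mirroring SendEnvelopeToClearcutDestination above.
LogRequest BuildRequest(const cobalt::EncryptedMessage& encrypted_envelope) {
  LogRequest request;
  request.set_log_source(TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL);
  request.add_log_event()->set_source_extension(
      encrypted_envelope.SerializeAsString());
  return request;
}

// Receive side, mirroring the updated shipping_manager_test.cc: parse the
// bytes back into an EncryptedMessage, then recover the Envelope from its
// ciphertext.
cobalt::EncryptedMessage DecodeFirstEvent(const LogRequest& request) {
  cobalt::EncryptedMessage encrypted_envelope;
  encrypted_envelope.ParseFromString(request.log_event(0).source_extension());
  return encrypted_envelope;
}
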
diff --git a/src/uploader/shipping_manager.h b/src/uploader/shipping_manager.h
index c98ff20..25ba7d6 100644
--- a/src/uploader/shipping_manager.h
+++ b/src/uploader/shipping_manager.h
@@ -15,6 +15,7 @@
 #include <thread>
 #include <vector>
 
+#include "src/lib/clearcut/clearcut_log_source.pb.h"
 #include "src/lib/clearcut/uploader.h"
 #include "src/lib/util/encrypted_message_util.h"
 #include "src/lib/util/file_system.h"
@@ -61,7 +62,7 @@
   // retrieved.
   //
   // encrypt_to_analyzer: An EncryptedMessageMaker that will be used to encrypt the observations in
-  // an envelope before sending. TODO(b/278924679): add nullptr check once storing
+  // an envelope before sending. TODO(https://fxbug.dev/278924679): add nullptr check once storing
   // unencrypted observations is enabled.
   ShippingManager(const UploadScheduler& upload_scheduler,
                   observation_store::ObservationStore& observation_store,
@@ -251,16 +252,16 @@
 // You must call ResetInternalMetrics() to set up internal logging.
 class ClearcutV1ShippingManager : public ShippingManager {
  public:
-  ClearcutV1ShippingManager(
-      const UploadScheduler& upload_scheduler,
-      observation_store::ObservationStore& observation_store,
-      util::EncryptedMessageMaker* encrypt_to_shuffler,
-      util::EncryptedMessageMaker* encrypt_to_analyzer,
-      std::unique_ptr<lib::clearcut::ClearcutUploaderInterface> clearcut,
-      DiagnosticsInterface* diagnostics,
-      int32_t log_source_id = system_data::defaultConfigurationData.GetLogSourceId(),
-      size_t max_attempts_per_upload = lib::clearcut::kMaxRetries,
-      std::string api_key = "cobalt-default-api-key");
+  ClearcutV1ShippingManager(const UploadScheduler& upload_scheduler,
+                            observation_store::ObservationStore& observation_store,
+                            util::EncryptedMessageMaker* encrypt_to_shuffler,
+                            util::EncryptedMessageMaker* encrypt_to_analyzer,
+                            std::unique_ptr<lib::clearcut::ClearcutUploaderInterface> clearcut,
+                            DiagnosticsInterface* diagnostics,
+                            cobalt::clearcut_protos::LogSourceEnum::LogSource log_source_id =
+                                system_data::defaultConfigurationData.GetLogSourceId(),
+                            size_t max_attempts_per_upload = lib::clearcut::kMaxRetries,
+                            std::string api_key = "cobalt-default-api-key");
 
   // The destructor will stop the worker thread and wait for it to stop
   // before exiting.
@@ -297,7 +298,7 @@
 
   const std::string api_key_;
   util::EncryptedMessageMaker* encrypt_to_shuffler_;
-  const int32_t log_source_id_;
+  const cobalt::clearcut_protos::LogSourceEnum::LogSource log_source_id_;
 };
 
 // A concrete subclass of ShippingManager for capturing data locally to a file.
diff --git a/src/uploader/shipping_manager_test.cc b/src/uploader/shipping_manager_test.cc
index f7b9cd1..267636c 100644
--- a/src/uploader/shipping_manager_test.cc
+++ b/src/uploader/shipping_manager_test.cc
@@ -14,6 +14,7 @@
 #include <gtest/gtest.h>
 
 #include "src/lib/clearcut/clearcut.pb.h"
+#include "src/lib/clearcut/clearcut_log_source.pb.h"
 #include "src/lib/util/not_null.h"
 #include "src/lib/util/posix_file_system.h"
 #include "src/logger/fake_logger.h"
@@ -21,13 +22,15 @@
 #include "src/logging.h"
 #include "src/observation_store/memory_observation_store.h"
 #include "src/observation_store/observation_store.h"
-#include "src/pb/clearcut_extensions.pb.h"
+#include "src/pb/encrypted_message.pb.h"
 #include "src/system_data/fake_system_data.h"
 #include "third_party/protobuf/src/google/protobuf/util/delimited_message_util.h"
 
 namespace cobalt::uploader {
 
-using cobalt::clearcut_extensions::LogEventExtension;
+using cobalt::clearcut_protos::LogRequest;
+using cobalt::clearcut_protos::LogResponse;
+using cobalt::clearcut_protos::LogSourceEnum::TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL;
 using lib::statusor::StatusOr;
 using logger::PerProjectBytesUploadedMigratedMetricDimensionStatus::Succeeded;
 using logger::testing::FakeLogger;
@@ -54,16 +57,15 @@
       lib::HTTPRequest request, std::chrono::steady_clock::time_point /*ignored*/) override {
     std::unique_lock<std::mutex> lock(mutex);
 
-    lib::clearcut::LogRequest req;
+    LogRequest req;
     req.ParseFromString(request.body);
     EXPECT_GT(req.log_event_size(), 0);
     for (const auto& event : req.log_event()) {
-      EXPECT_TRUE(event.HasExtension(LogEventExtension::ext));
-      const clearcut_extensions::LogEventExtension& log_event =
-          event.GetExtension(LogEventExtension::ext);
+      EXPECT_TRUE(event.has_source_extension());
+      EncryptedMessage encrypted_envelope;
+      encrypted_envelope.ParseFromString(event.source_extension());
       Envelope recovered_envelope;
-      EXPECT_TRUE(
-          recovered_envelope.ParseFromString(log_event.cobalt_encrypted_envelope().ciphertext()));
+      EXPECT_TRUE(recovered_envelope.ParseFromString(encrypted_envelope.ciphertext()));
       EXPECT_EQ(1, recovered_envelope.batch_size());
       EXPECT_EQ(kMetricId, recovered_envelope.batch(0).meta_data().metric_id());
       observation_count += recovered_envelope.batch(0).encrypted_observation_size();
@@ -72,7 +74,7 @@
 
     lib::HTTPResponse response;
     response.http_code = http_response_code_to_return;
-    lib::clearcut::LogResponse resp;
+    LogResponse resp;
     resp.SerializeToString(&response.response);
 
     std::promise<StatusOr<lib::HTTPResponse>> response_promise;
@@ -124,8 +126,7 @@
         encrypt_to_analyzer_.get(),
         std::make_unique<lib::clearcut::ClearcutUploader>(
             "https://test.com", util::TESTONLY_TakeRawPointer(http_client_)),
-        diagnostics,
-        /*log_source_id=*/11, /*max_attempts_per_upload=*/1);
+        diagnostics, TURQUOISE_COBALT_SHUFFLER_INPUT_DEVEL, /*max_attempts_per_upload=*/1);
     shipping_manager_->Start();
   }
 
diff --git a/third_party/boringssl/BUILD.gn b/third_party/boringssl/BUILD.gn
index 347f025..73f7c8a 100644
--- a/third_party/boringssl/BUILD.gn
+++ b/third_party/boringssl/BUILD.gn
@@ -45,10 +45,10 @@
   assert(false, "unsupported OS or CPU: $current_os/$current_cpu")
 }
 
-# TODO(https://fxbug.dev/46139): remove this added source.
+# TODO(https://fxbug.dev/42122741): remove this added source.
 crypto_sources += [ "src/decrepit/xts/xts.c" ]
 
-# TODO(https://fxbug.dev/46139): Required for TPM2.0 TSS Library //third_party/tpm2-tss.
+# TODO(https://fxbug.dev/42122741): Required for TPM2.0 TSS Library //third_party/tpm2-tss.
 crypto_sources += [ "src/decrepit/cfb/cfb.c" ]
 
 ################
@@ -85,12 +85,12 @@
       configs -= [ ":export_symbols" ]
     }
     if (is_fuchsia) {
-      # TODO(https://fxbug.dev/60545): profile instrumentation significantly affects performance.
+      # TODO(https://fxbug.dev/42138737): profile instrumentation significantly affects performance.
       configs += [ "//build/config:no_profile" ]
 
       # boringssl should always be optimized for speed because otherwise performance is
       # significantly worse, impacting pave and boot times on debug builds. See
-      # https://fxbug.dev/55456.
+      # https://fxbug.dev/42133086.
       configs -= [ "//build/config:default_optimize" ]
       configs += [ "//build/config:optimize_speed" ]
 
@@ -294,7 +294,7 @@
   # GNI files; we rename it to avoid colliding with the similarly-named parameter
   # on the fuzzer_package.
   #
-  # TODO(https://fxbug.dev/105707): Remove once `fuzzers` parameter is removed.
+  # TODO(https://fxbug.dev/42056966): Remove once `fuzzers` parameter is removed.
   fuzzer_names = fuzzers
   fuzzers = []
 
diff --git a/third_party/pyyaml b/third_party/pyyaml
index 25e9754..8cdff2c 160000
--- a/third_party/pyyaml
+++ b/third_party/pyyaml
@@ -1 +1 @@
-Subproject commit 25e97546488eee166b1abb229a27856cecd8b7ac
+Subproject commit 8cdff2c80573b8be8e8ad28929264a913a63aa33
diff --git a/tools/cobalt.ensure.exceptions b/tools/cobalt.ensure.exceptions
index 0d7d7e7..72a4373 100644
--- a/tools/cobalt.ensure.exceptions
+++ b/tools/cobalt.ensure.exceptions
@@ -5,11 +5,11 @@
 # The following dependencies diverge from those used in fuchsia for some reason.
 
 # Updating to the fuchsia version causes the build failure: `undefined symbol: __sanitizer_cov_trace_pc_guard_init`
-# TODO(b/278929805): Remove this exception once issues are resolved
+# TODO(https://fxbug.dev/278929805): Remove this exception once issues are resolved
 @Subdir bin
 gn/gn/${platform} git_revision:239533d2d91a04b3317ca9101cf7189f4e651e4d
 
 # Godepfile is not present in the fuchsia prebuilts file
-# TODO(b/278929805): Switch to go's builtin depfile generator
+# TODO(https://fxbug.dev/278929805): Switch to Go's built-in depfile generator
 @Subdir golang/bin
 fuchsia/tools/godepfile/${platform} git_revision:6922d7833617841e853a0be52a285f6cd07a0a10
diff --git a/tools/error_calculator.py b/tools/error_calculator.py
deleted file mode 100755
index bc71503..0000000
--- a/tools/error_calculator.py
+++ /dev/null
@@ -1,251 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright 2020 The Fuchsia Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-"""This script is used to run the error_calculator binary.
-
-  ./error_calculator.py 1 657579885 14 1
-
-This script assumes it is being run from a third_party embedding in a fuchsia
-checkout. If you are running from the cobalt standalone repository, run
-'cobaltb.py calculate_error' instead. If the build configuration doesn't match
-the assumed fuchsia or cobalt checkout, use the --bin_dir flag to specify the
-path to the error_calculator and config_parser binaries.
-
-A new version of the registry is generated every time.
-"""
-
-import argparse
-import os
-import subprocess
-import sys
-
-THIS_DIR = os.path.abspath(os.path.dirname(__file__))
-COBALT_ROOT_DIR = os.path.abspath(os.path.join(THIS_DIR, '..'))
-REGISTRY_PROTO_DEFAULT = os.path.join(COBALT_ROOT_DIR, 'out', 'registry.pb')
-POPULATION_DEFAULT = 100000
-
-
-def estimate_from_args(error_binary, args):
-  """Unpacks and validates command line arguments before calling error calculator.
-
-  Args:
-    error_binary: The filepath of the error calculator binary.
-    args: Python arguments defined below.
-  """
-  _validate_args(args)
-  estimate(
-      error_binary,
-      args.registry_proto,
-      args.customer_id,
-      args.project_id,
-      args.metric_id,
-      args.report_id,
-      args.population,
-      args.epsilon,
-      args.min_denominator,
-      args.min_value,
-      args.max_value,
-      args.max_count,
-      args.simple,
-  )
-
-
-def estimate(
-    error_binary,
-    registry_proto,
-    customer_id,
-    project_id,
-    metric_id,
-    report_id,
-    population,
-    epsilon=None,
-    min_denominator=None,
-    min_value=None,
-    max_value=None,
-    max_count=None,
-    simple=False,
-):
-  """Calls the error_calculator binary with the specified arguments.
-
-  Args:
-    error_binary: The string filepath to the error calculator binary.
-    registry_proto: The string filepath to the registry formatted as a
-      serialized proto.
-    customer_id: The integer customer id.
-    project_id: The integer project id.
-    metric_id: The integer metric id.
-    report_id: The integer report id.
-    population: Integer value estimating the number of reporting devices.
-    epsilon: Optional; Epsilon value for which to estimate error.
-    min_denominator: Optional; estimated minimum number of unique contributing
-      devices per day.
-    min_value: Optional; override the report's min_value.
-    max_value: Optional; override the report's max_value.
-    max_count: Optional; override the report's max_count.
-    simple: Outputs a single estimate.
-  """
-  error_args = [
-      '-registry_proto',
-      registry_proto,
-      '--privacy_params',
-      PRIVACY_PARAMS,
-      '--population',
-      str(population),
-  ]
-  if epsilon:
-    error_args = error_args + ['--epsilon', str(epsilon)]
-  if min_denominator:
-    error_args = error_args + ['--min_denominator', str(min_denominator)]
-  if simple:
-    error_args = error_args + ['--simple']
-  if min_value:
-    error_args = error_args + ['--min_value', str(min_value)]
-  if max_value:
-    error_args = error_args + ['--max_value', str(max_value)]
-  if max_count:
-    error_args = error_args + ['--max_count', str(max_count)]
-  error_args = error_args + [
-      str(customer_id),
-      str(project_id),
-      str(metric_id),
-      str(report_id),
-  ]
-  subprocess.check_call([error_binary] + error_args)
-
-
-# TODO(b/228513646): Use the build system to generate the registry only when
-# necessary.
-def generate_registry(registry_proto, config_dir, config_parser):
-  """Generates a binary encoding of the cobalt registry.
-
-  Args:
-    registry_proto: The filepath of the error calculator binary. This is the
-      output of the config_parser and the input for the error_calculator
-    config_dir: Location of the cobalt config.
-    config_parser: Location of the binary used to generate the registry_proto.
-  """
-  if not config_parser:
-    sys.exit(
-        "Run 'config_parser --out_filename %s' and try again." % registry_proto
-    )
-  subprocess.check_call([
-      config_parser,
-      '--output_file',
-      registry_proto,
-      '--config_dir',
-      config_dir,
-  ])
-  print('Wrote binary encoding of registry to %s.\n' % registry_proto)
-
-
-def add_parse_args(parser):
-  """Adds the standard arguments required for the error_calculator.
-
-  This is used to set arguments for this script and the cobaltb.py script.
-
-  Args:
-    parser: An ArgumentParser to be augmented with error calculator arguments.
-  """
-  parser.add_argument(
-      '--registry_proto',
-      help=(
-          'Set a specific filepath for the binary encoding of the Cobalt'
-          ' Registry. Default: %s'
-      )
-      % REGISTRY_PROTO_DEFAULT,
-      default=REGISTRY_PROTO_DEFAULT,
-  )
-  parser.add_argument(
-      '--population',
-      help='Expected number of devices contributing to the report. Default: %s'
-      % POPULATION_DEFAULT,
-      default=POPULATION_DEFAULT,
-  )
-  parser.add_argument(
-      '--bin_dir',
-      help=(
-          'Directory containing the error_calculator and config_parser'
-          ' binaries. If unset, the script attempts to find binaries based on'
-          ' the default build configuration for Fuchsia and Cobalt.'
-      ),
-      default=None,
-  )
-  parser.add_argument(
-      '--epsilon',
-      help='If set, estimates the error using the specified epsilon value.',
-  )
-  parser.add_argument(
-      '--min_denominator',
-      help='Estimated minimum number of unique contributing devices per day.',
-  )
-  parser.add_argument(
-      '--min_value', help="Optionally overrides the report's MinValue field."
-  )
-  parser.add_argument(
-      '--max_value', help="Optionally overrides the report's MaxValue field."
-  )
-  parser.add_argument(
-      '--max_count', help="Optionally overrides the report's MaxCount field."
-  )
-  parser.add_argument(
-      '--simple',
-      default=False,
-      action='store_true',
-      help='Output a single error estimate.',
-  )
-  parser.add_argument(
-      'customer_id', help="a report's parent customer id", type=int
-  )
-  parser.add_argument(
-      'project_id', help="a report's parent project id.", type=int
-  )
-  parser.add_argument(
-      'metric_id', help="a report's parent metric id.", type=int
-  )
-  parser.add_argument(
-      'report_id', help="a report's parent report id.", type=int
-  )
-
-
-def _validate_args(args):
-  if args.registry_proto == None:
-    sys.exit('--registry_proto is required')
-  if args.population == None:
-    sys.exit('--population flag is required')
-  if not os.path.exists(args.registry_proto):
-    sys.exit(
-        "No serialized proto found. Try running 'config_parser --out_filename"
-        " %s' and try again."
-        % args.registry_proto
-    )
-
-
-if __name__ == '__main__':
-  # Assumes the user is running from a Fuchsia checkout.
-  # If you're running this script directly from a cobalt checkout, use
-  # `cobaltb.py calculate_error` instead.
-  parser = argparse.ArgumentParser()
-  add_parse_args(parser)
-  args = parser.parse_args()
-
-  if args.bin_dir:
-    bin_dir = args.bin_dir
-  else:
-    out = subprocess.check_output(['fx', 'get-build-dir'])
-    out_dir = out.decode('utf-8').rstrip()
-    bin_dir = os.path.join(out_dir, 'host_x64')
-
-  error_binary = os.path.join(bin_dir, 'error_calculator')
-  config_parser = os.path.join(bin_dir, 'config_parser')
-  config_dir = os.path.join(COBALT_ROOT_DIR, '..', 'cobalt_config')
-  config_dir = os.path.abspath(config_dir)
-  if not os.path.exists(error_binary):
-    sys.exit(
-        "Error Calculator binary not found: %s.\nRun 'fx build' and try again."
-        % error_binary
-    )
-
-  generate_registry(args.registry_proto, config_dir, config_parser)
-  estimate_from_args(error_binary, args)
diff --git a/tools/lint_todos.py b/tools/lint_todos.py
index 3b6116a..cc8a94b 100755
--- a/tools/lint_todos.py
+++ b/tools/lint_todos.py
@@ -17,18 +17,14 @@
     os.path.join(os.path.join(os.path.dirname(__file__), '..'))
 )
 
-VALID_MONORAIL_FORMATS = (
-    'TODO(fxbug.dev/12345): With a colon if a message is present'
-)
-VALID_BUGANIZER_FORMATS = 'TODO(b/12345): With a colon if a message is present'
+VALID_FORMATS = 'TODO(https://fxbug.dev/42074368): $MESSAGE'
 SKIP_VALIDATION = [
     'src/public/lib/statusor/statusor.h',  # Copied from external source
     'src/pb/report_row.proto',  # Copied from external source
 ]
 
 todo = re.compile(r'TODO\((.*)\)')
-valid = re.compile(r'TODO\(((https?://)?((fxb)(ug.dev)?|b)/\d+)\)(: .+|)')
-deprecated = re.compile(r'TODO\((https?://fxbug.dev|fxb)/\d+\)')
+valid = re.compile(r'TODO\(https://fxbug\.dev/\d+\): .+')
 
 BOLD = '\033[1m'
 END = '\033[0m'
@@ -60,24 +56,11 @@
             print(f'{CYAN}{line}{END}')
             print(
                 f"{' ' * match.start(0)}^ {BOLD}{RED}ERROR: TODO must use the"
-                f' format {VALID_MONORAIL_FORMATS} for Monorail Issues or'
-                f' {VALID_BUGANIZER_FORMATS} for Buganizer Issues.{END}'
+                f' format {VALID_FORMATS} for issues.{END}'
             )
             print()
             if should_error:
               errors += 1
-          if deprecated.search(line):
-            print(f'{file}:{i+1}')
-            print(f'{CYAN}{line}{END}')
-            print(
-                f"{' ' * match.start(0)}^ {BOLD}{YELLOW}WARNING: Using"
-                ' deprecated TODO format. Please use'
-                f' {VALID_MONORAIL_FORMATS} for Monorail Issues or'
-                f' {VALID_BUGANIZER_FORMATS} for Buganizer Issues.{END}'
-            )
-            print()
-            if strict:
-              errors += 1
 
   return errors
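
The tightened `valid` pattern now accepts only the canonical https://fxbug.dev form followed by a message. For illustration, a small self-contained check of which strings pass, written in C++ with std::regex (the lint itself remains Python; this simple pattern behaves the same under ECMAScript regex semantics):

// Demonstrates which TODO comments the tightened lint pattern accepts.
#include <iostream>
#include <regex>
#include <string>

int main() {
  // Same pattern as the lint's `valid` regex above.
  const std::regex valid(R"(TODO\(https://fxbug\.dev/\d+\): .+)");
  const std::string cases[] = {
      "TODO(https://fxbug.dev/42074368): message present",  // accepted
      "TODO(b/12345): Buganizer form",                      // now rejected
      "TODO(fxb/12345): short form",                        // now rejected
      "TODO(https://fxbug.dev/12345)",                      // rejected: no message
  };
  for (const std::string& line : cases) {
    std::cout << (std::regex_search(line, valid) ? "ok:   " : "fail: ")
              << line << '\n';
  }
  return 0;
}
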