# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
################################################################################
# Cobalt Project: local_storage
################################################################################
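################################################################################
# Note on structure: each legacy Cobalt 1.0 metric below (INT_HISTOGRAM,
# EVENT_COUNT, MEMORY_USAGE) is paired with a Cobalt 1.1 replacement
# (INTEGER_HISTOGRAM, OCCURRENCE, INTEGER) referenced via
# replacement_metric_id. Shared dimension and bucket definitions are declared
# once with YAML anchors (&name) and reused with aliases (*name).
################################################################################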
metric_definitions:
################################################################################
# Link Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode took to link a file, in nanoseconds.
################################################################################
- id: 1
replacement_metric_id: 35
metric_name: link_latency
metric_type: INT_HISTOGRAM
metric_dimensions: &vnode_dimensions
- &source_dimension
dimension: source
event_codes:
0: unknown
1: fvm
2: blobfs
3: minfs
4: fxfs
int_buckets: &ll_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
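# With these parameters the bucket upper bounds double from 10 us (10000 ns)
# up to ~5.12 ms (10000 * 2^9 ns); values outside that range land in the
# implicit underflow/overflow buckets. (Worked example, assuming Cobalt's
# standard exponential bucketing.)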
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: link_latency_histogram
id: 2203712145 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 35
metric_name: link_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: *ll_buckets
metric_semantics: [LATENCY]
metric_units: NANOSECONDS
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: link_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
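################################################################################
# For reference, a filesystem would feed the migrated histogram metrics in
# this file roughly as follows (an illustrative sketch at the level of the
# fuchsia.metrics FIDL API; exact bucket packing and language-binding details
# vary by client):
#
#   MetricEventLogger.LogIntegerHistogram(metric_id: 35,
#       histogram: <locally aggregated buckets>,
#       event_codes: [3 /* source = minfs */])
################################################################################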
################################################################################
# Close Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode took to close an open file, in nanoseconds.
################################################################################
- id: 2
replacement_metric_id: 36
metric_name: close_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &cl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: close_latency_histogram
id: 3976766327 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 36
metric_name: close_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *cl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: close_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Read Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode took to read file contents, in nanoseconds.
################################################################################
- id: 3
replacement_metric_id: 37
metric_name: read_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &rl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: read_latency_histogram
id: 4000776865 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 37
metric_name: read_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *rl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: read_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Write Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode took to write a file, in nanoseconds.
################################################################################
- id: 4
replacement_metric_id: 38
metric_name: write_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &wl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: write_latency_histogram
id: 2787504078 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 38
metric_name: write_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *wl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: write_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Append Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent appending to a file, in nanoseconds.
################################################################################
- id: 5
replacement_metric_id: 39
metric_name: append_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &al_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: append_latency_histogram
id: 4159068221 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 39
metric_name: append_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *al_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: append_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Truncate Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent truncating a file's size, in nanoseconds.
################################################################################
- id: 6
replacement_metric_id: 40
metric_name: truncate_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &tl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: truncate_latency_histogram
id: 4157122023 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 40
metric_name: truncate_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *tl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: truncate_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# SetAttr Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode took to update its attributes, in nanoseconds.
################################################################################
- id: 7
replacement_metric_id: 41
metric_name: set_attr_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &sal_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: set_attr_latency_histogram
id: 2824692351 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 41
metric_name: set_attr_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *sal_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: set_attr_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# GetAttr Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode took to return its attributes, in nanoseconds.
################################################################################
- id: 8
replacement_metric_id: 42
metric_name: get_attr_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &gal_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 1
step_multiplier: 2
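# Note the much finer buckets here: upper bounds double from 1 ns to 512 ns
# (1 * 2^9), so anything slower than ~0.5 us falls in the implicit overflow
# bucket. (Worked example, assuming Cobalt's standard exponential bucketing.)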
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: get_attr_latency_histogram
id: 296311099 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 42
metric_name: get_attr_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *gal_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: get_attr_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Sync Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent syncing its state to disk, in nanoseconds.
################################################################################
- id: 9
replacement_metric_id: 43
metric_name: sync_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &sl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: sync_latency_histogram
id: 3117694120 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 43
metric_name: sync_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *sl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: sync_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# ReadDir Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent reading directory entries, in nanoseconds.
################################################################################
- id: 10
replacement_metric_id: 44
metric_name: read_dir_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &rdl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: read_dir_latency_histogram
id: 1707841577 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 44
metric_name: read_dir_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *rdl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: read_dir_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# LookUp Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent looking up a child, in nanoseconds.
################################################################################
- id: 11
replacement_metric_id: 45
metric_name: lookup_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &lookupl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: lookup_latency_histogram
id: 3629837219 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 45
metric_name: lookup_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *lookupl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: lookup_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Create Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time spent creating a new vnode, in nanoseconds.
################################################################################
- id: 12
replacement_metric_id: 46
metric_name: create_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &createl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: create_latency_histogram
id: 3690266605 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 46
metric_name: create_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *createl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: create_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# UnLink Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent unlinking, in nanoseconds.
################################################################################
- id: 13
replacement_metric_id: 47
metric_name: unlink_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *vnode_dimensions
int_buckets: &ul_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: unlink_latency_histogram
id: 3128110576 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 47
metric_name: unlink_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *vnode_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *ul_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: unlink_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Corruption Event
#
# A corruption event logs a '1' every time a component detects corruption. This
# can be either data or metadata corruption depending on the source.
################################################################################
- id: 14
replacement_metric_id: 48
metric_name: corruption_events
# This will always log a single event (1).
metric_type: EVENT_COUNT
metric_dimensions: &ce_dimensions
- dimension: "source"
event_codes:
0: Unknown
1: Fvm
2: BlobFs
3: MinFs
4: FxFs
- dimension: "corruption_type"
event_codes:
0: Unknown
1: Data
2: Metadata
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: corruption_summary
id: 1809599655 # legacy long report_id
report_type: EVENT_COMPONENT_OCCURRENCE_COUNT
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: corruption_per_device
id: 1110247575 # legacy long report_id
report_type: PER_DEVICE_NUMERIC_STATS
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 48
metric_name: corruption_events_migrated
# This will always log a single event (1).
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *ce_dimensions
reports:
############################################################################
# A fleet-wide summary.
############################################################################
# This metric relies on component strings, and cannot be automatically migrated to Cobalt 1.1
# - report_name: corruption_summary
# id: 1
# report_type: EVENT_COMPONENT_OCCURRENCE_COUNT
# system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: corruption_per_device
id: 2
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
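# A component would report a detected corruption roughly as follows (an
# illustrative sketch at the level of the fuchsia.metrics FIDL API):
#   MetricEventLogger.LogOccurrence(metric_id: 48, count: 1,
#       event_codes: [2 /* source = BlobFs */, 1 /* corruption_type = Data */])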
# id: 15 deprecated.
################################################################################
# Compression format
#
# Total file sizes (in bytes) per compression format.
#
# Currently only supported by blobfs. When blobfs is starting up after boot, we
# compute the total (uncompressed) sizes of all blobs per compression format,
# and log the totals to Cobalt.
#
################################################################################
- id: 16
replacement_metric_id: 49
metric_name: compression_format
metric_type: EVENT_COUNT
metric_dimensions: &cf_dimensions
- dimension: "source"
event_codes:
0: Unknown
1: BlobFs
- dimension: "format"
event_codes:
0: Unknown
1: Uncompressed
2: LZ4
3: ZSTD
4: ZSTDSeekable
5: ZSTDChunked
reports:
############################################################################
# A per-device histogram report.
############################################################################
- report_name: compression_per_device_histogram
id: 1
report_type: PER_DEVICE_HISTOGRAM
##########################################################################
# Since we only log the count once per boot, we use MAX here in case a
# device reboots several times during the day.
##########################################################################
aggregation_type: MAX
int_buckets: &cpdh_buckets
linear:
step_size: 20971520 # 20MB
num_buckets: 100
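# 100 linear buckets of 20 MiB each cover totals from 0 B up to ~2 GiB
# (100 * 20971520 bytes); anything larger lands in the implicit overflow
# bucket. (Worked example, assuming Cobalt's standard linear bucketing.)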
window_size:
- 1 # 1 day
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 49
metric_name: compression_format_migrated
metric_type: INTEGER
metric_units: BYTES
metric_semantics: [DATA_SIZE]
metric_dimensions: *cf_dimensions
reports:
############################################################################
# A per-device histogram report.
############################################################################
- report_name: compression_per_device_histogram
id: 1
report_type: UNIQUE_DEVICE_HISTOGRAMS
system_profile_selection: SELECT_LAST
##########################################################################
# Since we only log the count once per boot, we use MAX here in case a
# device reboots several times during the day.
##########################################################################
local_aggregation_procedure: MAX_PROCEDURE
int_buckets: *cpdh_buckets
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Page fault latency
#
# Histogram of latencies in nanoseconds for satisfying a page fault.
#
# Each time a page fault occurs (a client accesses a not-present page in a
# pager-backed VMO), the latency of the userspace fault handling, in
# nanoseconds, is added to the histogram.
#
# Blobfs currently flushes this metric to Cobalt every five minutes (at the
# same time as all other metrics are flushed).
#
# Currently only supported by blobfs, and only when blobfs.userpager=true is set
# in the build.
#
################################################################################
- id: 17
replacement_metric_id: 50
metric_name: page_fault_latency
metric_type: INT_HISTOGRAM
int_buckets: &pfl_buckets
exponential:
floor: 0
num_buckets: 15
initial_step: 10000
step_multiplier: 2
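# These parameters give 15 buckets whose upper bounds double from 10 us
# (10000 ns) up to ~164 ms (10000 * 2^14 ns). (Worked example, assuming
# Cobalt's standard exponential bucketing.)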
metric_dimensions: &pfl_dimensions
- dimension: "source"
event_codes:
0: Unknown
1: BlobFs
- dimension: "format"
event_codes:
0: Unknown
1: Uncompressed
2: LZ4
3: ZSTD
4: ZSTDSeekable
5: ZSTDChunked
- dimension: "read_ahead_size"
event_codes:
0: 8KiB
1: 16KiB
2: 32KiB
3: 64KiB
4: 128KiB
5: 256KiB
6: 512KiB
7: LargerThan512KiB
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: page_fault_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 50
metric_name: page_fault_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *pfl_buckets
metric_dimensions: *pfl_dimensions
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: page_fault_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal write data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal took to write data, in nanoseconds.
# Typically a "write" here is just a memory copy into a ring buffer. The
# payload of the write is user data rather than filesystem metadata.
# This is a filesystem-facing async API metric. A high latency may mean that
# the ring buffer is full, possibly because of back-pressure from a slower
# underlying disk.
################################################################################
- id: 18
replacement_metric_id: 51
metric_name: journal_write_data_latency
metric_type: INT_HISTOGRAM
metric_dimensions: &journal_dimensions
- *source_dimension
int_buckets: &hwdl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_write_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 51
metric_name: journal_write_data_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *hwdl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_write_data_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal write metadata latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal took to write metadata, in nanoseconds.
# Typically a "write" here is just a memory copy into a ring buffer. The
# payload of the write is filesystem metadata.
# This is a filesystem-facing async API metric. A high latency may mean that
# the ring buffer is full, possibly because of back-pressure from a slower
# underlying disk.
################################################################################
- id: 19
replacement_metric_id: 52
metric_name: journal_write_metadata_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jwml_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_write_metadata_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 52
metric_name: journal_write_metadata_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jwml_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_write_metadata_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal trim data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal took to trim data, in nanoseconds.
# Trim lets NAND-based storage mark a block as unused; trimmed blocks help the
# storage software improve NAND performance.
# This API queues a trim command in the journal buffer without actually
# issuing the trim to the underlying device.
# This is a filesystem-facing async API metric. A high latency may mean that
# the ring buffer is full, possibly because of back-pressure from a slower
# underlying disk.
################################################################################
- id: 20
replacement_metric_id: 53
metric_name: journal_trim_data_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jtdl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_trim_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 53
metric_name: journal_trim_data_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jtdl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_trim_data_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal sync latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal took to sync, in nanoseconds.
# Sync ensures that all operations queued before it have completed.
# This is a filesystem-facing async API metric. A high latency may mean that
# the ring buffer is full, possibly because of back-pressure from a slower
# underlying disk.
################################################################################
- id: 21
replacement_metric_id: 54
metric_name: journal_sync_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jsl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_sync_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 54
metric_name: journal_sync_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jsl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_sync_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal schedule task latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal took to schedule a task, in nanoseconds.
# A task is a collection of journal operations (data writes, metadata writes,
# etc.) that is executed as a background task.
################################################################################
- id: 22
replacement_metric_id: 55
metric_name: journal_schedule_task_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jstl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_schedule_task_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 55
metric_name: journal_schedule_task_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jstl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_schedule_task_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal writer write data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer took to write data, in nanoseconds.
# The journal writer is a background journal thread that issues IOs to the
# underlying disk.
# This is the latency of writing user data, as opposed to filesystem metadata,
# to disk by the journal writer.
# See the comments for journal_write_data.
################################################################################
- id: 23
replacement_metric_id: 56
metric_name: journal_writer_write_data_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jwwdl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 56
metric_name: journal_writer_write_data_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jwwdl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_data_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal writer write metadata latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer took to write metadata, in nanoseconds.
# The journal writer is a background journal thread that issues blocking IOs
# to the underlying disk.
# This is the latency of writing filesystem metadata to disk by the journal
# writer.
# See the comments for journal_write_metadata.
################################################################################
- id: 24
replacement_metric_id: 57
metric_name: journal_writer_write_metadata_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jwwml_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_metadata_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 57
metric_name: journal_writer_write_metadata_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jwwml_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_metadata_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal writer trim data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer took to trim data, in nanoseconds.
# The journal writer is a background journal thread that issues blocking IOs
# to the underlying disk.
# This is the latency of trimming blocks as seen by the journal writer.
# See the comments for journal_trim_data.
################################################################################
- id: 25
replacement_metric_id: 58
metric_name: journal_writer_trim_data_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jwtdl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_trim_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 58
metric_name: journal_writer_trim_data_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jwtdl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_trim_data_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal writer sync latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer took to sync, in nanoseconds.
# The journal writer is a background journal thread that issues blocking IOs
# to the underlying disk.
# This is the latency of a sync journal operation as seen by the journal
# writer.
# See the comments for journal_sync.
################################################################################
- id: 26
replacement_metric_id: 59
metric_name: journal_writer_sync_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jwsl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_sync_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 59
metric_name: journal_writer_sync_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jwsl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_sync_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Journal writer write info block latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer took to write an info block, in nanoseconds.
# The journal writer is a background journal thread that issues blocking IOs
# to the underlying disk.
# The journal updates its on-disk headers when certain conditions are met
# (like the ring buffer running full). This metric tracks the latency of such
# writes.
################################################################################
- id: 27
replacement_metric_id: 60
metric_name: journal_writer_write_info_block_latency
metric_type: INT_HISTOGRAM
metric_dimensions: *journal_dimensions
int_buckets: &jwwibl_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_info_block_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 60
metric_name: journal_writer_write_info_block_latency_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *journal_dimensions
metric_units: NANOSECONDS
metric_semantics: [LATENCY]
int_buckets: *jwwibl_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_info_block_latency_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Oldest on-disk version
#
# The component string field tracks both the format version and the oldest on-disk version in the
# format `<format>/<oldest-revision>`. A dimension is used to track the real storage sub-component.
# The enumeration of possible version number combinations is tracked by versions.txt. This metric
# is incremented upon every mount.
################################################################################
- id: 28
replacement_metric_id: 61
metric_name: version
metric_type: EVENT_COUNT
metric_dimensions: &v_dimensions
- *source_dimension
reports:
- report_name: version_mount_counts
id: 1
candidate_file: fuchsia/local_storage/versions.txt
report_type: EVENT_COMPONENT_OCCURRENCE_COUNT
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 61
metric_name: version_migrated
metric_type: STRING
string_candidate_file: fuchsia/local_storage/versions.txt
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *v_dimensions
reports:
- report_name: version_mount_counts
id: 1
report_type: STRING_COUNTS
privacy_level: NO_ADDED_PRIVACY
string_buffer_max: 10
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
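# The migrated metric logs the version string directly (an illustrative sketch
# at the level of the fuchsia.metrics FIDL API):
#   MetricEventLogger.LogString(metric_id: 61,
#       string_value: "<format>/<oldest-revision>",
#       event_codes: [2 /* source = blobfs */])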
################################################################################
# Total inodes
#
# Total number of inodes, either free or in use, in the system.
# This metric will help us understand the size of the inode table. It might
# also help us understand other fragmentation metrics, namely file_in_use and
# extent_containers_in_use, better.
################################################################################
- id: 29
replacement_metric_id: 62
metric_name: total_inodes
metric_type: MEMORY_USAGE
metric_dimensions: &fragmentation_dimensions
- *source_dimension
reports:
- report_name: total_inodes_count
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 62
metric_name: total_inodes_migrated
metric_type: INTEGER
metric_dimensions: *fragmentation_dimensions
metric_units_other: "inodes"
metric_semantics: [MEMORY_USAGE]
reports:
- report_name: total_inodes_count
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
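# Sampled values for the migrated fragmentation metrics below are logged as
# plain integers (an illustrative sketch at the level of the fuchsia.metrics
# FIDL API):
#   MetricEventLogger.LogInteger(metric_id: 62, value: <total inodes>,
#       event_codes: [3 /* source = minfs */])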
################################################################################
# Files in use
#
# Total number of files (or blobs, in the case of blobfs) in use in the system.
# This number may vary widely across builds, or when the device has additional
# packages installed beyond the system base packages.
################################################################################
- id: 30
replacement_metric_id: 63
metric_name: file_in_use
metric_type: MEMORY_USAGE
metric_dimensions: *fragmentation_dimensions
reports:
- report_name: files_in_use_count
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 63
metric_name: file_in_use_migrated
metric_type: INTEGER
metric_dimensions: *fragmentation_dimensions
metric_units_other: "files"
metric_semantics: [MEMORY_USAGE]
reports:
- report_name: files_in_use_count
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Extent containers in use
#
# A file/blob may use one or more extents, each a pointer to a set of blocks.
# Extent containers are collections of one or more extents. A larger number of
# extent containers in use indicates higher fragmentation of the storage.
################################################################################
- id: 31
replacement_metric_id: 64
metric_name: extent_containers_in_use
metric_type: MEMORY_USAGE
metric_dimensions: *fragmentation_dimensions
reports:
- report_name: extent_containers_in_use_count
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 64
metric_name: extent_containers_in_use_migrated
metric_type: INTEGER
metric_dimensions: *fragmentation_dimensions
metric_units_other: "extent containers"
metric_semantics: [MEMORY_USAGE]
reports:
- report_name: extent_containers_in_use_count
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Extents per file fragmentation
#
# A histogram, aggregated in-process and logged to Cobalt every time the
# filesystem is mounted or unmounted, of the number of extents used by each
# file/blob.
# The histogram presents fragmentation at the file/blob level. This metric
# will help us understand how defragmenting certain files would help either
# to recover extents or to improve read performance.
################################################################################
- id: 32
replacement_metric_id: 65
metric_name: extents_per_file
metric_type: INT_HISTOGRAM
metric_dimensions: *fragmentation_dimensions
int_buckets: &epf_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_extents_per_file_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 65
metric_name: extents_per_file_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *fragmentation_dimensions
metric_units_other: "extent containers"
metric_semantics: [MEMORY_USAGE]
int_buckets: *epf_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_extents_per_file_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# In use block fragmentation
#
# A histogram, aggregated in-process and logged to Cobalt every time the
# filesystem is mounted or unmounted, of runs of used contiguous blocks that
# map to contiguous offsets within a file.
# The histogram shows in-use block fragmentation. This metric will help us
# understand the effects of the block allocation policy, and will also help
# target which blocks to move if we decide to defragment the system.
################################################################################
- id: 33
replacement_metric_id: 66
metric_name: in_use_fragments
metric_type: INT_HISTOGRAM
metric_dimensions: *fragmentation_dimensions
int_buckets: &iuf_buckets
exponential:
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_in_use_fragments_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 66
metric_name: in_use_fragments_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *fragmentation_dimensions
metric_units_other: "fragments"
metric_semantics: [MEMORY_USAGE]
int_buckets: *iuf_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_in_use_fragments_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Free fragments
#
# A histogram, aggregated in-process and logged to Cobalt every time the
# filesystem is mounted or unmounted, of runs of contiguous free blocks.
# The histogram shows free-block fragmentation. This metric helps us
# understand the likelihood of running out of extents during an OTA.
################################################################################
- id: 34
replacement_metric_id: 67
metric_name: free_fragments
metric_type: INT_HISTOGRAM
metric_dimensions: *fragmentation_dimensions
int_buckets: &ff_buckets
exponential:
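# 0, 10, 20, 40... 1280, 2560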
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_free_fragments_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 67
metric_name: free_fragments_migrated
metric_type: INTEGER_HISTOGRAM
metric_dimensions: *fragmentation_dimensions
metric_units_other: "fragments"
metric_semantics: [MEMORY_USAGE]
int_buckets: *ff_buckets
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_free_fragments_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
# Next ID: 70
################################################################################
# FTL and NAND Metrics
# Reserved Range: [2000, 3000)
################################################################################
# FTL: Wear Count
#
# A counter representing the highest wear count observed by the FTL driver.
#
# A NAND block has an expected lifetime measured in erase cycles. A block must
# be erased before anything can be written to it again (i.e., before data can
# be updated).
#
# This count is the highest number of erase cycles per block, across all
# erase blocks in the NAND device.
#
# This metric lets us observe how well the FTL wear-leveling algorithm is
# performing at maximizing device lifetime: the more worn a block is, the
# more likely it is to fail.
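# For example, if the most-worn block has been erased 1,500 times while the
# average block has seen far fewer cycles, the reported wear count is 1,500.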
#
# Sampling Frequency: 1 sample per hour.
################################################################################
- id: 2000
replacement_metric_id: 2003
metric_name: ftl_wear_count
# The current version of Cobalt doesn't support logging an integer sample.
# The closest is |MEMORY_USAGE|, which will be mapped to |INTEGER| once
# Cobalt 1.1 is released.
metric_type: MEMORY_USAGE
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_wear_count_per_device
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_wear_count_histogram_per_device
id: 2
report_type: INT_RANGE_HISTOGRAM
int_buckets: &fwchpd_buckets
exponential:
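# 0, 1, 2, 4, 8... 131072, 262144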
floor: 0
num_buckets: 20
initial_step: 1
step_multiplier: 2
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 2003
metric_name: ftl_wear_count_migrated
metric_type: INTEGER
metric_units_other: wear count
metric_semantics: [USAGE_COUNTING]
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_wear_count_per_device
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_wear_count_histogram_per_device
id: 2
report_type: FLEETWIDE_HISTOGRAMS
int_buckets: *fwchpd_buckets
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# FTL: Block Operation Count
#
# A counter representing the number of block operations the FTL receives.
#
# Allows measuring the IO pressure on the FTL. When combined with
# |ftl_nand_operation_count|, an approximate operation amplification can be
# estimated.
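# For example, 4,000 NAND operations driven by 1,000 block operations over
# the same window implies an amplification of roughly 4x.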
#
# Sampling Frequency: 1 sample per hour.
#
# |operation_type|: Refers to the type of block operation received in the FTL Layer.
#
################################################################################
- id: 2001
replacement_metric_id: 2004
metric_name: ftl_block_operation_count
metric_type: EVENT_COUNT
metric_dimensions: &fboc_dimensions
- dimension: operation_type
event_codes:
0: Unknown
1: BLOCK_READ
2: BLOCK_WRITE
3: BLOCK_FLUSH
4: BLOCK_TRIM
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_block_operation_per_device
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: SUM
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_block_max_operations_per_device
id: 2
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_block_operation_histogram_per_device
id: 3
report_type: INT_RANGE_HISTOGRAM
int_buckets: &fbohpd_buckets
exponential:
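# 0, 1, 2, 4, 8... 131072, 262144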
floor: 0
num_buckets: 20
initial_step: 1
step_multiplier: 2
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 2004
metric_name: ftl_block_operation_count_migrated
metric_type: INTEGER
metric_semantics: [USAGE_COUNTING]
metric_units_other: "block operations"
metric_dimensions: *fboc_dimensions
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_block_operation_per_device
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: SUM_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_block_max_operations_per_device
id: 2
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_block_operation_histogram_per_device
id: 3
report_type: FLEETWIDE_HISTOGRAMS
int_buckets: *fbohpd_buckets
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# FTL: NAND Operation Count
#
# A counter representing the number of NAND operations the FTL issues to the
# NAND layer.
#
# Allows measuring the IO pressure the FTL puts on the NAND. When combined
# with |ftl_block_operation_count|, an approximate operation amplification
# can be estimated.
#
# Sampling Frequency: 1 sample per hour.
#
# |operation_type|: Refers to the type of NAND operation issued to the NAND
# layer by the FTL, in response to a given block operation type.
# |source_operation_type|: Refers to the type of block operation received in
# the FTL layer.
#
################################################################################
- id: 2002
replacement_metric_id: 2005
metric_name: ftl_nand_operation_count
metric_type: EVENT_COUNT
metric_dimensions: &fnoc_dimensions
- dimension: operation_type
event_codes:
0: Unknown
1: NAND_READ
2: NAND_WRITE
3: NAND_ERASE
- dimension: source_operation_type
event_codes:
0: Unknown
1: BLOCK_READ
2: BLOCK_WRITE
3: BLOCK_FLUSH
4: BLOCK_TRIM
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_nand_operations_per_device
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: SUM
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_nand_operation_max_per_device
id: 2
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_nand_operation_histogram_per_device
id: 3
report_type: INT_RANGE_HISTOGRAM
int_buckets: &fnohpd_buckets
exponential:
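# 0, 1, 2, 4, 8... 131072, 262144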
floor: 0
num_buckets: 20
initial_step: 1
step_multiplier: 2
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 2005
metric_name: ftl_nand_operation_count_migrated
metric_type: INTEGER
metric_units_other: "nand operations"
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *fnoc_dimensions
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_nand_operations_per_device
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: SUM_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_nand_operation_max_per_device
id: 2
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_nand_operation_histogram_per_device
id: 3
report_type: FLEETWIDE_HISTOGRAMS
int_buckets: *fnohpd_buckets
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# NAND bit flips corrected by ECC per call.
#
# A histogram of the number of bit flips corrected by ECC on a read. Reads
# with more bit flips than can be corrected are recorded as the maximum
# correctable count plus one. For all current use cases, the maximum
# correctable count is 30 bits.
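# For example, a read with three corrected bit flips logs 3, while an
# uncorrectable read logs 31 (30 + 1).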
#
# Provides insight into the frequency and extremity of bit flips observed.
################################################################################
- id: 2006
metric_name: nand_read_ecc_bit_flips
metric_type: INTEGER_HISTOGRAM
metric_semantics: [USAGE_COUNTING]
metric_units_other: "bitflips"
int_buckets:
linear:
# 0, 1, 2... 29, 30, 31
step_size: 1
num_buckets: 32
reports:
############################################################################
# A fleet-wide histogram of bit flips on each read.
############################################################################
- report_name: nand_ecc_bit_flips_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Repeated read attempts to the NAND before a successful read, or giving up.
#
# A histogram of the number of read attempts used for a single read. The
# value is always at least 1; if the driver gives up, it reports MAX_ULONG,
# which ends up in the overflow bucket.
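# For example, a read that succeeds on the first attempt logs 1, and one
# that succeeds on the third attempt logs 3.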
#
# For insight into how many consecutive read attempts are actually productive,
# and how much benefit we get from our current maximum attempt count.
################################################################################
- id: 2007
metric_name: nand_read_attempts_per_read
metric_type: INTEGER_HISTOGRAM
metric_semantics: [USAGE_COUNTING]
metric_units_other: "attempts"
int_buckets:
exponential:
# 0, 1, 2, 4, 8... 64, 128
floor: 0
num_buckets: 9
initial_step: 1
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram of the number of attempts required to read.
############################################################################
- report_name: attempts_histogram
id: 1
report_type: FLEETWIDE_HISTOGRAMS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Number of failed read attempts to the NAND.
#
# A count of how many read attempts sent to the NAND result in failure.
#
# For insight into how fleet-wide failures might be clustered in a subset of
# devices, and how bad some of those devices might be. Daily values are
# normally expected to be >1,000 but <100,000.
################################################################################
- id: 2008
metric_name: nand_read_attempt_failures
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
reports:
############################################################################
# A daily histogram of read failures per device.
############################################################################
- report_name: failures_histogram
id: 1
report_type: UNIQUE_DEVICE_HISTOGRAMS
local_aggregation_period: WINDOW_1_DAY
int_buckets:
exponential:
# 0, 1, 2, 4, 8... 32768, 65536
floor: 0
num_buckets: 18
initial_step: 1
step_multiplier: 2
privacy_level: NO_ADDED_PRIVACY
system_profile_selection: SELECT_LAST
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Number of failed reads where we exhausted all attempts
#
# A count of reads for which no attempt sent to the NAND succeeded.
#
# For insight into how fleet-wide failures might be clustered in a subset of
# devices, and how bad some of those devices might be. Daily values are
# normally expected to be >1 but <100. They could in theory be higher if
# these events didn't usually result in the system becoming inoperable.
################################################################################
- id: 2009
metric_name: nand_read_attempts_exhausted
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
reports:
############################################################################
# A daily histogram of exhausted read attempts
############################################################################
- report_name: exhausted_histogram
id: 1
report_type: UNIQUE_DEVICE_HISTOGRAMS
local_aggregation_period: WINDOW_1_DAY
int_buckets:
exponential:
# 0, 1, 2, 4, 8... 64, 128
floor: 0
num_buckets: 9
initial_step: 1
step_multiplier: 2
privacy_level: NO_ADDED_PRIVACY
system_profile_selection: SELECT_LAST
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Number of bad blocks found on a device
#
# A count of how many blocks were marked bad by the initial factory marking,
# and how many are found and marked bad at run time.
#
# For insight into the fleet-wide availability of the spare block pool, as
# determined by how many blocks are currently marked bad, and into the split
# between initial and runtime bad blocks. For current use cases, 44 is
# effectively the maximum.
################################################################################
- id: 2010
metric_name: ftl_bad_blocks
metric_type: INTEGER
metric_semantics: [USAGE_COUNTING]
metric_units_other: "blocks"
metric_dimensions:
- dimension: bad_block_type
event_codes:
0: Unknown
1: INITIAL
2: RUNNING
reports:
############################################################################
# A daily histogram of bad block information per device.
############################################################################
- report_name: bad_blocks_histogram
id: 1
report_type: UNIQUE_DEVICE_HISTOGRAMS
local_aggregation_period: WINDOW_1_DAY
local_aggregation_procedure: MAX_PROCEDURE
int_buckets:
exponential:
# 0, 1, 2, 4, 8... 32, 64
floor: 0
num_buckets: 8
initial_step: 1
step_multiplier: 2
privacy_level: NO_ADDED_PRIVACY
system_profile_selection: SELECT_LAST
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Intermittent failures reading the last page in a map block
#
# Counts occurrences where the FTL encountered a failure reading a page in a
# map block and a subsequent retry succeeded. A dimension indicates the
# reason for the initial failure.
################################################################################
- id: 2011
metric_name: ftl_map_block_end_page_failures
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
metric_dimensions:
- dimension: reason
event_codes:
# Whilst using event code 0 is not recommended, the reporting code always
# reports a reason code, and reason 0 is one of the values it reports. This
# metric is also intended to be temporary.
0: Reason 0 (invalid page)
1: Reason 1 (erased page)
2: Reason 2 (uncorrectable ecc error)
3: Reason 3 (bad block-count)
4: Reason 4 (bad map page number)
5: Reason 5 (reserved for future use)
6: Reason 6 (reserved for future use)
7: Reason 7 (reserved for future use)
8: Reason 8 (reserved for future use)
9: Reason 9 (reserved for future use)
reports:
############################################################################
# A fleet-wide count of intermittent map-block end-page read failures.
############################################################################
- report_name: counts
id: 1
report_type: FLEETWIDE_OCCURRENCE_COUNTS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
# Next ID: 2012
################################################################################
# FVM Metrics
# Reserved Range: [3000, 4000)
################################################################################
# FVM: Slice Allocation Counts
#
# Collects per-partition slice allocation counts from FVM. This assumes there
# are two FVM partitions: blobfs and minfs.
#
# Sampling Frequency: 1 sample per hour.
#
# |partition|: The partition allocating the slices.
################################################################################
- id: 3000
replacement_metric_id: 3001
metric_name: fvm_slice_allocations
metric_type: EVENT_COUNT
metric_dimensions: &fsa_dimensions
- dimension: partition
event_codes:
0: Blobfs
1: Minfs
reports:
- report_name: fvm_slice_allocations
id: 3000
report_type: NUMERIC_AGGREGATION
percentiles: [1, 5, 10, 25, 50, 75, 90, 95, 98, 99]
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 3001
metric_name: fvm_slice_allocations_migrated
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *fsa_dimensions
reports:
- report_name: fvm_slice_allocations
id: 3000
report_type: HOURLY_VALUE_NUMERIC_STATS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
# Next ID: 3002
################################################################################
# Space Metrics
# Reserved Range: [4000, 5000)
################################################################################
# Data Bytes Used
#
# Used to track the number of bytes a partition is using for storing data.
# This is generally updated at a regular interval via Lapis, which queries
# filesystem information from fshost.
################################################################################
- id: 4000
replacement_metric_id: 4001
metric_name: data_bytes_used
# The current version of Cobalt doesn't support logging an integer sample.
# The closest is |MEMORY_USAGE|, which will be mapped to |INTEGER| once
# Cobalt 1.1 is released.
metric_type: MEMORY_USAGE
metric_dimensions: &space_dimensions
- *source_dimension
reports:
- report_name: per_device_daily_max
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 4001
metric_name: data_bytes_used_migrated
metric_type: INTEGER
metric_units: BYTES
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *space_dimensions
reports:
- report_name: per_device_daily_max
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Data Bytes Allocated
#
# Used to track the number of bytes a partition has allocated (free + used) for
# storing data.
# This is generally updated at a regular interval via Lapis, which queries
# filesystem information from fshost.
################################################################################
- id: 4002
replacement_metric_id: 4003
metric_name: data_bytes_allocated
# The current version of Cobalt doesn't support logging an integer sample.
# The closest is |MEMORY_USAGE|, which will be mapped to |INTEGER| once
# Cobalt 1.1 is released.
metric_type: MEMORY_USAGE
metric_dimensions: *space_dimensions
reports:
- report_name: per_device_daily_max
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 4003
metric_name: data_bytes_allocated_migrated
metric_type: INTEGER
metric_units: BYTES
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *space_dimensions
reports:
- report_name: per_device_daily_max
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Inodes Used
#
# Used to track the number of inodes a partition has in use.
# This is generally updated at a regular interval via Lapis, which queries
# filesystem information from fshost.
################################################################################
- id: 4004
replacement_metric_id: 4005
metric_name: inodes_used
# The current version of Cobalt doesn't support logging an integer sample.
# The closest is |MEMORY_USAGE|, which will be mapped to |INTEGER| once
# Cobalt 1.1 is released.
metric_type: MEMORY_USAGE
metric_dimensions: *space_dimensions
reports:
- report_name: per_device_daily_max
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 4005
metric_name: inodes_used_migrated
metric_type: INTEGER
metric_units_other: "inodes"
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *space_dimensions
reports:
- report_name: per_device_daily_max
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Inodes Allocated
#
# Used to track the number of inodes a partition has capacity for (free + used).
# This is generally updated at a regular interval via Lapis, which queries
# filesystem information from fshost.
################################################################################
- id: 4006
replacement_metric_id: 4007
metric_name: inodes_allocated
# The current version of Cobalt doesn't support logging an integer sample.
# The closest is |MEMORY_USAGE|, which will be mapped to |INTEGER| once
# Cobalt 1.1 is released.
metric_type: MEMORY_USAGE
metric_dimensions: *space_dimensions
reports:
- report_name: per_device_daily_max
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 4007
metric_name: inodes_allocated_migrated
metric_type: INTEGER
metric_units_other: "inodes"
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *space_dimensions
reports:
- report_name: per_device_daily_max
id: 1
report_type: UNIQUE_DEVICE_NUMERIC_STATS
local_aggregation_procedure: MAX_PROCEDURE
local_aggregation_period: WINDOW_1_DAY
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Out-Of-Space Events
#
# Per-filesystem count of times a device fails to extend the underlying volume
# after running out of space.
#
# Can increase by at most 1 every 5 minutes, so the daily max is 288
# (24 * 60 / 5 = 288).
################################################################################
- id: 4008
metric_name: out_of_space_events
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
metric_dimensions:
- *source_dimension
reports:
- report_name: per_device_daily_counts
id: 1
report_type: UNIQUE_DEVICE_HISTOGRAMS
system_profile_selection: SELECT_LAST
local_aggregation_period: WINDOW_1_DAY
int_buckets:
exponential:
# 0, 1, 2, 4, 8... 64, 128, 256
floor: 0
num_buckets: 10
initial_step: 1
step_multiplier: 2
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
# Next ID: 4009
################################################################################
# Temporary Metrics
# Reserved Range: [9000, 10000)
################################################################################
# Data partition upgrade
#
# Used to track the progress of a one-time data partition upgrade.
# Each `stage` is triggered as a one-off event when that stage begins, which
# occurs during boot. Since the overall procedure is intended to be a one-time
# event, this should only trigger once per device.
################################################################################
- id: 9002
replacement_metric_id: 9003
metric_name: data_partition_upgrade
metric_type: EVENT_COUNT
metric_dimensions: &dpu_dimensions
- dimension: stage
event_codes:
0: Unknown
1: Skipped
2: DetectedFailedUpgrade
3: ReadOldData
4: WriteNewData
5: Done
max_event_code: 15
reports:
- report_name: counts
id: 1
report_type: EVENT_COMPONENT_OCCURRENCE_COUNT
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
local_privacy_noise_level: NONE
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
- id: 9003
metric_name: data_partition_upgrade_migrated
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
metric_dimensions: *dpu_dimensions
reports:
- report_name: counts
id: 1
report_type: FLEETWIDE_OCCURRENCE_COUNTS
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
################################################################################
# Recovered Out-Of-Space Events
#
# Indicates a successful volume extension after performing a corrective action.
# Used to track how many times a filesystem was able to recover from a
# volume extension failure (e.g. by force-flushing the journal).
#
# Can increase by at most 1 every 5 minutes, so the daily max is 288.
################################################################################
- id: 9004
metric_name: recovered_space_from_sync_events
metric_type: OCCURRENCE
metric_semantics: [USAGE_COUNTING]
metric_dimensions:
- *source_dimension
reports:
- report_name: per_device_daily_counts
id: 1
report_type: UNIQUE_DEVICE_HISTOGRAMS
system_profile_selection: SELECT_LAST
local_aggregation_period: WINDOW_1_DAY
int_buckets:
exponential:
# 0, 1, 2, 4, 8... 64, 128, 256
floor: 0
num_buckets: 10
initial_step: 1
step_multiplier: 2
privacy_level: NO_ADDED_PRIVACY
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2023/03/01"
# Next ID: 9005
# Deleted metric IDs that must not be reused.
deleted_metric_ids: [15,68,69,9000,9001]