# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
################################################################################
# Cobalt Project: local_storage
################################################################################
metric_definitions:
################################################################################
# Link Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent linking a file, in nanoseconds.
################################################################################
- id: 1
metric_name: link_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
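# For reference, assuming Cobalt's standard exponential bucketing (with
# implicit underflow/overflow buckets), floor: 0, initial_step: 10000 and
# step_multiplier: 2 yield ten buckets covering [0, 10us), [10us, 20us),
# [20us, 40us), ..., [2.56ms, 5.12ms); samples >= 5.12ms land in the
# overflow bucket. The other latency metrics below that reuse these
# parameters share the same ranges.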
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: link_latency_histogram
id: 2203712145 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Close Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent closing an open file, in nanoseconds.
################################################################################
- id: 2
metric_name: close_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
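# For reference, with initial_step: 10 these buckets are 1000x finer than
# link_latency's, covering [0, 10ns) up through [2560ns, 5120ns), with
# samples >= 5120ns in the overflow bucket.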
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: close_latency_histogram
id: 3976766327 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Read Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent reading file contents, in nanoseconds.
################################################################################
- id: 3
metric_name: read_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: read_latency_histogram
id: 4000776865 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Write Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent writing a file, in nanoseconds.
################################################################################
- id: 4
metric_name: write_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: write_latency_histogram
id: 2787504078 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Append Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent appending to a file, in nanoseconds.
################################################################################
- id: 5
metric_name: append_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: append_latency_histogram
id: 4159068221 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Truncate Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent truncating a file's size, in nanoseconds.
################################################################################
- id: 6
metric_name: truncate_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: truncate_latency_histogram
id: 4157122023 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# SetAttr Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent updating its attributes, in nanoseconds.
################################################################################
- id: 7
metric_name: set_attr_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: set_attr_latency_histogram
id: 2824692351 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# GetAttr Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent returning its attributes, in nanoseconds.
################################################################################
- id: 8
metric_name: get_attr_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 1
step_multiplier: 2
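# For reference, with initial_step: 1 these buckets cover [0, 1ns) up
# through [256ns, 512ns), with samples >= 512ns in the overflow bucket.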
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: get_attr_latency_histogram
id: 296311099 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Sync Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent syncing its state to disk, in nanoseconds.
################################################################################
- id: 9
metric_name: sync_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: sync_latency_histogram
id: 3117694120 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# ReadDir Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent reading directory entries, in nanoseconds.
################################################################################
- id: 10
metric_name: read_dir_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: read_dir_latency_histogram
id: 1707841577 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# LookUp Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent looking up a child, in nanoseconds.
################################################################################
- id: 11
metric_name: lookup_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: lookup_latency_histogram
id: 3629837219 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Create Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time spent creating a new vnode, in nanoseconds.
################################################################################
- id: 12
metric_name: create_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: create_latency_histogram
id: 3690266605 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# UnLink Latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time a vnode spent unlinking, in nanoseconds.
################################################################################
- id: 13
metric_name: unlink_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: unlink_latency_histogram
id: 3128110576 # legacy long report_id
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
################################################################################
# Corruption Event
#
# A corruption event logs a '1' every time a component detects corruption. This
# can be either data or metadata corruption depending on the source.
################################################################################
- id: 14
metric_name: corruption_events
# This will always log a single event (1).
metric_type: EVENT_COUNT
metric_dimensions:
- dimension: "source"
event_codes:
0: Unknown
1: Fvm
2: BlobFs
3: MinFs
- dimension: "corruption_type"
event_codes:
0: Unknown
1: Data
2: Metadata
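# Illustrative example: minfs detecting metadata corruption would log a
# count of 1 with event codes (source = 3 "MinFs",
# corruption_type = 2 "Metadata").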
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: corruption_summary
id: 1809599655 # legacy long report_id
report_type: EVENT_COMPONENT_OCCURRENCE_COUNT
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: corruption_per_device
id: 1110247575 # legacy long report_id
report_type: PER_DEVICE_NUMERIC_STATS
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION]
meta_data:
max_release_stage: GA
expiration_date: "2020/03/20"
# id: 15 deprecated.
################################################################################
# Compression format
#
# Total file sizes (in bytes) per compression format.
#
# Currently only supported by blobfs. When blobfs is starting up after boot, we
# compute the total (uncompressed) size of all blobs per compression format,
# and log the totals to Cobalt.
#
################################################################################
- id: 16
metric_name: compression_format
metric_type: EVENT_COUNT
metric_dimensions:
- dimension: "source"
event_codes:
0: Unknown
1: BlobFs
- dimension: "format"
event_codes:
0: Unknown
1: Uncompressed
2: LZ4
3: ZSTD
4: ZSTDSeekable
5: ZSTDChunked
reports:
############################################################################
# A per-device histogram report.
############################################################################
- report_name: compression_per_device_histogram
id: 1
report_type: PER_DEVICE_HISTOGRAM
##########################################################################
# Since we only log the count once per boot, we use MAX here in case a
# device reboots several times during the day.
##########################################################################
aggregation_type: MAX
int_buckets:
linear:
step_size: 20971520 # 20 MiB
num_buckets: 100
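# For reference, 100 linear buckets of 20 MiB cover per-format totals up to
# 2000 MiB (about 2 GiB); larger totals land in the overflow bucket.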
window_size:
- 1 # 1 day
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/03/20"
################################################################################
# Page fault latency
#
# Histogram of latencies in nanoseconds for satisfying a page fault.
#
# Each time a page fault occurs (a client accesses a not-present page in a
# pager-backed VMO), the latency of the userspace fault handling is measured
# in nanoseconds and added to the histogram.
#
# Blobfs currently flushes this metric to Cobalt every five minutes (at the
# same time as all other metrics).
#
# Currently only supported by blobfs, and only when blobfs.userpager=true is set
# in the build.
#
################################################################################
- id: 17
metric_name: page_fault_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 15
initial_step: 10000
step_multiplier: 2
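# For reference, these 15 exponential buckets cover [0, 10us) up through
# [81.92ms, 163.84ms); faults taking >= ~164ms land in the overflow bucket.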
metric_dimensions:
- dimension: "source"
event_codes:
0: Unknown
1: BlobFs
- dimension: "format"
event_codes:
0: Unknown
1: Uncompressed
2: LZ4
3: ZSTD
4: ZSTDSeekable
5: ZSTDChunked
- dimension: "read_ahead_size"
event_codes:
0: 8KiB
1: 16KiB
2: 32KiB
3: 64KiB
4: 128KiB
5: 256KiB
6: 512KiB
7: LargerThan512KiB
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: page_fault_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/04/28"
################################################################################
# Journal write data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal spent writing data, in nanoseconds.
# Typically a "write" here is just a memory copy into a ring buffer. The
# payload of the write is user data rather than filesystem metadata.
# This is a filesystem-facing async API metric. High latency may mean that
# the ring buffer is full, for example due to back-pressure from a slower
# underlying disk.
################################################################################
- id: 18
metric_name: journal_write_data_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_write_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal write metadata latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal spent writing metadata, in nanoseconds.
# Typically a "write" here is just a memory copy into a ring buffer. The
# payload of the write is filesystem metadata.
# This is a filesystem-facing async API metric. High latency may mean that
# the ring buffer is full, for example due to back-pressure from a slower
# underlying disk.
################################################################################
- id: 19
metric_name: journal_write_metadata_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_write_metadata_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal trim data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal spent trimming data, in nanoseconds.
# Trim lets NAND-based storage mark blocks as unused; trimmed blocks help
# the storage software improve NAND performance.
# This API queues a trim command in the journal buffer without actually
# issuing the trim to the underlying device.
# This is a filesystem-facing async API metric. High latency may mean that
# the ring buffer is full, for example due to back-pressure from a slower
# underlying disk.
################################################################################
- id: 20
metric_name: journal_trim_data_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_trim_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal sync latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal spent syncing, in nanoseconds.
# Sync ensures that all operations queued before it have completed.
# This is a filesystem-facing async API metric. High latency may mean that
# the ring buffer is full, for example due to back-pressure from a slower
# underlying disk.
################################################################################
- id: 21
metric_name: journal_sync_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_sync_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal schedule task latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal spent scheduling a task, in nanoseconds.
# A task is a collection of journal operations (like data writes, metadata
# writes, etc.). These operations are executed as a background task.
################################################################################
- id: 22
metric_name: journal_schedule_task_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_schedule_task_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal writer write data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer spent writing data, in nanoseconds.
# The journal writer is a journal background thread that deals with issuing
# IOs to the underlying disk.
# This is the latency of writing user data (as opposed to filesystem
# metadata) to disk by the journal writer.
# See the comments for journal_write_data_latency above.
################################################################################
- id: 23
metric_name: journal_writer_write_data_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal writer write metadata latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer spent writing metadata, in nanoseconds.
# The journal writer is a journal background thread that deals with issuing
# blocking IOs to the underlying disk.
# This is the latency of writing filesystem metadata to disk by the journal
# writer.
# See the comments for journal_write_metadata_latency above.
################################################################################
- id: 24
metric_name: journal_writer_write_metadata_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_metadata_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal writer trim data latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer spent trimming data, in nanoseconds.
# The journal writer is a journal background thread that deals with issuing
# blocking IOs to the underlying disk.
# This is the latency of trimming blocks as seen by the journal writer.
# See the comments for journal_trim_data_latency above.
################################################################################
- id: 25
metric_name: journal_writer_trim_data_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_trim_data_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal writer sync latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer spent syncing, in nanoseconds.
# The journal writer is a journal background thread that deals with issuing
# blocking IOs to the underlying disk.
# This is the latency of syncing journal operations as seen by the journal
# writer.
# See the comments for journal_sync_latency above.
################################################################################
- id: 26
metric_name: journal_writer_sync_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_sync_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Journal writer write info block latency
#
# A histogram, aggregated in-process and logged to Cobalt every few minutes,
# of the time the journal writer spent writing the info block, in nanoseconds.
# The journal writer is a journal background thread that deals with issuing
# blocking IOs to the underlying disk.
# The journal updates its on-disk headers when certain conditions are met
# (e.g. the ring buffer filling up). This metric tracks the latency of such
# writes.
################################################################################
- id: 27
metric_name: journal_writer_write_info_block_latency
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10000
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: journal_writer_write_info_block_latency_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/08"
################################################################################
# Oldest on-disk version
#
# The component string field tracks the oldest on-disk version, while a
# dimension tracks the storage sub-component. This metric is incremented on
# every mount.
################################################################################
- id: 28
metric_name: version
metric_type: EVENT_COUNT
metric_dimensions:
- dimension: component
event_codes:
0: unknown
1: fvm
2: blobfs
3: minfs
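# Illustrative example: a blobfs partition whose oldest on-disk format
# revision is 8 would log a count of 1 against event code 2 ("blobfs"),
# carrying the version (e.g. the string "8") in the component field.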
reports:
- report_name: version_mount_counts
id: 1
candidate_file: fuchsia/local_storage/versions.txt
report_type: EVENT_COMPONENT_OCCURRENCE_COUNT
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/12/14"
################################################################################
# Total inodes
#
# Total number of inodes, either free or in use, in the system.
# This metric helps us understand the size of the inode table. It may also
# help in interpreting other fragmentation metrics, namely file_in_use and
# extent_containers_in_use.
################################################################################
- id: 29
metric_name: total_inodes
metric_type: MEMORY_USAGE
reports:
- report_name: total_inodes_count
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/12/23"
################################################################################
# Files in use
#
# Total number of files (or blobs, in the case of blobfs) in use in the
# system. This number may vary widely across builds, or when the device has
# packages installed beyond the system base packages.
################################################################################
- id: 30
metric_name: file_in_use
metric_type: MEMORY_USAGE
reports:
- report_name: files_in_use_count
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/12/23"
################################################################################
# Extent containers in use
#
# A file/blob may use one or more extents, each a pointer to a set of blocks.
# Extent containers are collections of one or more extents. A larger number
# of extent containers in use indicates higher fragmentation of the storage.
################################################################################
- id: 31
metric_name: extent_containers_in_use
metric_type: MEMORY_USAGE
reports:
- report_name: extent_containers_in_use_count
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/12/23"
################################################################################
# Extents per file fragmentation
#
# A histogram, aggregated in-process and logged to Cobalt every time the
# filesystem is mounted or unmounted, of the number of extents used by each
# file/blob.
# The histogram shows fragmentation at the file/blob level. This metric will
# help us understand how defragmenting certain files would help either to
# recover extents or to improve read performance.
################################################################################
- id: 32
metric_name: extents_per_file
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
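# For reference, these buckets cover [0, 10) extents per file up through
# [2560, 5120); files with >= 5120 extents land in the overflow bucket. The
# in_use_fragments and free_fragments metrics below reuse these buckets.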
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_extents_per_file_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/12/23"
################################################################################
# In use block fragmentation
#
# A histogram, aggregated in-process and logged to Cobalt every time the
# filesystem is mounted or unmounted, of runs of used contiguous blocks that
# map to contiguous offsets of a file.
# The histogram shows used-block fragmentation. This metric will help us
# understand the effects of the block allocation policy, and will also help
# in choosing which blocks to move if we decide to defragment the system.
################################################################################
- id: 33
metric_name: in_use_fragments
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_in_use_fragments_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/12/23"
################################################################################
# Free fragments.
#
# A histogram, aggregated in-process and logged to Cobalt every time the
# filesystem is mounted or unmounted, of runs of contiguous free blocks.
# The histogram shows free-block fragmentation. This metric will help us
# understand the likelihood of running out of extents during an OTA.
################################################################################
- id: 34
metric_name: free_fragments
metric_type: INT_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 10
initial_step: 10
step_multiplier: 2
reports:
############################################################################
# A fleet-wide histogram report
############################################################################
- report_name: fragmentation_free_fragments_histogram
id: 1
report_type: INT_RANGE_HISTOGRAM
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/12/23"
# FTL Metrics
# Reserved Range: [2000, 3000)
################################################################################
# FTL: Wear Count
#
# A counter representing the highest wear count of the FTL driver.
#
# A NAND block has an expected lifetime in erase cycles. An erase must occur
# before anything can be written (updating data).
#
# This count is the highest number of erase cycles per block, across all
# erase blocks in the NAND device.
#
# This metric allows observing how well the FTL wear-leveling algorithm is
# performing at maximizing device lifetime: the higher the wear, the more
# likely a block is to fail.
#
# Sampling Frequency: 1 sample per hour.
################################################################################
- id: 2000
metric_name: ftl_wear_count
# The current version of Cobalt doesn't support logging an integer sample.
# The closest is |MEMORY_USAGE|, which will be mapped to |Integer| once
# Cobalt 1.1 is released.
metric_type: MEMORY_USAGE
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_wear_count_per_device
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_wear_count_histogram_per_device
id: 2
report_type: INT_RANGE_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 20
initial_step: 1
step_multiplier: 2
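# For reference, these 20 exponential buckets cover wear counts of [0, 1),
# [1, 2), [2, 4), ..., [262144, 524288); higher counts land in the overflow
# bucket.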
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/27"
################################################################################
# FTL: Block Operation Count
#
# A counter representing the number of block operations the FTL receives.
#
# Allows measuring the IO pressure on the FTL. When combined with
# |ftl_nand_operation_count|, an approximate operation amplification can be
# estimated.
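# For example, if a device reports 1,000 BLOCK_WRITE operations here and
# ftl_nand_operation_count reports 4,000 NAND_WRITE operations sourced from
# BLOCK_WRITE over the same window, the write amplification is roughly 4x.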
#
# Sampling Frequency: 1 sample per hour.
#
# |operation_type|: Refers to the type of block operation received in the
# FTL layer.
#
################################################################################
- id: 2001
metric_name: ftl_block_operation_count
metric_type: EVENT_COUNT
metric_dimensions:
- dimension: operation_type
event_codes:
0: Unknown
1: BLOCK_READ
2: BLOCK_WRITE
3: BLOCK_FLUSH
4: BLOCK_TRIM
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_block_operation_per_device
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: SUM
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_block_max_operations_per_device
id: 2
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_block_operation_histogram_per_device
id: 3
report_type: INT_RANGE_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 20
initial_step: 1
step_multiplier: 2
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/27"
################################################################################
# FTL: NAND Operation Count
#
# A counter representing the number of NAND operations the FTL issues to the
# underlying NAND layer.
#
# Allows measuring the IO pressure the FTL generates. When combined with
# |ftl_block_operation_count|, an approximate operation amplification can be
# estimated.
#
# Sampling Frequency: 1 sample per hour.
#
# |operation_type|: Refers to the type of NAND operation issued to the NAND
# layer by the FTL, in response to a given block operation type.
# |source_operation_type|: Refers to the type of block operation received in
# the FTL layer.
#
################################################################################
- id: 2002
metric_name: ftl_nand_operation_count
metric_type: EVENT_COUNT
metric_dimensions:
- dimension: operation_type
event_codes:
0: Unknown
1: NAND_READ
2: NAND_WRITE
3: NAND_ERASE
- dimension: source_operation_type
event_codes:
0: Unknown
1: BLOCK_READ
2: BLOCK_WRITE
3: BLOCK_ERASE
4: BLOCK_TRIM
reports:
############################################################################
# A fleet-wide summary.
############################################################################
- report_name: ftl_nand_operations_per_device
id: 1
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: SUM
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_nand_operation_max_per_device
id: 2
report_type: PER_DEVICE_NUMERIC_STATS
aggregation_type: MAX
window_size:
- 1
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
- report_name: ftl_nand_operation_histogram_per_device
id: 3
report_type: INT_RANGE_HISTOGRAM
int_buckets:
exponential:
floor: 0
num_buckets: 20
initial_step: 1
step_multiplier: 2
system_profile_field: [BOARD_NAME, PRODUCT_NAME, SYSTEM_VERSION, CHANNEL]
meta_data:
max_release_stage: GA
expiration_date: "2021/10/27"
# FVM Metrics
# Reserved Range: [3000, 4000)
################################################################################
# FVM: Slice Allocation Counts
#
# Collects per-partition slice allocation counts from fvm. This assumes there are two fvm
# partitions - blobfs and minfs.
#
# Sampling Frequency: 1 sample per hour.
#
# |partition|: The partition allocating the slices.
################################################################################
- id: 3000
metric_name: fvm_slice_allocations
metric_type: EVENT_COUNT
metric_dimensions:
- dimension: partition
event_codes:
0: Blobfs
1: Minfs
reports:
- report_name: fvm_slice_allocations
id: 3000
report_type: NUMERIC_AGGREGATION
system_profile_field: [PRODUCT_NAME, CHANNEL, SYSTEM_VERSION]
meta_data:
max_release_stage: GA
expiration_date: "2022/01/13"