// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use diagnostics_assertions::{assert_data_tree, AnyProperty};
use diagnostics_reader::{ArchiveReader, Inspect};
use fidl_fuchsia_component::BinderMarker;
use fidl_fuchsia_diagnostics as fdiagnostics;
use fidl_fuchsia_metrics_test::MetricEventLoggerQuerierMarker;
use fidl_fuchsia_mockrebootcontroller::MockRebootControllerMarker;
use fidl_fuchsia_samplertestcontroller::SamplerTestControllerMarker;
use fuchsia_async as fasync;
use fuchsia_component::client::{connect_to_protocol_at, connect_to_protocol_at_path};
use fuchsia_zircon as zx;
use realm_client::InstalledNamespace;
use utils::{Event, EventVerifier};

mod test_topology;
mod utils;
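
/// Waits for the test's single counter component to come up by snapshotting
/// its Inspect hierarchy through the Archivist.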
async fn wait_for_single_counter_inspect(ns: &InstalledNamespace) {
let accessor = connect_to_protocol_at::<fdiagnostics::ArchiveAccessorMarker>(&ns).unwrap();
let _ = ArchiveReader::new()
.with_archive(accessor)
.add_selector(format!("{}:root", test_topology::COUNTER_NAME))
.snapshot::<Inspect>()
.await
.expect("got inspect data");
}

/// Runs the Sampler and a test component whose Inspect properties can be
/// manipulated by the test via FIDL, and uses the mock Cobalt and log
/// querier to verify that Sampler observes changes as expected and logs
/// them to Cobalt as expected.
#[fuchsia::test]
async fn event_count_sampler_test() {
let ns = test_topology::create_realm().await.expect("initialized topology");
let test_app_controller = connect_to_protocol_at::<SamplerTestControllerMarker>(&ns).unwrap();
wait_for_single_counter_inspect(&ns).await;
let reboot_controller = connect_to_protocol_at::<MockRebootControllerMarker>(&ns).unwrap();
let logger_querier = connect_to_protocol_at::<MetricEventLoggerQuerierMarker>(&ns).unwrap();
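// Connecting to Sampler's exposed fuchsia.component.Binder protocol starts the
// component; the proxy is kept alive so Sampler keeps running.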
let _sampler_binder = connect_to_protocol_at_path::<BinderMarker>(format!(
"{}/fuchsia.component.SamplerBinder",
ns.prefix()
))
.unwrap();
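// Collect the metric events Sampler logs under Cobalt project ID 5.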
let mut project_5_events = EventVerifier::new(&logger_querier, 5);
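// Bump the test component's integer property; the first sample reports events
// for metrics 101, 102, and 103.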
test_app_controller.increment_int(1).await.unwrap();
project_5_events
.validate_with_count(
vec![
Event { id: 101, value: 1, codes: vec![0, 0] },
Event { id: 102, value: 10, codes: vec![0, 0] },
Event { id: 103, value: 20, codes: vec![0, 0] },
],
"initial in event_count",
)
.await;
// We want to guarantee a sample takes place before we increment the value again.
// This verifies that even though two samples take place, the count-type metric isn't
// uploaded when there is no diff, and the upload_once metric isn't sampled again.
test_app_controller.wait_for_sample().await.unwrap().unwrap();
project_5_events
.validate_with_count(
vec![Event { id: 102, value: 10, codes: vec![0, 0] }],
"second in event_count",
)
.await;
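// Increment again and wait for the sample that picks the new value up.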
test_app_controller.increment_int(1).await.unwrap();
test_app_controller.wait_for_sample().await.unwrap().unwrap();
project_5_events
.validate_with_count(
vec![
// Even though we incremented metric-1, its value stays at 1 since it's being cached.
Event { id: 101, value: 1, codes: vec![0, 0] },
Event { id: 102, value: 10, codes: vec![0, 0] },
],
"before reboot in event_count",
)
.await;
// trigger_reboot calls the on_reboot callback that drives Sampler shutdown. This
// should not return until Sampler has finished its cleanup, which means some events
// should be present when it resolves and the Sampler task should be finished.
reboot_controller.trigger_reboot().await.unwrap().unwrap();
project_5_events
.validate_with_count(
vec![
// On shutdown, the metric configured to sample every 3000 seconds gets polled
// once and reports its value undiffed.
Event { id: 104, value: 2, codes: vec![0, 0] },
Event { id: 102, value: 10, codes: vec![0, 0] },
],
"after reboot in event_count",
)
.await;
}

/// Runs the Sampler and a test component whose Inspect properties can be
/// manipulated by the test via FIDL, and uses mock services to verify that
/// Sampler continues to run as expected when the reboot server goes down.
#[fuchsia::test]
async fn reboot_server_crashed_test() {
let ns = test_topology::create_realm().await.expect("initialized topology");
let test_app_controller = connect_to_protocol_at::<SamplerTestControllerMarker>(&ns).unwrap();
wait_for_single_counter_inspect(&ns).await;
let reboot_controller = connect_to_protocol_at::<MockRebootControllerMarker>(&ns).unwrap();
let logger_querier = connect_to_protocol_at::<MetricEventLoggerQuerierMarker>(&ns).unwrap();
let _sampler_binder = connect_to_protocol_at_path::<BinderMarker>(format!(
"{}/fuchsia.component.SamplerBinder",
ns.prefix()
))
.unwrap();
let mut project_5_events = EventVerifier::new(&logger_querier, 5);
// Crash the reboot server to verify that sampler continues to sample.
reboot_controller.crash_reboot_channel().await.unwrap().unwrap();
test_app_controller.increment_int(1).await.unwrap();
project_5_events
.validate_with_count(
vec![
Event { id: 101, value: 1, codes: vec![0, 0] },
Event { id: 102, value: 10, codes: vec![0, 0] },
Event { id: 103, value: 20, codes: vec![0, 0] },
],
"initial in crashed",
)
.await;
// Wait for another sample to verify that, even with a crashed reboot server, the
// count-type metric isn't uploaded when there is no diff and the upload_once
// metric isn't sampled again.
test_app_controller.wait_for_sample().await.unwrap().unwrap();
project_5_events
.validate_with_count(
vec![Event { id: 102, value: 10, codes: vec![0, 0] }],
"second in crashed",
)
.await;
}

/// Runs the Sampler and a test component whose Inspect properties can be
/// manipulated by the test via FIDL. Verifies that Sampler publishes its
/// status correctly in its own Inspect data.
#[fuchsia::test]
async fn sampler_inspect_test() {
let ns = test_topology::create_realm().await.expect("initialized topology");
let _sampler_binder = connect_to_protocol_at_path::<BinderMarker>(format!(
"{}/fuchsia.component.SamplerBinder",
ns.prefix()
))
.unwrap();
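// Sampler publishes its status asynchronously, so poll until both expected
// nodes appear in its Inspect hierarchy.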
let hierarchy = loop {
let accessor = connect_to_protocol_at::<fdiagnostics::ArchiveAccessorMarker>(&ns).unwrap();
// Snapshot Sampler's Inspect hierarchy.
let mut data = ArchiveReader::new()
.with_archive(accessor)
.add_selector(format!("{}:root", test_topology::SAMPLER_NAME))
.snapshot::<Inspect>()
.await
.expect("got inspect data");
let hierarchy = data.pop().expect("one result").payload.expect("payload is not none");
if hierarchy.get_child("sampler_executor_stats").is_none()
|| hierarchy.get_child("metrics_sent").is_none()
{
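// Status nodes not yet published; back off briefly and retry.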
fasync::Timer::new(fasync::Time::after(zx::Duration::from_millis(100))).await;
continue;
}
break hierarchy;
};
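// No metric uploads are expected yet, so every upload_count is zero and no
// project samplers have exited.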
assert_data_tree!(
hierarchy,
root: {
config: {
minimum_sample_rate_sec: 1 as u64,
configs_path: "/pkg/data/config",
},
sampler_executor_stats: {
healthily_exited_samplers: 0 as u64,
errorfully_exited_samplers: 0 as u64,
reboot_exited_samplers: 0 as u64,
total_project_samplers_configured: 4 as u64,
project_5: {
project_sampler_count: 2 as u64,
metrics_configured: 4 as u64,
cobalt_logs_sent: AnyProperty,
},
project_13: {
project_sampler_count: 2 as u64,
metrics_configured: 6 as u64,
cobalt_logs_sent: AnyProperty,
},
},
metrics_sent: {
"fire_1.json5":
{
"0": {
selector: "single_counter:root/samples:integer_1",
upload_count: 0 as u64
},
"1": {
selector: "single_counter:root/samples:integer_1",
upload_count: 0 as u64
},
"2": {
selector: "single_counter:root/samples:integer_2",
upload_count: 0 as u64
},
"3": {
selector: "single_counter:root/samples:integer_2",
upload_count: 0 as u64
}
},
"fire_2.json5": {
"0": {
selector: "single_counter:root/samples:integer_1",
upload_count: 0 as u64
},
"1": {
selector: "single_counter:root/samples:integer_2",
upload_count: 0 as u64
}
},
"reboot_required_config.json": {
"0": {
selector: "single_counter:root/samples:counter",
upload_count: 0 as u64
}
},
"test_config.json": {
"0": {
selector: "single_counter:root/samples:counter",
upload_count: 0 as u64
},
"1": {
selector: "single_counter:root/samples:integer_1",
upload_count: 0 as u64
},
"2": {
selector: "single_counter:root/samples:integer_2",
upload_count: 0 as u64
}
},
},
"fuchsia.inspect.Health": {
start_timestamp_nanos: AnyProperty,
status: AnyProperty
}
}
);
}