{{/*
// Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
*/}}
#![allow(unused_imports)]
use {
fidl::{
encoding::{Context, Decode, Decoder, Encoder, with_tls_encode_buf, WireFormatVersion},
handle::{Handle, HandleDisposition, HandleInfo, HandleOp},
ObjectType, Rights,
},
fidl_test_benchmarkfidl{{ .CrateSuffix }} as test_benchmarkfidl{{ .CrateSuffix }},
fuchsia_async as fasync,
fuchsia_criterion::criterion::{BatchSize, Bencher, black_box},
fuchsia_zircon as zx,
futures::{future, stream::StreamExt},
gidl_util::{HandleDef, HandleSubtype, copy_handle, create_handles, select_handle_infos},
std::mem::ManuallyDrop,
};
// BENCHMARKS is aggregated by a generated benchmark_suite.rs file, which is
// ultimately used by the benchmarks in src/tests/benchmarks/fidl/rust/src/main.rs.
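// As a hypothetical example, a benchmark named ByteVector16 with chromeperf path
// "ByteVector/16" expands to entries such as:
//   ("Builder/ByteVector/16", benchmark_ByteVector16_builder),
//   ("Encode/ByteVector/16", benchmark_ByteVector16_encode),
//   ("Decode/ByteVector/16", benchmark_ByteVector16_decode),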
pub const BENCHMARKS: [(&str, fn(&mut Bencher)); {{ .NumBenchmarks }}] = [
{{- range .Benchmarks }}
("Builder/{{ .ChromeperfPath }}", benchmark_{{ .Name }}_builder),
("Encode/{{ .ChromeperfPath }}", benchmark_{{ .Name }}_encode),
("Decode/{{ .ChromeperfPath }}", benchmark_{{ .Name }}_decode),
{{ if .EnableSendEventBenchmark }}
("SendEvent/{{ .ChromeperfPath }}", benchmark_{{ .Name }}_send_event),
{{- end -}}
{{ if .EnableEchoCallBenchmark }}
("EchoCall/{{ .ChromeperfPath }}", benchmark_{{ .Name }}_echo_call),
{{- end -}}
{{- end }}
];
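// All benchmarks below encode and decode with the V2 FIDL wire format.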
const _V2_CONTEXT: Context = Context { wire_format_version: WireFormatVersion::V2 };
{{ range .Benchmarks }}
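// Measures the time to build the value in Rust (including its drop time, because
// of the trailing semicolon inside the closure), with no FIDL encoding involved.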
fn benchmark_{{ .Name }}_builder(b: &mut Bencher) {
{{- if .HandleDefs }}
b.iter_batched_ref(
|| {
create_handles(&{{ .HandleDefs }})
},
|handle_defs| {
black_box({{ .Value }}); {{- /* semicolon: include drop time. */}}
},
{{- /* Consider using LargeInput or NumIterations if this causes memory issues. */}}
BatchSize::SmallInput,
);
{{- else }}
b.iter(
|| {
black_box({{ .Value }}); {{- /* semicolon: include drop time. */}}
},
);
{{- end }}
}
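// Measures the time to encode an already-built value into thread-local encode
// buffers; the value is built in the untimed setup closure.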
fn benchmark_{{ .Name }}_encode(b: &mut Bencher) {
b.iter_batched_ref(
|| {
{{- if .HandleDefs }}
let handle_defs = create_handles(&{{ .HandleDefs }});
{{- end }}
{{ .Value }}
},
|value| {
{{- /* Encode to TLS buffers since that's what the bindings do in practice. */}}
with_tls_encode_buf(|bytes, handles| {
Encoder::encode_with_context::<{{ .ValueType }}>(_V2_CONTEXT, bytes, handles, {{ if not .IsResource }}&*{{ end }}value).unwrap();
{{- /* Return the underlying heap storage of handles, since with_tls_encode_buf
clears it after calling this closure, which otherwise would close all the
handles. By returning the actual handles, we avoid including handle close
time in the benchmark. Note that this means the handle vector must reallocate
on every iteration if handles are used. */}}
{{- if .HandleDefs }}
std::mem::take(handles)
{{- end }}
}) {{- /* no semicolon: exclude drop time. */}}
},
{{- /* Consider using LargeInput or NumIterations if this causes memory issues. */}}
BatchSize::SmallInput,
);
}
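// Measures the time to decode a pre-encoded message plus the decoded value's
// drop time; encoding happens once per iteration in the untimed setup closure.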
fn benchmark_{{ .Name }}_decode(b: &mut Bencher) {
b.iter_batched_ref(
|| {
{{- if .HandleDefs }}
let handle_defs = create_handles(&{{ .HandleDefs }});
{{- end }}
let mut bytes = Vec::<u8>::new();
let mut handles = Vec::<HandleDisposition<'static>>::new();
let original_value = &{{ if .IsResource }}mut {{ end }}{{ .Value }};
Encoder::encode_with_context::<{{ .ValueType }}>(_V2_CONTEXT, &mut bytes, &mut handles, original_value).unwrap();
let handle_infos = fidl::convert_handle_dispositions_to_infos(handles).unwrap();
{{- /* Wrap the value in ManuallyDrop so the measured closure can drop it explicitly,
counting drop time in the benchmark without a second drop at batch teardown. */}}
(bytes, handle_infos, ManuallyDrop::new(fidl::new_empty!({{ .ValueType }})))
},
|(bytes, handle_infos, manually_drop_value)| {
// Get a &mut T from the &mut ManuallyDrop<T> via DerefMut.
let value: &mut {{ .ValueType }} = &mut **manually_drop_value;
Decoder::decode_with_context::<{{ .ValueType }}>(_V2_CONTEXT, bytes, handle_infos, value).unwrap();
// Count the drop time in the benchmark.
unsafe { ManuallyDrop::drop(manually_drop_value) };
},
{{- /* Consider using LargeInput or NumIterations if this causes memory issues. */}}
BatchSize::SmallInput,
);
}
{{ if .EnableSendEventBenchmark }}
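// Receiver side of the send-event benchmark: runs on its own thread, pulling each
// event off the FIDL channel and acknowledging it over a sync mpsc channel.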
async fn {{ .Name }}_send_event_receiver_thread(receiver_fidl_chan_end: zx::Channel, sender_fifo: std::sync::mpsc::SyncSender<()>) {
let async_receiver_fidl_chan_end = fasync::Channel::from_channel(receiver_fidl_chan_end);
let proxy = {{ .ValueType }}EventProtocolProxy::new(async_receiver_fidl_chan_end);
let mut event_stream = proxy.take_event_stream();
while let Some(_event) = event_stream.next().await {
sender_fifo.send(()).unwrap();
}
}
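// Measures the time to send an event over a zircon channel and have the receiver
// thread acknowledge it; the event payload is built in the untimed setup closure.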
fn benchmark_{{ .Name }}_send_event(b: &mut Bencher) {
let thread;
{
let (sender_fifo, receiver_fifo) = std::sync::mpsc::sync_channel(1);
let (sender_fidl_chan_end, receiver_fidl_chan_end) = zx::Channel::create();
thread = std::thread::spawn(|| {
fasync::LocalExecutor::new()
.run_singlethreaded(async move {
{{ .Name }}_send_event_receiver_thread(receiver_fidl_chan_end, sender_fifo).await;
});
});
fasync::LocalExecutor::new()
.run_singlethreaded(async move {
let async_sender_fidl_chan_end = fasync::Channel::from_channel(sender_fidl_chan_end);
let sender = <{{ .ValueType }}EventProtocolRequestStream as fidl::endpoints::RequestStream>::from_channel(async_sender_fidl_chan_end);
{{- if .IsResource }}
b.iter_batched(|| {
{{- else }}
b.iter_batched_ref(|| {
{{- end }}
{{- if .HandleDefs }}
let handle_defs = create_handles(&{{ .HandleDefs }});
{{- end }}
{{ .Value }}
},
|value| {
fidl::endpoints::RequestStream::control_handle(&sender).send_send_(value).unwrap();
receiver_fifo.recv().unwrap();
},
BatchSize::SmallInput);
});
}
thread.join().unwrap();
}
{{- end -}}
{{ if .EnableEchoCallBenchmark }}
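// Server side of the echo-call benchmark: runs on its own thread and echoes each
// request value back to the caller, serving up to MAX_CONCURRENT requests at once.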
async fn {{ .Name }}_echo_call_server_thread(server_end: zx::Channel) {
let async_server_end = fasync::Channel::from_channel(server_end);
let stream = <{{ .ValueType }}EchoCallRequestStream as fidl::endpoints::RequestStream>::from_channel(async_server_end);
const MAX_CONCURRENT: usize = 10;
stream.for_each_concurrent(MAX_CONCURRENT, |request| {
match request {
Ok({{ .ValueType }}EchoCallRequest::Echo { val, responder }) => {
responder.send({{ if not .IsResource }}&{{ end }}val).unwrap();
},
Err(_) => {
panic!("unexpected err request")
},
}
future::ready(())
}).await;
}
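// Measures a full round trip: a synchronous echo call against the server thread
// above; the request value is built in the untimed setup closure.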
fn benchmark_{{ .Name }}_echo_call(b: &mut Bencher) {
let thread;
{
let (client_end, server_end) = zx::Channel::create();
thread = std::thread::spawn(|| {
fasync::LocalExecutor::new()
.run_singlethreaded(async move {
{{ .Name }}_echo_call_server_thread(server_end).await;
});
});
let proxy = {{ .ValueType }}EchoCallSynchronousProxy::new(client_end);
{{- if .IsResource }}
b.iter_batched(|| {
{{- else }}
b.iter_batched_ref(|| {
{{- end }}
{{- if .HandleDefs }}
let handle_defs = create_handles(&{{ .HandleDefs }});
{{- end }}
{{ .Value }}
},
|value| {
proxy.echo(value, zx::Time::after(zx::Duration::from_seconds(1))).unwrap();
},
BatchSize::SmallInput);
}
thread.join().unwrap();
}
{{- end -}}
{{ end }}