[zircon_benchmark] add VmoReadOrWriteMapTest and VmoReadOrWriteMapRangeTest
Allow comparison with vmo/read/write test to see performance for various
sizes. VmoReadOrWriteMapRangeTest adds the flag ZX_VM_MAP_RANGE, which
maps all pages up front and so avoids a demand-paging fault on each accessed page.
Test: zircon_benchmarks -p --filter "VmoMap|Vmo/[RW]"
Change-Id: I38224d617a186cc7389431a7f61ce5462c977372
diff --git a/bin/zircon_benchmarks/vmo.cc b/bin/zircon_benchmarks/vmo.cc
index cc1cb8c..24b0bcf 100644
--- a/bin/zircon_benchmarks/vmo.cc
+++ b/bin/zircon_benchmarks/vmo.cc
@@ -5,6 +5,7 @@
#include <vector>
#include <fbl/string_printf.h>
+#include <lib/zx/vmar.h>
#include <lib/zx/vmo.h>
#include <perftest/perftest.h>
@@ -20,6 +21,10 @@
ZX_ASSERT(zx::vmo::create(copy_size, 0, &vmo) == ZX_OK);
std::vector<char> buffer(copy_size);
+ // Write the buffer so that the pages are pre-committed. This matters
+ // more for the read case.
+ ZX_ASSERT(vmo.write(buffer.data(), 0, copy_size) == ZX_OK);
+
if (do_write) {
while (state->KeepRunning()) {
ZX_ASSERT(vmo.write(buffer.data(), 0, copy_size) == ZX_OK);
@@ -32,6 +37,51 @@
return true;
}
+// Measure the time taken to write or read a chunk of data to/from a VMO
+// by using map/memcpy.
+bool VmoReadOrWriteMapTestImpl(perftest::RepeatState* state, uint32_t copy_size,
+ bool do_write, int flags) {
+ state->SetBytesProcessedPerRun(copy_size);
+
+ zx::vmo vmo;
+ ZX_ASSERT(zx::vmo::create(copy_size, 0, &vmo) == ZX_OK);
+ std::vector<char> buffer(copy_size);
+ zx_vaddr_t mapped_addr;
+
+ // Write the buffer so that the pages are pre-committed. This matters
+ // more for the read case.
+ ZX_ASSERT(vmo.write(buffer.data(), 0, copy_size) == ZX_OK);
+
+ if (do_write) {
+ while (state->KeepRunning()) {
+ ZX_ASSERT(zx::vmar::root_self()->map(
+ 0, vmo, 0, copy_size,
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | flags, &mapped_addr) == ZX_OK);
+ std::memcpy(reinterpret_cast<void*>(mapped_addr), buffer.data(), copy_size);
+ ZX_ASSERT(zx::vmar::root_self()->unmap(mapped_addr, copy_size) == ZX_OK);
+ }
+ } else { // read
+ while (state->KeepRunning()) {
+ ZX_ASSERT(zx::vmar::root_self()->map(
+ 0, vmo, 0, copy_size,
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | flags, &mapped_addr) == ZX_OK);
+ std::memcpy(buffer.data(), reinterpret_cast<void*>(mapped_addr), copy_size);
+ ZX_ASSERT(zx::vmar::root_self()->unmap(mapped_addr, copy_size) == ZX_OK);
+ }
+ }
+ return true;
+}
+
+bool VmoReadOrWriteMapTest(perftest::RepeatState* state, uint32_t copy_size,
+ bool do_write) {
+ return VmoReadOrWriteMapTestImpl(state, copy_size, do_write, 0);
+}
+
+bool VmoReadOrWriteMapRangeTest(perftest::RepeatState* state,
+ uint32_t copy_size, bool do_write) {
+ return VmoReadOrWriteMapTestImpl(state, copy_size, do_write, ZX_VM_MAP_RANGE);
+}
+
// Measure the time taken to clone a vmo and destroy it.
bool VmoCloneTest(perftest::RepeatState* state, uint32_t copy_size) {
state->DeclareStep("clone");
@@ -85,7 +135,7 @@
}
void RegisterTests() {
- for (unsigned size_in_kbytes : {128, 1000}) {
+ for (unsigned size_in_kbytes : {128, 1000, 10000}) {
for (bool do_write : {false, true}) {
// Read/Write.
const char* rw = do_write ? "Write" : "Read";
@@ -94,6 +144,20 @@
size_in_kbytes * 1024, do_write);
}
+ for (bool do_write : {false, true}) {
+ // Map/memcpy-based read and write tests.
+ const char* rw = do_write ? "Write" : "Read";
+ auto rw_name =
+ fbl::StringPrintf("VmoMap/%s/%ukbytes", rw, size_in_kbytes);
+ perftest::RegisterTest(rw_name.c_str(), VmoReadOrWriteMapTest,
+ size_in_kbytes * 1024, do_write);
+
+ rw_name =
+ fbl::StringPrintf("VmoMapRange/%s/%ukbytes", rw, size_in_kbytes);
+ perftest::RegisterTest(rw_name.c_str(), VmoReadOrWriteMapRangeTest,
+ size_in_kbytes * 1024, do_write);
+ }
+
// Clone (only run it once).
auto clone_name = fbl::StringPrintf("Vmo/Clone/%ukbytes", size_in_kbytes);
perftest::RegisterTest(clone_name.c_str(), VmoCloneTest,