Merge 'upstream/main' into 'main'
Change-Id: I447cdfb9a7257636b9df46f45125028621cbe3b6
Reviewed-on: https://fuchsia-review.googlesource.com/c/third_party/android.googlesource.com/platform/system/libfmq/+/1059082
Reviewed-by: Benjamin Lerman <qsr@google.com>
diff --git a/Android.bp b/Android.bp
index c42772b..cd8c2b3 100644
--- a/Android.bp
+++ b/Android.bp
@@ -56,9 +56,6 @@
product_available: true,
// TODO(b/153609531): remove when no longer needed.
native_bridge_supported: true,
- vndk: {
- enabled: true,
- },
double_loadable: true,
min_sdk_version: "29",
host_supported: true,
@@ -79,3 +76,124 @@
export_include_dirs: ["base"],
min_sdk_version: "29",
}
+
+cc_library {
+ name: "libfmq_erased",
+ shared_libs: [
+ "libbase",
+ "liblog",
+ "libcutils",
+ "libfmq",
+ "android.hardware.common.fmq-V1-ndk",
+ ],
+ srcs: [
+ "ErasedMessageQueue.cpp",
+ ],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ vendor_available: true,
+ min_sdk_version: "29",
+ host_supported: true,
+}
+
+rust_bindgen {
+ name: "libfmq_bindgen",
+ wrapper_src: "ErasedMessageQueue.hpp",
+ crate_name: "fmq_bindgen",
+ edition: "2021",
+ host_supported: true,
+ source_stem: "fmq",
+ bindgen_flags: [
+ "--no-recursive-allowlist",
+ "--use-core",
+ "--ctypes-prefix=core::ffi",
+ "--raw-line=#![no_std]",
+ "--raw-line=pub use android_hardware_common_fmq::aidl::android::hardware::common::fmq::{MQDescriptor::MQDescriptor, UnsynchronizedWrite::UnsynchronizedWrite, SynchronizedReadWrite::SynchronizedReadWrite, GrantorDescriptor::GrantorDescriptor};",
+ "--raw-line=pub use android_hardware_common::{aidl::android::hardware::common::NativeHandle::NativeHandle, binder::ParcelFileDescriptor};",
+ "--raw-line=pub enum android_MQErased {}",
+ "--raw-line=#[repr(C)]",
+ "--raw-line=pub struct aidl_android_hardware_common_fmq_GrantorDescriptor {",
+ "--raw-line= pub fdIndex: i32,",
+ "--raw-line= pub offset: i32,",
+ "--raw-line= pub extent: i64,",
+ "--raw-line=}",
+ "--opaque-type=ErasedMessageQueueDesc",
+ "--opaque-type=.*AidlMessageQueue",
+ "--with-derive-default",
+ "--no-default=MessageQueueDesc",
+ "--no-default=ErasedMessageQueue",
+ "--allowlist-function=convertGrantor",
+ "--allowlist-function=convertDesc",
+ "--allowlist-function=freeDesc",
+ "--allowlist-type=ndk::ScopedFileDescriptor",
+ "--allowlist-function=descGrantors",
+ "--allowlist-function=descNumGrantors",
+ "--allowlist-function=descHandleFDs",
+ "--allowlist-function=descHandleNumFDs",
+ "--allowlist-function=descHandleInts",
+ "--allowlist-function=descHandleNumInts",
+ "--allowlist-function=descQuantum",
+ "--allowlist-function=descFlags",
+ "--allowlist-type=\\bErasedMessageQueue",
+ "--allowlist-type=\\bErasedMessageQueueDesc",
+ "--allowlist-type=\\bMemTransaction",
+ "--allowlist-type=.*MemTransaction.*MemRegion.*",
+ "--blocklist-type=std::.+",
+ "--allowlist-function=.*ErasedMessageQueue.*",
+ "--allowlist-function=.*beginWrite",
+ "--allowlist-function=.*beginRead",
+ "--allowlist-function=.*commitWrite",
+ "--allowlist-function=.*commitRead",
+ "--allowlist-function=getAddress",
+ "--allowlist-function=getLength.*",
+ "--allowlist-function=get(First|Second)Region",
+ ],
+ rustlibs: [
+ "android.hardware.common.fmq-V1-rust",
+ "android.hardware.common-V2-rust",
+ ],
+ static_libs: [
+ "libfmq",
+ ],
+ whole_static_libs: [
+ "libfmq_erased",
+ ],
+ shared_libs: [
+ "libc++",
+ "liblog",
+ "android.hardware.common.fmq-V1-ndk",
+ ],
+ apex_available: [
+ ],
+}
+
+rust_test {
+ host_supported: true,
+ name: "libfmq_bindgen_test",
+ srcs: [":libfmq_bindgen"],
+ crate_name: "fmq_bindgen_test",
+ test_suites: ["general-tests"],
+ auto_gen_config: true,
+ clippy_lints: "none",
+ lints: "none",
+ rustlibs: [
+ "android.hardware.common.fmq-V1-rust",
+ "android.hardware.common-V2-rust",
+ ],
+}
+
+rust_library {
+ name: "libfmq_rust",
+ host_supported: true,
+ crate_name: "fmq",
+ srcs: ["libfmq.rs"],
+ edition: "2021",
+ rustlibs: [
+ "libfmq_bindgen",
+ "liblog_rust",
+ "libzerocopy",
+ ],
+ proc_macros: [],
+}
diff --git a/ErasedMessageQueue.cpp b/ErasedMessageQueue.cpp
new file mode 100644
index 0000000..7fb03aa
--- /dev/null
+++ b/ErasedMessageQueue.cpp
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ErasedMessageQueue.hpp"
+
+/*
+ * Convert a Rust NativeHandle (passed as its individual fields) to a C++ one.
+ * Duplicates the file descriptors, which are passed as integers.
+ */
+NativeHandle convertHandle(const int* fds, size_t n_fds, const int32_t* ints, size_t n_ints) {
+ std::vector<ndk::ScopedFileDescriptor> fdv;
+ for (size_t i = 0; i < n_fds; i++) {
+ fdv.push_back(ndk::ScopedFileDescriptor(dup(fds[i])));
+ }
+ std::vector<int32_t> intv(ints, ints + n_ints);
+
+ return NativeHandle(std::move(fdv), intv);
+}
+
+GrantorDescriptor convertGrantor(int32_t fdIndex, int32_t offset, int64_t extent) {
+ return GrantorDescriptor(fdIndex, offset, extent);
+}
+
+ErasedMessageQueueDesc* convertDesc(const GrantorDescriptor* grantors, size_t n_grantors,
+ const int* handle_fds, size_t handle_n_fds,
+ const int32_t* handle_ints, size_t handle_n_ints,
+ int32_t quantum, int32_t flags) {
+ std::vector<GrantorDescriptor> grantorsv(grantors, grantors + n_grantors);
+ auto&& handle = convertHandle(handle_fds, handle_n_fds, handle_ints, handle_n_ints);
+
+ return new ErasedMessageQueueDesc{
+ grantorsv,
+ std::move(handle),
+ quantum,
+ flags,
+ };
+}
+
+void freeDesc(ErasedMessageQueueDesc* desc) {
+ delete desc;
+}
+
+const GrantorDescriptor* descGrantors(const ErasedMessageQueueDesc& desc) {
+ return desc.grantors.data();
+}
+size_t descNumGrantors(const ErasedMessageQueueDesc& desc) {
+ return desc.grantors.size();
+}
+const ndk::ScopedFileDescriptor* descHandleFDs(const ErasedMessageQueueDesc& desc) {
+ return desc.handle.fds.data();
+}
+size_t descHandleNumFDs(const ErasedMessageQueueDesc& desc) {
+ return desc.handle.fds.size();
+}
+const int* descHandleInts(const ErasedMessageQueueDesc& desc) {
+ return desc.handle.ints.data();
+}
+size_t descHandleNumInts(const ErasedMessageQueueDesc& desc) {
+ return desc.handle.ints.size();
+}
+int32_t descQuantum(const ErasedMessageQueueDesc& desc) {
+ return desc.quantum;
+}
+int32_t descFlags(const ErasedMessageQueueDesc& desc) {
+ return desc.flags;
+}
+
+ErasedMessageQueue::ErasedMessageQueue(const ErasedMessageQueueDesc& desc, bool resetPointers)
+ : inner(new android::AidlMessageQueue<MQErased, SynchronizedReadWrite>(desc, resetPointers)) {}
+
+ErasedMessageQueue::ErasedMessageQueue(size_t numElementsInQueue, bool configureEventFlagWord,
+ size_t quantum)
+ : inner(new android::AidlMessageQueue<MQErased, SynchronizedReadWrite>(
+ numElementsInQueue, configureEventFlagWord, quantum)) {}
+
+bool ErasedMessageQueue::beginWrite(size_t nMessages, MemTransaction* memTx) const {
+ MessageQueueBase<AidlMQDescriptorShim, MQErased,
+ FlavorTypeToValue<SynchronizedReadWrite>::value>::MemTransaction memTxInternal;
+ auto result = inner->beginWrite(nMessages, &memTxInternal);
+ memTx->first = memTxInternal.getFirstRegion();
+ memTx->second = memTxInternal.getSecondRegion();
+ return result;
+}
+
+bool ErasedMessageQueue::commitWrite(size_t nMessages) {
+ return inner->commitWrite(nMessages);
+}
+
+bool ErasedMessageQueue::beginRead(size_t nMessages, MemTransaction* memTx) const {
+ MessageQueueBase<AidlMQDescriptorShim, MQErased,
+ FlavorTypeToValue<SynchronizedReadWrite>::value>::MemTransaction memTxInternal;
+ auto result = inner->beginRead(nMessages, &memTxInternal);
+ memTx->first = memTxInternal.getFirstRegion();
+ memTx->second = memTxInternal.getSecondRegion();
+ return result;
+}
+
+bool ErasedMessageQueue::commitRead(size_t nMessages) {
+ return inner->commitRead(nMessages);
+}
+
+ErasedMessageQueueDesc* ErasedMessageQueue::dupeDesc() {
+ return new ErasedMessageQueueDesc(inner->dupeDesc());
+}
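
As a rough illustration of how the accessor functions above are meant to be combined (this helper is not part of the patch; the name copyDescViaAccessors is made up), the same field-by-field round trip that the Rust MessageQueue<T>::dupe_desc performs can be written directly in C++:

#include "ErasedMessageQueue.hpp"

#include <vector>

// Rebuild a descriptor purely through the exported accessors. The result is
// heap-allocated and must be released with freeDesc().
ErasedMessageQueueDesc* copyDescViaAccessors(const ErasedMessageQueueDesc& desc) {
    std::vector<int> fds;
    for (size_t i = 0; i < descHandleNumFDs(desc); i++) {
        fds.push_back(descHandleFDs(desc)[i].get());
    }
    return convertDesc(descGrantors(desc), descNumGrantors(desc), fds.data(), fds.size(),
                       descHandleInts(desc), descHandleNumInts(desc), descQuantum(desc),
                       descFlags(desc));
}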
diff --git a/ErasedMessageQueue.hpp b/ErasedMessageQueue.hpp
new file mode 100644
index 0000000..b3c4357
--- /dev/null
+++ b/ErasedMessageQueue.hpp
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <fmq/AidlMessageQueue.h>
+
+using aidl::android::hardware::common::NativeHandle;
+using aidl::android::hardware::common::fmq::GrantorDescriptor;
+using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
+
+using namespace android;
+
+struct MemTransaction {
+ MessageQueueBase<AidlMQDescriptorShim, MQErased,
+ FlavorTypeToValue<SynchronizedReadWrite>::value>::MemRegion first;
+ MessageQueueBase<AidlMQDescriptorShim, MQErased,
+ FlavorTypeToValue<SynchronizedReadWrite>::value>::MemRegion second;
+};
+
+typedef MQDescriptor<MQErased, SynchronizedReadWrite> ErasedMessageQueueDesc;
+
+GrantorDescriptor convertGrantor(int32_t fdIndex, int32_t offset, int64_t extent);
+
+/**
+ * Construct a C++ AIDL MQDescriptor<MQErased, SynchronizedReadWrite> (aka an
+ * ErasedMessageQueueDesc) from the fields of a Rust AIDL
+ * MQDescriptor<MQErased, SynchronizedReadWrite>.
+ *
+ * These two types are semantically equivalent but come from separate AIDL
+ * codegen backends, so we must convert between them. To convert in the opposite
+ * direction, use the descFoo methods to access each field, and manually build
+ * the Rust AIDL MQDescriptor<MQErased, SynchronizedReadWrite> instance;
+ * see the Rust MessageQueue<T>::dupe_desc method.
+ *
+ * @param grantors Pointer to the start of the MQDescriptor's GrantorDescriptor
+ * array.
+ * @param n_grantors The length of the MQDescriptor's GrantorDescriptor array.
+ * @param handle_fds Pointer to the start of the MQDescriptor's NativeHandle's
+ * file-descriptor array. Ownership of the array and its contents is not transferred.
+ * @param handle_n_fds The corresponding length.
+ * @param handle_ints Pointer to the start of the MQDescriptor's NativeHandle's
+ * integer array. Ownership of the array is not transferred.
+ * @param handle_n_ints The corresponding length.
+ * @param quantum The MQDescriptor's quantum.
+ * @param flags The MQDescriptor's flags.
+ *
+ * @return A heap-allocated ErasedMessageQueueDesc instance owned by the caller,
+ * which must be freed with freeDesc.
+ */
+ErasedMessageQueueDesc* convertDesc(const GrantorDescriptor* grantors, size_t n_grantors,
+ const int* handle_fds, size_t handle_n_fds,
+ const int32_t* handle_ints, size_t handle_n_ints,
+ int32_t quantum, int32_t flags);
+
+/**
+ * Free a heap-allocated ErasedMessageQueueDesc. Simply calls delete.
+ *
+ * @param desc The ErasedMessageQueueDesc to free.
+ */
+void freeDesc(ErasedMessageQueueDesc* desc);
+
+/**
+ * The following functions project out individual fields of an
+ * ErasedMessageQueueDesc as FFI-safe types to enable constructing a Rust AIDL
+ * MQDescriptor<MQErased, SynchronizedReadWrite> from a C++ AIDL one. See the
+ * Rust MessageQueue<T>::dupe_desc method.
+ */
+
+const GrantorDescriptor* descGrantors(const ErasedMessageQueueDesc& desc);
+size_t descNumGrantors(const ErasedMessageQueueDesc& desc);
+const ndk::ScopedFileDescriptor* descHandleFDs(const ErasedMessageQueueDesc& desc);
+size_t descHandleNumFDs(const ErasedMessageQueueDesc& desc);
+const int* descHandleInts(const ErasedMessageQueueDesc& desc);
+size_t descHandleNumInts(const ErasedMessageQueueDesc& desc);
+int32_t descQuantum(const ErasedMessageQueueDesc& desc);
+int32_t descFlags(const ErasedMessageQueueDesc& desc);
+
+/**
+ * ErasedMessageQueue is a monomorphized wrapper around AidlMessageQueue that
+ * can be exposed through an idiomatic Rust API. It does not statically know its element
+ * type, but treats elements as opaque objects whose size is given by the
+ * MQDescriptor.
+ */
+class ErasedMessageQueue {
+ /* This must be a unique_ptr because bindgen cannot handle by-value fields
+ * of template class type. */
+ std::unique_ptr<AidlMessageQueue<MQErased, SynchronizedReadWrite>> inner;
+
+ public:
+ ErasedMessageQueue(const ErasedMessageQueueDesc& desc, bool resetPointers = true);
+ ErasedMessageQueue(size_t numElementsInQueue, bool configureEventFlagWord, size_t quantum);
+
+ /**
+ * Get a MemTransaction object to write `nMessages` elements.
+ * Once the write is performed using the information from MemTransaction,
+ * the write operation is to be committed using a call to commitWrite().
+ *
+ * @param nMessages Number of messages of the element type.
+ * @param memTx Pointer to a MemTransaction struct that describes memory to write
+ * `nMessages` items of the element type. If a write of size `nMessages` is
+ * not possible, the base addresses in the `MemTransaction` object will be
+ * set to nullptr.
+ *
+ * @return Whether it is possible to write `nMessages` items into the FMQ.
+ */
+ bool beginWrite(size_t nMessages, MemTransaction* memTx) const;
+
+ /**
+ * Commit a write of size `nMessages`. To be only used after a call to
+ * `beginWrite()`.
+ *
+ * @param nMessages number of messages of the element type to be written.
+ *
+ * @return Whether the write operation of size `nMessages` succeeded.
+ */
+ bool commitWrite(size_t nMessages);
+
+ /**
+ * Get a MemTransaction object to read `nMessages` elements.
+ * Once the read is performed using the information from MemTransaction,
+ * the read operation is to be committed using a call to `commitRead()`.
+ *
+ * @param nMessages Number of messages of the element type.
+ * @param memTx Pointer to a MemTransaction struct that describes memory to read
+ * `nMessages` items of the element type. If a read of size `nMessages` is
+ * not possible, the base pointers in the `MemTransaction` object returned
+ * will be set to nullptr.
+ *
+ * @return bool Whether it is possible to read `nMessages` items from the
+ * FMQ.
+ */
+ bool beginRead(size_t nMessages, MemTransaction* memTx) const;
+
+ /**
+ * Commit a read of size `nMessages`. To be only used after a call to
+ * `beginRead()`.
+ *
+ * @param nMessages number of messages of the element type to be read.
+ *
+ * @return bool Whether the read operation of size `nMessages` succeeded.
+ */
+ bool commitRead(size_t nMessages);
+
+ /**
+ * Create a copy of the MQDescriptor for this object. This descriptor can be
+ * sent over IPC to allow constructing a remote object that will access the
+ * same queue over shared memory.
+ *
+ * @return ErasedMessageQueueDesc The copied descriptor, which must be freed
+ * by passing it to freeDesc.
+ */
+ ErasedMessageQueueDesc* dupeDesc();
+};
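
For orientation, a minimal sketch of driving the type-erased queue from C++ using only the API declared above (illustrative, not part of the patch; the 64-element/16-byte queue and the helper name writeOneElement are assumptions):

#include "ErasedMessageQueue.hpp"

#include <cstring>

// Write a single element of `quantum` bytes into the queue.
bool writeOneElement(ErasedMessageQueue& mq, const void* element, size_t quantum) {
    MemTransaction tx;
    if (!mq.beginWrite(1 /* nMessages */, &tx)) {
        return false;
    }
    // A single element cannot wrap around the ring, so it fits in the first region.
    memcpy(tx.first.getAddress(), element, quantum);
    return mq.commitWrite(1);
}

// Example: a queue of 64 opaque 16-byte elements with no event flag word.
// ErasedMessageQueue mq(64, /* configureEventFlagWord */ false, /* quantum */ 16);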
diff --git a/EventFlag.cpp b/EventFlag.cpp
index 0a4e4c0..eb8fe34 100644
--- a/EventFlag.cpp
+++ b/EventFlag.cpp
@@ -196,6 +196,11 @@
int64_t prevTimeNs = shouldTimeOut ? android::elapsedRealtimeNano() : 0;
status_t status;
while (true) {
+ status = waitHelper(bitmask, efState, timeoutNanoSeconds);
+ if ((status != -EAGAIN) && (status != -EINTR)) {
+ break;
+ }
+
if (shouldTimeOut) {
int64_t currentTimeNs = android::elapsedRealtimeNano();
/*
@@ -210,11 +215,6 @@
break;
}
}
-
- status = waitHelper(bitmask, efState, timeoutNanoSeconds);
- if ((status != -EAGAIN) && (status != -EINTR)) {
- break;
- }
}
return status;
}
diff --git a/OWNERS b/OWNERS
index cc4d814..8b7f0e5 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,3 +1,5 @@
+# Bug component: 655781
+
smoreland@google.com
elsk@google.com
malchev@google.com
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 51c6c97..de0569f 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -7,6 +7,11 @@
"name": "fmq_test"
}
],
+ "postsubmit": [
+ {
+ "name": "libfmq_bindgen_test"
+ }
+ ],
"hwasan-presubmit": [
{
"name": "fmq_unit_tests"
diff --git a/fuzzer/Android.bp b/fuzzer/Android.bp
index 0926c09..6d15b52 100644
--- a/fuzzer/Android.bp
+++ b/fuzzer/Android.bp
@@ -51,18 +51,11 @@
libfuzzer_options: [
"max_len=50000",
],
+ use_for_presubmit: true,
+ },
+ sanitize: {
+ integer_overflow: true,
},
host_supported: true,
-
- sanitize: {
- scs: true,
- cfi: true,
- address: true,
- memtag_heap: true,
- // undefined behavior is expected
- all_undefined: false,
- // integer overflow is expected
- integer_overflow: false,
- },
}
diff --git a/fuzzer/fmq_fuzzer.cpp b/fuzzer/fmq_fuzzer.cpp
index 8c8a78e..47dd7fa 100644
--- a/fuzzer/fmq_fuzzer.cpp
+++ b/fuzzer/fmq_fuzzer.cpp
@@ -36,15 +36,23 @@
typedef int32_t payload_t;
-// The reader will wait for 10 ms
-static constexpr int kBlockingTimeoutNs = 10000000;
+// The readers/writers will wait up to 100 microseconds during blocking calls
+static constexpr int kBlockingTimeoutNs = 100000;
/*
* MessageQueueBase.h contains asserts when memory allocation fails. So we need
* to set a reasonable limit if we want to avoid those asserts.
*/
static constexpr size_t kAlignment = 8;
-static constexpr size_t kMaxNumElements = PAGE_SIZE * 10 / sizeof(payload_t) - kAlignment + 1;
+static const size_t kPageSize = getpagesize();
+static const size_t kMaxNumElements = kPageSize * 10 / sizeof(payload_t) - kAlignment + 1;
+/*
+ * Limit the custom grantor case to one page of memory.
+ * If we want to increase this, we need to make sure that each grantor's offset
+ * plus extent stays within the size of the page-aligned ashmem region that is
+ * created.
+ */
+static const size_t kMaxCustomGrantorMemoryBytes = kPageSize;
/*
* The read counter can be found in the shared memory 16 bytes before the start
@@ -72,8 +80,40 @@
typedef android::hardware::MQDescriptorSync<payload_t> MQDescSync;
typedef android::hardware::MQDescriptorUnsync<payload_t> MQDescUnsync;
-static inline uint64_t* getCounterPtr(payload_t* start, int byteOffset) {
- return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - byteOffset);
+// AIDL and HIDL have different ways of accessing the grantors
+template <typename Desc>
+uint64_t* getCounterPtr(payload_t* start, const Desc& desc, int grantorIndx);
+
+uint64_t* createCounterPtr(payload_t* start, uint32_t offset, uint32_t data_offset) {
+ // start is the address of the beginning of the FMQ data section in memory
+ // offset is the overall offset of the counter in the FMQ memory
+ // data_offset is the overall offset of the data section in the FMQ memory
+ // start - data_offset = beginning address of the FMQ memory region
+ return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - data_offset + offset);
+}
+
+uint64_t* getCounterPtr(payload_t* start, const MQDescSync& desc, int grantorIndx) {
+ uint32_t offset = desc.grantors()[grantorIndx].offset;
+ uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
+ return createCounterPtr(start, offset, data_offset);
+}
+
+uint64_t* getCounterPtr(payload_t* start, const MQDescUnsync& desc, int grantorIndx) {
+ uint32_t offset = desc.grantors()[grantorIndx].offset;
+ uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
+ return createCounterPtr(start, offset, data_offset);
+}
+
+uint64_t* getCounterPtr(payload_t* start, const AidlMQDescSync& desc, int grantorIndx) {
+ uint32_t offset = desc.grantors[grantorIndx].offset;
+ uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
+ return createCounterPtr(start, offset, data_offset);
+}
+
+uint64_t* getCounterPtr(payload_t* start, const AidlMQDescUnsync& desc, int grantorIndx) {
+ uint32_t offset = desc.grantors[grantorIndx].offset;
+ uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
+ return createCounterPtr(start, offset, data_offset);
}
template <typename Queue, typename Desc>
@@ -84,7 +124,7 @@
return;
}
FuzzedDataProvider fdp(&readerData[0], readerData.size());
- payload_t* ring = nullptr;
+ payload_t* ring = reinterpret_cast<payload_t*>(readMq.getRingBufferPtr());
while (fdp.remaining_bytes()) {
typename Queue::MemTransaction tx;
size_t numElements = fdp.ConsumeIntegralInRange<size_t>(0, kMaxNumElements);
@@ -97,11 +137,9 @@
// the ring buffer is only next to the read/write counters when there is
// no user supplied fd
if (!userFd) {
- if (ring == nullptr) {
- ring = firstStart;
- }
if (fdp.ConsumeIntegral<uint8_t>() == 1) {
- uint64_t* writeCounter = getCounterPtr(ring, kWriteCounterOffsetBytes);
+ uint64_t* writeCounter =
+ getCounterPtr(ring, desc, android::hardware::details::WRITEPTRPOS);
*writeCounter = fdp.ConsumeIntegral<uint64_t>();
}
}
@@ -124,7 +162,7 @@
FuzzedDataProvider fdp(&readerData[0], readerData.size());
do {
size_t count = fdp.remaining_bytes()
- ? fdp.ConsumeIntegralInRange<size_t>(1, readMq.getQuantumCount())
+ ? fdp.ConsumeIntegralInRange<size_t>(0, readMq.getQuantumCount() + 1)
: 1;
std::vector<payload_t> data;
data.resize(count);
@@ -142,9 +180,9 @@
void readerBlocking<MessageQueueUnsync, MQDescUnsync>(const MQDescUnsync&, std::vector<uint8_t>&,
std::atomic<size_t>&, std::atomic<size_t>&) {}
-template <typename Queue>
-void writer(Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) {
- payload_t* ring = nullptr;
+template <typename Queue, typename Desc>
+void writer(const Desc& desc, Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) {
+ payload_t* ring = reinterpret_cast<payload_t*>(writeMq.getRingBufferPtr());
while (fdp.remaining_bytes()) {
typename Queue::MemTransaction tx;
size_t numElements = 1;
@@ -159,15 +197,13 @@
// the ring buffer is only next to the read/write counters when there is
// no user supplied fd
if (!userFd) {
- if (ring == nullptr) {
- ring = firstStart;
- }
if (fdp.ConsumeIntegral<uint8_t>() == 1) {
- uint64_t* readCounter = getCounterPtr(ring, kReadCounterOffsetBytes);
+ uint64_t* readCounter =
+ getCounterPtr(ring, desc, android::hardware::details::READPTRPOS);
*readCounter = fdp.ConsumeIntegral<uint64_t>();
}
}
- *firstStart = fdp.ConsumeIntegral<payload_t>();
+ *firstStart = fdp.ConsumeIntegral<uint8_t>();
writeMq.commitWrite(numElements);
}
@@ -179,10 +215,10 @@
std::atomic<size_t>& readersNotFinished) {
android::base::ScopeGuard guard([&writersNotFinished]() { writersNotFinished--; });
while (fdp.remaining_bytes() > sizeof(size_t) && readersNotFinished > 0) {
- size_t count = fdp.ConsumeIntegralInRange<size_t>(1, writeMq.getQuantumCount());
+ size_t count = fdp.ConsumeIntegralInRange<size_t>(0, writeMq.getQuantumCount() + 1);
std::vector<payload_t> data;
for (int i = 0; i < count; i++) {
- data.push_back(fdp.ConsumeIntegral<payload_t>());
+ data.push_back(fdp.ConsumeIntegral<uint8_t>());
}
writeMq.writeBlocking(data.data(), count, kBlockingTimeoutNs);
}
@@ -197,74 +233,132 @@
std::atomic<size_t>&, std::atomic<size_t>&) {}
template <typename Queue, typename Desc>
-void fuzzAidlWithReaders(std::vector<uint8_t>& writerData,
- std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
- FuzzedDataProvider fdp(&writerData[0], writerData.size());
- bool evFlag = blocking || fdp.ConsumeBool();
- android::base::unique_fd dataFd;
- size_t bufferSize = 0;
- size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
- bool userFd = fdp.ConsumeBool();
- if (userFd) {
- // run test with our own data region
- bufferSize = numElements * sizeof(payload_t);
- dataFd.reset(::ashmem_create_region("SyncReadWrite", bufferSize));
- }
- Queue writeMq(numElements, evFlag, std::move(dataFd), bufferSize);
- if (!writeMq.isValid()) {
- LOG(ERROR) << "AIDL write mq invalid";
- return;
- }
- const auto desc = writeMq.dupeDesc();
- CHECK(desc.handle.fds[0].get() != -1);
+inline std::optional<Desc> getDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp);
- std::atomic<size_t> readersNotFinished = readerData.size();
- std::atomic<size_t> writersNotFinished = 1;
- std::vector<std::thread> readers;
- for (int i = 0; i < readerData.size(); i++) {
- if (blocking) {
- readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(desc),
- std::ref(readerData[i]), std::ref(readersNotFinished),
- std::ref(writersNotFinished));
-
+template <typename Queue, typename Desc>
+inline std::optional<Desc> getAidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
+ if (queue) {
+ // get the existing descriptor from the queue
+ Desc desc = queue->dupeDesc();
+ if (desc.handle.fds[0].get() == -1) {
+ return std::nullopt;
} else {
- readers.emplace_back(reader<Queue, Desc>, std::ref(desc), std::ref(readerData[i]),
- userFd);
+ return std::make_optional(std::move(desc));
}
- }
-
- if (blocking) {
- writerBlocking<Queue>(writeMq, fdp, writersNotFinished, readersNotFinished);
} else {
- writer<Queue>(writeMq, fdp, userFd);
- }
+ // create a custom descriptor
+ std::vector<aidl::android::hardware::common::fmq::GrantorDescriptor> grantors;
+ size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
+ for (int i = 0; i < numGrantors; i++) {
+ grantors.push_back({fdp.ConsumeIntegralInRange<int32_t>(0, 2) /* fdIndex */,
+ fdp.ConsumeIntegralInRange<int32_t>(
+ 0, kMaxCustomGrantorMemoryBytes) /* offset */,
+ fdp.ConsumeIntegralInRange<int64_t>(
+ 0, kMaxCustomGrantorMemoryBytes) /* extent */});
+ // ashmem region is kPageSize and we need to make sure all of the
+ // pointers and data region fit inside
+ if (grantors.back().offset + grantors.back().extent > kPageSize) return std::nullopt;
+ }
- for (auto& reader : readers) {
- reader.join();
+ android::base::unique_fd fd(
+ ashmem_create_region("AidlCustomGrantors", kMaxCustomGrantorMemoryBytes));
+ ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
+ aidl::android::hardware::common::NativeHandle handle;
+ handle.fds.emplace_back(fd.get());
+
+ return std::make_optional<Desc>(
+ {grantors, std::move(handle), sizeof(payload_t), fdp.ConsumeBool()});
}
}
+template <>
+inline std::optional<AidlMQDescSync> getDesc(std::unique_ptr<AidlMessageQueueSync>& queue,
+ FuzzedDataProvider& fdp) {
+ return getAidlDesc<AidlMessageQueueSync, AidlMQDescSync>(queue, fdp);
+}
+
+template <>
+inline std::optional<AidlMQDescUnsync> getDesc(std::unique_ptr<AidlMessageQueueUnsync>& queue,
+ FuzzedDataProvider& fdp) {
+ return getAidlDesc<AidlMessageQueueUnsync, AidlMQDescUnsync>(queue, fdp);
+}
+
template <typename Queue, typename Desc>
-void fuzzHidlWithReaders(std::vector<uint8_t>& writerData,
- std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
+inline std::optional<Desc> getHidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
+ if (queue) {
+ auto desc = queue->getDesc();
+ if (!desc->isHandleValid()) {
+ return std::nullopt;
+ } else {
+ return std::make_optional(std::move(*desc));
+ }
+ } else {
+ // create a custom descriptor
+ std::vector<android::hardware::GrantorDescriptor> grantors;
+ size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
+ for (int i = 0; i < numGrantors; i++) {
+ grantors.push_back({fdp.ConsumeIntegral<uint32_t>() /* flags */,
+ fdp.ConsumeIntegralInRange<uint32_t>(0, 2) /* fdIndex */,
+ fdp.ConsumeIntegralInRange<uint32_t>(
+ 0, kMaxCustomGrantorMemoryBytes) /* offset */,
+ fdp.ConsumeIntegralInRange<uint64_t>(
+ 0, kMaxCustomGrantorMemoryBytes) /* extent */});
+ // ashmem region is kPageSize and we need to make sure all of the
+ // pointers and data region fit inside
+ if (grantors.back().offset + grantors.back().extent > kPageSize) return std::nullopt;
+ }
+
+ native_handle_t* handle = native_handle_create(1, 0);
+ int ashmemFd = ashmem_create_region("HidlCustomGrantors", kMaxCustomGrantorMemoryBytes);
+ ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
+ handle->data[0] = ashmemFd;
+
+ return std::make_optional<Desc>(grantors, handle, sizeof(payload_t));
+ }
+}
+
+template <>
+inline std::optional<MQDescSync> getDesc(std::unique_ptr<MessageQueueSync>& queue,
+ FuzzedDataProvider& fdp) {
+ return getHidlDesc<MessageQueueSync, MQDescSync>(queue, fdp);
+}
+
+template <>
+inline std::optional<MQDescUnsync> getDesc(std::unique_ptr<MessageQueueUnsync>& queue,
+ FuzzedDataProvider& fdp) {
+ return getHidlDesc<MessageQueueUnsync, MQDescUnsync>(queue, fdp);
+}
+
+template <typename Queue, typename Desc>
+void fuzzWithReaders(std::vector<uint8_t>& writerData,
+ std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
FuzzedDataProvider fdp(&writerData[0], writerData.size());
bool evFlag = blocking || fdp.ConsumeBool();
- android::base::unique_fd dataFd;
- size_t bufferSize = 0;
size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
+ size_t bufferSize = numElements * sizeof(payload_t);
bool userFd = fdp.ConsumeBool();
- if (userFd) {
- // run test with our own data region
- bufferSize = numElements * sizeof(payload_t);
- dataFd.reset(::ashmem_create_region("SyncReadWrite", bufferSize));
+ bool manualGrantors = fdp.ConsumeBool();
+ std::unique_ptr<Queue> writeMq = nullptr;
+ if (manualGrantors) {
+ std::optional<Desc> customDesc(getDesc<Queue, Desc>(writeMq, fdp));
+ if (customDesc) {
+ writeMq = std::make_unique<Queue>(*customDesc);
+ }
+ } else {
+ android::base::unique_fd dataFd;
+ if (userFd) {
+ // run test with our own data region
+ dataFd.reset(::ashmem_create_region("CustomData", bufferSize));
+ }
+ writeMq = std::make_unique<Queue>(numElements, evFlag, std::move(dataFd), bufferSize);
}
- Queue writeMq(numElements, evFlag, std::move(dataFd), bufferSize);
- if (!writeMq.isValid()) {
- LOG(ERROR) << "HIDL write mq invalid";
+
+ if (writeMq == nullptr || !writeMq->isValid()) {
return;
}
- const auto desc = writeMq.getDesc();
- CHECK(desc->isHandleValid());
+ // get optional desc
+ const std::optional<Desc> desc = getDesc<Queue, Desc>(writeMq, fdp);
+ CHECK(desc != std::nullopt);
std::atomic<size_t> readersNotFinished = readerData.size();
std::atomic<size_t> writersNotFinished = 1;
@@ -281,9 +375,9 @@
}
if (blocking) {
- writerBlocking<Queue>(writeMq, fdp, writersNotFinished, readersNotFinished);
+ writerBlocking<Queue>(*writeMq, fdp, writersNotFinished, readersNotFinished);
} else {
- writer<Queue>(writeMq, fdp, userFd);
+ writer<Queue>(*desc, *writeMq, fdp, userFd);
}
for (auto& reader : readers) {
@@ -307,13 +401,11 @@
bool fuzzBlocking = fdp.ConsumeBool();
std::vector<uint8_t> writerData = fdp.ConsumeRemainingBytes<uint8_t>();
if (fuzzSync) {
- fuzzHidlWithReaders<MessageQueueSync, MQDescSync>(writerData, readerData, fuzzBlocking);
- fuzzAidlWithReaders<AidlMessageQueueSync, AidlMQDescSync>(writerData, readerData,
- fuzzBlocking);
+ fuzzWithReaders<MessageQueueSync, MQDescSync>(writerData, readerData, fuzzBlocking);
+ fuzzWithReaders<AidlMessageQueueSync, AidlMQDescSync>(writerData, readerData, fuzzBlocking);
} else {
- fuzzHidlWithReaders<MessageQueueUnsync, MQDescUnsync>(writerData, readerData, false);
- fuzzAidlWithReaders<AidlMessageQueueUnsync, AidlMQDescUnsync>(writerData, readerData,
- false);
+ fuzzWithReaders<MessageQueueUnsync, MQDescUnsync>(writerData, readerData, false);
+ fuzzWithReaders<AidlMessageQueueUnsync, AidlMQDescUnsync>(writerData, readerData, false);
}
return 0;
diff --git a/include/fmq/AidlMessageQueue.h b/include/fmq/AidlMessageQueue.h
index 0536d6a..138760f 100644
--- a/include/fmq/AidlMessageQueue.h
+++ b/include/fmq/AidlMessageQueue.h
@@ -104,6 +104,17 @@
: AidlMessageQueue(numElementsInQueue, configureEventFlagWord, android::base::unique_fd(),
0) {}
+ template <typename V = T>
+ AidlMessageQueue(size_t numElementsInQueue, bool configureEventFlagWord = false,
+ std::enable_if_t<std::is_same_v<V, MQErased>, size_t> quantum = sizeof(T))
+ : AidlMessageQueue(numElementsInQueue, configureEventFlagWord, android::base::unique_fd(),
+ 0, quantum) {}
+
+ template <typename V = T>
+ AidlMessageQueue(size_t numElementsInQueue, bool configureEventFlagWord,
+ android::base::unique_fd bufferFd, size_t bufferSize,
+ std::enable_if_t<std::is_same_v<V, MQErased>, size_t> quantum);
+
MQDescriptor<T, U> dupeDesc();
private:
@@ -124,6 +135,15 @@
numElementsInQueue, configureEventFlagWord, std::move(bufferFd), bufferSize) {}
template <typename T, typename U>
+template <typename V>
+AidlMessageQueue<T, U>::AidlMessageQueue(
+ size_t numElementsInQueue, bool configureEventFlagWord, android::base::unique_fd bufferFd,
+ size_t bufferSize, std::enable_if_t<std::is_same_v<V, MQErased>, size_t> quantum)
+ : MessageQueueBase<AidlMQDescriptorShim, T, FlavorTypeToValue<U>::value>(
+ numElementsInQueue, configureEventFlagWord, std::move(bufferFd), bufferSize,
+ quantum) {}
+
+template <typename T, typename U>
MQDescriptor<T, U> AidlMessageQueue<T, U>::dupeDesc() {
auto* shim = MessageQueueBase<AidlMQDescriptorShim, T, FlavorTypeToValue<U>::value>::getDesc();
if (shim) {
@@ -144,10 +164,10 @@
ints.push_back(shim->handle()->data[data_index]);
}
return MQDescriptor<T, U>{
- .quantum = static_cast<int32_t>(shim->getQuantum()),
.grantors = grantors,
- .flags = static_cast<int32_t>(shim->getFlags()),
.handle = {std::move(fds), std::move(ints)},
+ .quantum = static_cast<int32_t>(shim->getQuantum()),
+ .flags = static_cast<int32_t>(shim->getFlags()),
};
} else {
return MQDescriptor<T, U>();
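
The new constructor overloads only participate in overload resolution when the element type is MQErased. A minimal sketch of what they enable (illustrative, not part of the patch; the element count of 64 and the function name makeErasedDesc are assumptions):

#include <fmq/AidlMessageQueue.h>

using aidl::android::hardware::common::fmq::MQDescriptor;
using aidl::android::hardware::common::fmq::SynchronizedReadWrite;

// The element size is only known at runtime, so it is passed as the quantum.
MQDescriptor<android::MQErased, SynchronizedReadWrite> makeErasedDesc(size_t elementSize) {
    android::AidlMessageQueue<android::MQErased, SynchronizedReadWrite> queue(
            /* numElementsInQueue */ 64, /* configureEventFlagWord */ false,
            /* quantum */ elementSize);
    return queue.dupeDesc();  // can be parceled and sent to the reading side
}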
diff --git a/include/fmq/MessageQueueBase.h b/include/fmq/MessageQueueBase.h
index f6d93c9..72bdeb1 100644
--- a/include/fmq/MessageQueueBase.h
+++ b/include/fmq/MessageQueueBase.h
@@ -36,9 +36,20 @@
namespace android {
+/* Sentinel payload type indicating that the element type is erased: the real
+element type T is not statically known, so type safety must be enforced by the
+caller and the element size (quantum) comes from the MQDescriptor. This is used
+to instantiate MessageQueueBase for Rust, where we cannot generate additional
+template instantiations across the language boundary. */
+enum MQErased {};
+
template <template <typename, MQFlavor> class MQDescriptorType, typename T, MQFlavor flavor>
struct MessageQueueBase {
typedef MQDescriptorType<T, flavor> Descriptor;
+ enum Error : int {
+ NONE,
+ POINTER_CORRUPTION, /** Read/write pointers mismatch */
+ };
/**
* @param Desc MQDescriptor describing the FMQ.
@@ -65,28 +76,47 @@
*/
MessageQueueBase(size_t numElementsInQueue, bool configureEventFlagWord,
- android::base::unique_fd bufferFd, size_t bufferSize);
+ android::base::unique_fd bufferFd, size_t bufferSize)
+ : MessageQueueBase(numElementsInQueue, configureEventFlagWord, std::move(bufferFd),
+ bufferSize, sizeof(T)) {
+ /* We must not pass sizeof(T) as quantum for MQErased element type. */
+ static_assert(!std::is_same_v<T, MQErased>,
+ "MessageQueueBase<..., MQErased, ...> must be constructed via a"
+ " constructor that accepts a descriptor or a quantum size");
+ }
MessageQueueBase(size_t numElementsInQueue, bool configureEventFlagWord = false)
: MessageQueueBase(numElementsInQueue, configureEventFlagWord, android::base::unique_fd(),
0) {}
/**
+ * @param errorDetected Optional output parameter which indicates
+ * any errors that the client might care about.
+ * @param errorMessage Optional output parameter for a human-readable
+ * error description.
+ *
* @return Number of items of type T that can be written into the FMQ
* without a read.
*/
- size_t availableToWrite() const;
+ size_t availableToWrite(Error* errorDetected = nullptr,
+ std::string* errorMessage = nullptr) const;
/**
+ * @param errorDetected Optional output parameter which indicates
+ * any errors that the client might care about.
+ * @param errorMessage Optional output parameter for a human-readable
+ * error description.
+ *
* @return Number of items of type T that are waiting to be read from the
* FMQ.
*/
- size_t availableToRead() const;
+ size_t availableToRead(Error* errorDetected = nullptr,
+ std::string* errorMessage = nullptr) const;
/**
* Returns the size of type T in bytes.
*
- * @param Size of T.
+ * @return Size of T.
*/
size_t getQuantumSize() const;
@@ -270,7 +300,10 @@
/**
* Gets the length of the MemRegion in bytes.
*/
- inline size_t getLengthInBytes() const { return length * sizeof(T); }
+ template <class U = T>
+ inline std::enable_if_t<!std::is_same_v<U, MQErased>, size_t> getLengthInBytes() const {
+ return length * kQuantumValue<U>;
+ }
private:
/* Base address */
@@ -358,6 +391,11 @@
inline const MemRegion& getSecondRegion() const { return second; }
private:
+ friend MessageQueueBase<MQDescriptorType, T, flavor>;
+
+ bool copyToSized(const T* data, size_t startIdx, size_t nMessages, size_t messageSize);
+ bool copyFromSized(T* data, size_t startIdx, size_t nMessages, size_t messageSize);
+
/*
* Given a start index and the number of messages to be
* read/written, this helper method calculates the
@@ -425,9 +463,31 @@
*/
bool commitRead(size_t nMessages);
+ /**
+ * Get the pointer to the ring buffer. Useful for debugging and fuzzing.
+ */
+ uint8_t* getRingBufferPtr() const { return mRing; }
+
+ protected:
+ /**
+ * Protected constructor that can manually specify the quantum to use.
+ * The only external consumer of this ctor is ErasedMessageQueue, but the
+ * constructor cannot be private because this is a base class.
+ *
+ * @param quantum Size of the element type, in bytes.
+ * Other parameters have semantics given in the corresponding public ctor.
+ */
+ MessageQueueBase(size_t numElementsInQueue, bool configureEventFlagWord,
+ android::base::unique_fd bufferFd, size_t bufferSize, size_t quantum);
+
private:
- size_t availableToWriteBytes() const;
- size_t availableToReadBytes() const;
+ template <class U = T,
+ typename std::enable_if<!std::is_same<U, MQErased>::value, bool>::type = true>
+ static constexpr size_t kQuantumValue = sizeof(T);
+ inline size_t quantum() const;
+ size_t availableToWriteBytes(Error* errorDetected, std::string* errorMessage) const;
+ size_t availableToReadBytes(Error* errorDetected, std::string* errorMessage) const;
MessageQueueBase(const MessageQueueBase& other) = delete;
MessageQueueBase& operator=(const MessageQueueBase& other) = delete;
@@ -459,6 +519,8 @@
* lifetime.
*/
android::hardware::EventFlag* mEventFlag = nullptr;
+
+ const size_t kPageSize = getpagesize();
};
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
@@ -515,6 +577,19 @@
bool MessageQueueBase<MQDescriptorType, T, flavor>::MemTransaction::copyFrom(T* data,
size_t startIdx,
size_t nMessages) {
+ if constexpr (!std::is_same<T, MQErased>::value) {
+ return copyFromSized(data, startIdx, nMessages, kQuantumValue<T>);
+ } else {
+ /* Compile error. */
+ static_assert(!std::is_same<T, MQErased>::value,
+ "copyFrom without messageSize argument cannot be used with MQErased (use "
+ "copyFromSized)");
+ }
+}
+
+template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
+bool MessageQueueBase<MQDescriptorType, T, flavor>::MemTransaction::copyFromSized(
+ T* data, size_t startIdx, size_t nMessages, size_t messageSize) {
if (data == nullptr) {
return false;
}
@@ -532,11 +607,11 @@
}
if (firstReadCount != 0) {
- memcpy(data, firstBaseAddress, firstReadCount * sizeof(T));
+ memcpy(data, firstBaseAddress, firstReadCount * messageSize);
}
if (secondReadCount != 0) {
- memcpy(data + firstReadCount, secondBaseAddress, secondReadCount * sizeof(T));
+ memcpy(data + firstReadCount, secondBaseAddress, secondReadCount * messageSize);
}
return true;
@@ -546,6 +621,19 @@
bool MessageQueueBase<MQDescriptorType, T, flavor>::MemTransaction::copyTo(const T* data,
size_t startIdx,
size_t nMessages) {
+ if constexpr (!std::is_same<T, MQErased>::value) {
+ return copyToSized(data, startIdx, nMessages, kQuantumValue<T>);
+ } else {
+ /* Compile error. */
+ static_assert(!std::is_same<T, MQErased>::value,
+ "copyTo without messageSize argument cannot be used with MQErased (use "
+ "copyToSized)");
+ }
+}
+
+template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
+bool MessageQueueBase<MQDescriptorType, T, flavor>::MemTransaction::copyToSized(
+ const T* data, size_t startIdx, size_t nMessages, size_t messageSize) {
if (data == nullptr) {
return false;
}
@@ -563,11 +651,11 @@
}
if (firstWriteCount != 0) {
- memcpy(firstBaseAddress, data, firstWriteCount * sizeof(T));
+ memcpy(firstBaseAddress, data, firstWriteCount * messageSize);
}
if (secondWriteCount != 0) {
- memcpy(secondBaseAddress, data + firstWriteCount, secondWriteCount * sizeof(T));
+ memcpy(secondBaseAddress, data + firstWriteCount, secondWriteCount * messageSize);
}
return true;
@@ -583,7 +671,7 @@
(mDesc->countGrantors() < hardware::details::kMinGrantorCount)) {
return;
}
- if (mDesc->getQuantum() != sizeof(T)) {
+ if (mDesc->getQuantum() != quantum()) {
hardware::details::logError(
"Payload size differs between the queue instantiation and the "
"MQDescriptor.");
@@ -647,7 +735,8 @@
MessageQueueBase<MQDescriptorType, T, flavor>::MessageQueueBase(const Descriptor& Desc,
bool resetPointers) {
mDesc = std::unique_ptr<Descriptor>(new (std::nothrow) Descriptor(Desc));
- if (mDesc == nullptr) {
+ if (mDesc == nullptr || mDesc->getSize() == 0) {
+ hardware::details::logError("MQDescriptor is invalid or queue size is 0.");
return;
}
@@ -658,18 +747,22 @@
MessageQueueBase<MQDescriptorType, T, flavor>::MessageQueueBase(size_t numElementsInQueue,
bool configureEventFlagWord,
android::base::unique_fd bufferFd,
- size_t bufferSize) {
+ size_t bufferSize, size_t quantum) {
// Check if the buffer size would not overflow size_t
- if (numElementsInQueue > SIZE_MAX / sizeof(T)) {
+ if (numElementsInQueue > SIZE_MAX / quantum) {
hardware::details::logError("Requested message queue size too large. Size of elements: " +
- std::to_string(sizeof(T)) +
+ std::to_string(quantum) +
". Number of elements: " + std::to_string(numElementsInQueue));
return;
}
- if (bufferFd != -1 && numElementsInQueue * sizeof(T) > bufferSize) {
+ if (numElementsInQueue == 0) {
+ hardware::details::logError("Requested queue size of 0.");
+ return;
+ }
+ if (bufferFd != -1 && numElementsInQueue * quantum > bufferSize) {
hardware::details::logError("The supplied buffer size(" + std::to_string(bufferSize) +
") is smaller than the required size(" +
- std::to_string(numElementsInQueue * sizeof(T)) + ").");
+ std::to_string(numElementsInQueue * quantum) + ").");
return;
}
/*
@@ -677,7 +770,7 @@
* read and write pointer counters. If an EventFlag word is to be configured,
* we also need to allocate memory for the same.
*/
- size_t kQueueSizeBytes = numElementsInQueue * sizeof(T);
+ size_t kQueueSizeBytes = numElementsInQueue * quantum;
size_t kMetaDataSize = 2 * sizeof(android::hardware::details::RingBufferPosition);
if (configureEventFlagWord) {
@@ -693,12 +786,12 @@
if (bufferFd != -1) {
// Allocate read counter and write counter only. User-supplied memory will be used for the
// ringbuffer.
- kAshmemSizePageAligned = (kMetaDataSize + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+ kAshmemSizePageAligned = (kMetaDataSize + kPageSize - 1) & ~(kPageSize - 1);
} else {
// Allocate ringbuffer, read counter and write counter.
kAshmemSizePageAligned = (hardware::details::alignToWordBoundary(kQueueSizeBytes) +
- kMetaDataSize + PAGE_SIZE - 1) &
- ~(PAGE_SIZE - 1);
+ kMetaDataSize + kPageSize - 1) &
+ ~(kPageSize - 1);
}
/*
@@ -756,10 +849,10 @@
}
mDesc = std::unique_ptr<Descriptor>(new (std::nothrow)
- Descriptor(grantors, mqHandle, sizeof(T)));
+ Descriptor(grantors, mqHandle, quantum));
} else {
mDesc = std::unique_ptr<Descriptor>(new (std::nothrow) Descriptor(
- kQueueSizeBytes, mqHandle, sizeof(T), configureEventFlagWord));
+ kQueueSizeBytes, mqHandle, quantum, configureEventFlagWord));
}
if (mDesc == nullptr) {
native_handle_close(mqHandle);
@@ -771,10 +864,10 @@
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
MessageQueueBase<MQDescriptorType, T, flavor>::~MessageQueueBase() {
- if (flavor == kUnsynchronizedWrite && mReadPtr != nullptr) {
- delete mReadPtr;
- } else if (mReadPtr != nullptr) {
+ if (flavor == kSynchronizedReadWrite && mReadPtr != nullptr) {
unmapGrantorDescr(mReadPtr, hardware::details::READPTRPOS);
+ } else if (mReadPtr != nullptr) {
+ delete mReadPtr;
}
if (mWritePtr != nullptr) {
unmapGrantorDescr(mWritePtr, hardware::details::WRITEPTRPOS);
@@ -801,8 +894,8 @@
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
bool MessageQueueBase<MQDescriptorType, T, flavor>::write(const T* data, size_t nMessages) {
MemTransaction tx;
- return beginWrite(nMessages, &tx) && tx.copyTo(data, 0 /* startIdx */, nMessages) &&
- commitWrite(nMessages);
+ return beginWrite(nMessages, &tx) &&
+ tx.copyToSized(data, 0 /* startIdx */, nMessages, quantum()) && commitWrite(nMessages);
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
@@ -1038,18 +1131,53 @@
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
-size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToWriteBytes() const {
- return mDesc->getSize() - availableToReadBytes();
+inline size_t MessageQueueBase<MQDescriptorType, T, flavor>::quantum() const {
+ if constexpr (std::is_same<T, MQErased>::value) {
+ return mDesc->getQuantum();
+ } else {
+ return kQuantumValue<T>;
+ }
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
-size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToWrite() const {
- return availableToWriteBytes() / sizeof(T);
+size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToWriteBytes(
+ Error* errorDetected, std::string* errorMessage) const {
+ size_t queueSizeBytes = mDesc->getSize();
+ Error localErrorDetected = Error::NONE;
+ size_t availableBytes = availableToReadBytes(&localErrorDetected, errorMessage);
+ if (localErrorDetected != Error::NONE) {
+ if (errorDetected != nullptr) {
+ *errorDetected = localErrorDetected;
+ }
+ return 0;
+ }
+ if (queueSizeBytes < availableBytes) {
+ std::string errorMsg =
+ "The write or read pointer has become corrupted. Writing to the queue is no "
+ "longer possible. Queue size: " +
+ std::to_string(queueSizeBytes) + ", available: " + std::to_string(availableBytes);
+ hardware::details::logError(errorMsg);
+ if (errorDetected != nullptr) {
+ *errorDetected = Error::POINTER_CORRUPTION;
+ }
+ if (errorMessage != nullptr) {
+ *errorMessage = std::move(errorMsg);
+ }
+ return 0;
+ }
+ return queueSizeBytes - availableBytes;
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
-size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToRead() const {
- return availableToReadBytes() / sizeof(T);
+size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToWrite(
+ Error* errorDetected, std::string* errorMessage) const {
+ return availableToWriteBytes(errorDetected, errorMessage) / quantum();
+}
+
+template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
+size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToRead(
+ Error* errorDetected, std::string* errorMessage) const {
+ return availableToReadBytes(errorDetected, errorMessage) / quantum();
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
@@ -1067,7 +1195,7 @@
}
auto writePtr = mWritePtr->load(std::memory_order_relaxed);
- if (writePtr % sizeof(T) != 0) {
+ if (writePtr % quantum() != 0) {
hardware::details::logError(
"The write pointer has become misaligned. Writing to the queue is no longer "
"possible.");
@@ -1080,7 +1208,7 @@
* From writeOffset, the number of messages that can be written
* contiguously without wrapping around the ring buffer are calculated.
*/
- size_t contiguousMessages = (mDesc->getSize() - writeOffset) / sizeof(T);
+ size_t contiguousMessages = (mDesc->getSize() - writeOffset) / quantum();
if (contiguousMessages < nMessages) {
/*
@@ -1109,7 +1237,7 @@
*/
__attribute__((no_sanitize("integer"))) bool
MessageQueueBase<MQDescriptorType, T, flavor>::commitWrite(size_t nMessages) {
- size_t nBytesWritten = nMessages * sizeof(T);
+ size_t nBytesWritten = nMessages * quantum();
auto writePtr = mWritePtr->load(std::memory_order_relaxed);
writePtr += nBytesWritten;
mWritePtr->store(writePtr, std::memory_order_release);
@@ -1121,20 +1249,37 @@
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
-size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToReadBytes() const {
+size_t MessageQueueBase<MQDescriptorType, T, flavor>::availableToReadBytes(
+ Error* errorDetected, std::string* errorMessage) const {
/*
* This method is invoked by implementations of both read() and write() and
* hence requires a memory_order_acquired load for both mReadPtr and
* mWritePtr.
*/
- return mWritePtr->load(std::memory_order_acquire) - mReadPtr->load(std::memory_order_acquire);
+ uint64_t writePtr = mWritePtr->load(std::memory_order_acquire);
+ uint64_t readPtr = mReadPtr->load(std::memory_order_acquire);
+ if (writePtr < readPtr) {
+ std::string errorMsg =
+ "The write or read pointer has become corrupted. Reading from the queue is no "
+ "longer possible. Write pointer: " +
+ std::to_string(writePtr) + ", read pointer: " + std::to_string(readPtr);
+ hardware::details::logError(errorMsg);
+ if (errorDetected != nullptr) {
+ *errorDetected = Error::POINTER_CORRUPTION;
+ }
+ if (errorMessage != nullptr) {
+ *errorMessage = std::move(errorMsg);
+ }
+ return 0;
+ }
+ return writePtr - readPtr;
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
bool MessageQueueBase<MQDescriptorType, T, flavor>::read(T* data, size_t nMessages) {
MemTransaction tx;
- return beginRead(nMessages, &tx) && tx.copyFrom(data, 0 /* startIdx */, nMessages) &&
- commitRead(nMessages);
+ return beginRead(nMessages, &tx) &&
+ tx.copyFromSized(data, 0 /* startIdx */, nMessages, quantum()) && commitRead(nMessages);
}
template <template <typename, MQFlavor> typename MQDescriptorType, typename T, MQFlavor flavor>
@@ -1159,7 +1304,7 @@
* stores to mReadPtr from a different thread.
*/
auto readPtr = mReadPtr->load(std::memory_order_relaxed);
- if (writePtr % sizeof(T) != 0 || readPtr % sizeof(T) != 0) {
+ if (writePtr % quantum() != 0 || readPtr % quantum() != 0) {
hardware::details::logError(
"The write or read pointer has become misaligned. Reading from the queue is no "
"longer possible.");
@@ -1172,7 +1317,7 @@
return false;
}
- size_t nBytesDesired = nMessages * sizeof(T);
+ size_t nBytesDesired = nMessages * quantum();
/*
* Return if insufficient data to read in FMQ.
*/
@@ -1185,7 +1330,7 @@
* From readOffset, the number of messages that can be read contiguously
* without wrapping around the ring buffer are calculated.
*/
- size_t contiguousMessages = (mDesc->getSize() - readOffset) / sizeof(T);
+ size_t contiguousMessages = (mDesc->getSize() - readOffset) / quantum();
if (contiguousMessages < nMessages) {
/*
@@ -1226,7 +1371,7 @@
return false;
}
- size_t nBytesRead = nMessages * sizeof(T);
+ size_t nBytesRead = nMessages * quantum();
readPtr += nBytesRead;
mReadPtr->store(readPtr, std::memory_order_release);
return true;
@@ -1273,7 +1418,7 @@
}
/*
- * Offset for mmap must be a multiple of PAGE_SIZE.
+ * Offset for mmap must be a multiple of kPageSize.
*/
if (!hardware::details::isAlignedToWordBoundary(grantors[grantorIdx].offset)) {
hardware::details::logError("Grantor (index " + std::to_string(grantorIdx) +
@@ -1282,8 +1427,31 @@
return nullptr;
}
- int mapOffset = (grantors[grantorIdx].offset / PAGE_SIZE) * PAGE_SIZE;
- if (grantors[grantorIdx].extent < 0 || grantors[grantorIdx].extent > INT_MAX - PAGE_SIZE) {
+ /*
+ * Check that the well-known grantors are at least their minimum required size
+ */
+ for (uint32_t i = 0; i < grantors.size(); i++) {
+ switch (i) {
+ case hardware::details::READPTRPOS:
+ if (grantors[i].extent < sizeof(uint64_t)) return nullptr;
+ break;
+ case hardware::details::WRITEPTRPOS:
+ if (grantors[i].extent < sizeof(uint64_t)) return nullptr;
+ break;
+ case hardware::details::DATAPTRPOS:
+ // We don't require a specific data region size
+ break;
+ case hardware::details::EVFLAGWORDPOS:
+ if (grantors[i].extent < sizeof(uint32_t)) return nullptr;
+ break;
+ default:
+ // We don't care about unknown grantors
+ break;
+ }
+ }
+
+ int mapOffset = (grantors[grantorIdx].offset / kPageSize) * kPageSize;
+ if (grantors[grantorIdx].extent < 0 || grantors[grantorIdx].extent > INT_MAX - kPageSize) {
hardware::details::logError(std::string("Grantor (index " + std::to_string(grantorIdx) +
") extent value is too large or negative: " +
std::to_string(grantors[grantorIdx].extent)));
@@ -1293,6 +1461,13 @@
void* address = mmap(0, mapLength, PROT_READ | PROT_WRITE, MAP_SHARED, handle->data[fdIndex],
mapOffset);
+ if (address == MAP_FAILED && errno == EPERM && flavor == kUnsynchronizedWrite) {
+ // If the supplied memory is read-only, it would fail with EPERM.
+ // Try again to mmap read-only for the kUnsynchronizedWrite case.
+ // kSynchronizedReadWrite cannot use read-only memory because the
+ // read pointer is stored in the shared memory as well.
+ address = mmap(0, mapLength, PROT_READ, MAP_SHARED, handle->data[fdIndex], mapOffset);
+ }
if (address == MAP_FAILED) {
hardware::details::logError(std::string("mmap failed: ") + std::to_string(errno));
return nullptr;
@@ -1314,7 +1489,7 @@
return;
}
- int mapOffset = (grantors[grantorIdx].offset / PAGE_SIZE) * PAGE_SIZE;
+ int mapOffset = (grantors[grantorIdx].offset / kPageSize) * kPageSize;
int mapLength = grantors[grantorIdx].offset - mapOffset + grantors[grantorIdx].extent;
void* baseAddress =
reinterpret_cast<uint8_t*>(address) - (grantors[grantorIdx].offset - mapOffset);
@@ -1328,4 +1503,4 @@
}
}
-} // namespace hardware
+} // namespace android
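
A brief sketch of how a client might consume the new optional error outputs on availableToRead()/availableToWrite() (illustrative only, not part of the patch; the int32_t payload type and the helper name checkedAvailableToRead are assumptions):

#include <fmq/AidlMessageQueue.h>

#include <cstdio>
#include <string>

using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using Queue = android::AidlMessageQueue<int32_t, SynchronizedReadWrite>;

// Returns the readable item count, reporting pointer corruption instead of
// silently treating the queue as empty.
size_t checkedAvailableToRead(const Queue& queue) {
    Queue::Error error = Queue::Error::NONE;
    std::string errorMessage;
    size_t available = queue.availableToRead(&error, &errorMessage);
    if (error == Queue::Error::POINTER_CORRUPTION) {
        fprintf(stderr, "FMQ unusable: %s\n", errorMessage.c_str());
    }
    return available;
}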
diff --git a/libfmq.rs b/libfmq.rs
new file mode 100644
index 0000000..a215a02
--- /dev/null
+++ b/libfmq.rs
@@ -0,0 +1,435 @@
+//! libfmq Rust wrapper
+
+/*
+* Copyright (C) 2024 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+use fmq_bindgen::{
+ convertDesc, convertGrantor, descFlags, descGrantors, descHandleFDs, descHandleInts,
+ descHandleNumFDs, descHandleNumInts, descNumGrantors, descQuantum, freeDesc,
+ ndk_ScopedFileDescriptor, ErasedMessageQueue, ErasedMessageQueueDesc, GrantorDescriptor,
+ MQDescriptor, MemTransaction, NativeHandle, ParcelFileDescriptor, SynchronizedReadWrite,
+};
+
+use std::ptr::addr_of_mut;
+
+use log::error;
+
+/// A trait indicating that a type is safe to pass through shared memory.
+///
+/// # Safety
+///
+/// This requires that the type must not contain any capabilities such as file
+/// descriptors or heap allocations, and that it must be permitted to access
+/// all bytes of its representation (so it must not contain any padding bytes).
+///
+/// Because being stored in shared memory allows the type to be accessed
+/// from different processes, it may also be accessed from different threads in
+/// the same process. As such, `Sync` is a supertrait of `Share`.
+pub unsafe trait Share: Sync {}
+
+// SAFETY: `zerocopy::AsBytes + FromBytes` types contain no capabilities or
+// padding bytes, so every byte of their representation may be safely shared.
+unsafe impl<T: zerocopy::AsBytes + zerocopy::FromBytes + Send + Sync> Share for T {}
+
+/// An IPC message queue for values of type T.
+pub struct MessageQueue<T> {
+ inner: ErasedMessageQueue,
+ ty: core::marker::PhantomData<T>,
+}
+
+/** A write completion from the MessageQueue::write() method.
+
+This completion mutably borrows the MessageQueue to prevent concurrent writes;
+these must be forbidden because the underlying AidlMessageQueue only stores the
+number of outstanding writes, not which of them have completed, so they
+must complete in order. */
+#[must_use]
+pub struct WriteCompletion<'a, T: Share> {
+ inner: MemTransaction,
+ queue: &'a mut MessageQueue<T>,
+ n_elems: usize,
+ n_written: usize,
+}
+
+impl<'a, T: Share> WriteCompletion<'a, T> {
+ /// Obtain a pointer to the location at which the idx'th item should be
+ /// stored.
+ ///
+ /// The returned pointer is only valid while `self` has not been dropped and
+ /// is invalidated by any call to `self.write`. The pointer should be used
+ /// with `std::ptr::write` or a DMA API to initialize the underlying storage
+ /// before calling `assume_written` to indicate how many elements were
+ /// written.
+ ///
+ /// It is only permitted to access at most `contiguous_count(idx)` items
+ /// via offsets from the returned address.
+ ///
+ /// Calling this method with a greater `idx` may return a pointer into a
+ /// different memory region, whose size may differ from the first.
+ pub fn ptr(&self, idx: usize) -> *mut T {
+ if idx >= self.n_elems {
+ panic!(
+ "indexing out of bound: ReadCompletion for {} elements but idx {} accessed",
+ self.n_elems, idx
+ )
+ }
+ ptr(&self.inner, idx)
+ }
+
+ /// Return the number of contiguous elements that may be stored starting at
+ /// the given index in the backing buffer.
+ ///
+ /// Intended for use with the `ptr` method.
+ ///
+ /// Returns 0 if `idx` is greater than or equal to the completion's element
+ /// count.
+ pub fn contiguous_count(&self, idx: usize) -> usize {
+ contiguous_count(&self.inner, idx, self.n_elems)
+ }
+
+ /// Returns how many elements still must be written to this WriteCompletion
+ /// before dropping it.
+ pub fn required_elements(&self) -> usize {
+ assert!(self.n_written <= self.n_elems);
+ self.n_elems - self.n_written
+ }
+
+ /// Write one item to `self`. Fails and returns the item if `self` is full.
+ pub fn write(&mut self, data: T) -> Result<(), T> {
+ if self.required_elements() > 0 {
+ // SAFETY: `self.ptr(self.n_written)` is known to be uninitialized.
+ // The dtor of data, if any, will not run because `data` is moved
+ // out of here.
+ unsafe { self.ptr(self.n_written).write(data) };
+ self.n_written += 1;
+ Ok(())
+ } else {
+ Err(data)
+ }
+ }
+
+ /// Promise to the `WriteCompletion` that `n_newly_written` elements have
+ /// been written with unsafe code or DMA to the pointer returned by the
+ /// `ptr` method.
+ ///
+ /// Panics if `n_newly_written` exceeds the number of elements yet required.
+ ///
+ /// # Safety
+ /// It is UB to call this method except after calling the `ptr` method and
+ /// writing the specified number of values of type T to that location.
+ pub unsafe fn assume_written(&mut self, n_newly_written: usize) {
+ assert!(n_newly_written <= self.required_elements());
+ self.n_written += n_newly_written;
+ }
+}
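+
+// Illustrative sketch (not part of this change) of the raw-pointer path documented on
+// `ptr`/`assume_written`: initialize the storage directly, then promise how many
+// elements were written. A DMA engine could fill the region instead of the loop.
+//
+//     fn fill_raw(wc: &mut WriteCompletion<'_, u32>) {
+//         let n = wc.contiguous_count(0);
+//         for i in 0..n {
+//             // SAFETY: slot `i` is within the completion and still uninitialized.
+//             unsafe { wc.ptr(i).write(i as u32) };
+//         }
+//         // SAFETY: exactly `n` elements starting at index 0 were just written.
+//         unsafe { wc.assume_written(n) };
+//     }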
+
+impl<'a, T: Share> Drop for WriteCompletion<'a, T> {
+ fn drop(&mut self) {
+ if self.n_written < self.n_elems {
+ error!(
+ "WriteCompletion dropped without writing to all elements ({}/{} written)",
+ self.n_written, self.n_elems
+ );
+ }
+ let txn = std::mem::take(&mut self.inner);
+ self.queue.commit_write(txn);
+ }
+}
+
+impl<T: Share> MessageQueue<T> {
+ const fn type_size() -> usize {
+ std::mem::size_of::<T>()
+ }
+
+ /// Create a new MessageQueue with capacity for `elems` elements.
+ pub fn new(elems: usize, event_word: bool) -> Self {
+ Self {
+ // SAFETY: Calling bindgen'd constructor. The only argument that
+ // can't be validated by the implementation is the quantum, which
+ // must equal the element size.
+ inner: unsafe { ErasedMessageQueue::new1(elems, event_word, Self::type_size()) },
+ ty: core::marker::PhantomData,
+ }
+ }
+
+ /// Create a MessageQueue connected to another existing instance from its
+ /// descriptor.
+ pub fn from_desc(desc: &MQDescriptor<T, SynchronizedReadWrite>, reset_pointers: bool) -> Self {
+ let mut grantors = desc
+ .grantors
+ .iter()
+ // SAFETY: this just forwards the integers to the GrantorDescriptor
+ // constructor; GrantorDescriptor is POD.
+ .map(|g| unsafe { convertGrantor(g.fdIndex, g.offset, g.extent) })
+ .collect::<Vec<_>>();
+
+ // SAFETY: These pointer/length pairs come from Vecs that will outlive
+ // this function call, and the call itself copies all data it needs out
+ // of them.
+ let cpp_desc = unsafe {
+ convertDesc(
+ grantors.as_mut_ptr(),
+ grantors.len(),
+ desc.handle.fds.as_ptr().cast(),
+ desc.handle.fds.len(),
+ desc.handle.ints.as_ptr(),
+ desc.handle.ints.len(),
+ desc.quantum,
+ desc.flags,
+ )
+ };
+ // SAFETY: Calling bindgen'd constructor which does not store cpp_desc,
+ // but just passes it to the initializer of AidlMQDescriptorShim, which
+ // deep-copies it.
+ let inner = unsafe { ErasedMessageQueue::new(cpp_desc, reset_pointers) };
+ // SAFETY: we must free the desc returned by convertDesc; the pointer
+ // was just returned above so we know it is valid.
+ unsafe { freeDesc(cpp_desc) };
+ Self { inner, ty: core::marker::PhantomData }
+ }
+
+ /// Obtain a copy of the MessageQueue's descriptor, which may be used to
+ /// access it remotely.
+ pub fn dupe_desc(&mut self) -> MQDescriptor<T, SynchronizedReadWrite> {
+ // SAFETY: dupeDesc may be called on any valid ErasedMessageQueue; it
+ // simply forwards to dupeDesc on the inner AidlMessageQueue and wraps
+ // in a heap allocation.
+ let erased_desc: *mut ErasedMessageQueueDesc = unsafe { self.inner.dupeDesc() };
+ let grantor_to_rust =
+ |g: &fmq_bindgen::aidl_android_hardware_common_fmq_GrantorDescriptor| {
+ GrantorDescriptor { fdIndex: g.fdIndex, offset: g.offset, extent: g.extent }
+ };
+
+ let scoped_to_parcel_fd = |fd: &ndk_ScopedFileDescriptor| {
+ use std::os::fd::{BorrowedFd, FromRawFd, OwnedFd};
+ // SAFETY: the fd is already open as an invariant of ndk::ScopedFileDescriptor, so
+ // it will not be -1, as required by BorrowedFd.
+ let borrowed = unsafe { BorrowedFd::borrow_raw(fd._base as i32) };
+ ParcelFileDescriptor::new(match borrowed.try_clone_to_owned() {
+ Ok(fd) => fd,
+ Err(e) => {
+ error!("could not dup NativeHandle fd {}: {}", fd._base, e);
+ // SAFETY: OwnedFd requires the fd is not -1. If we failed to dup the fd,
+ // other code downstream will fail, but we can do no better than pass it on.
+ unsafe { OwnedFd::from_raw_fd(fd._base as i32) }
+ }
+ })
+ };
+
+ // First, we create a desc with the wrong element type, because we cannot create one of
+ // our desired return type out of whole cloth unless T implements Default. That Default
+ // requirement is superfluous (T::default() is never called), so we then transmute to our
+ // desired type.
+ let desc = MQDescriptor::<(), SynchronizedReadWrite>::default();
+ // SAFETY: This transmute changes only the element type parameter of the MQDescriptor. The
+ // layout of an MQDescriptor does not depend on T as T appears in it only in PhantomData.
+ let mut desc: MQDescriptor<T, SynchronizedReadWrite> = unsafe { std::mem::transmute(desc) };
+ // SAFETY: These slices are created out of the pointer and length pairs exposed by the
+ // individual descFoo accessors, so we know they are valid pointer/lengths and point to
+ // data that will continue to exist as long as the desc does.
+ //
+ // Calls to the descFoo accessors on erased_desc are sound because we know inner.dupeDesc
+ // returns a valid pointer to a new heap-allocated ErasedMessageQueueDesc.
+ let (grantors, fds, ints, quantum, flags) = unsafe {
+ use std::slice::from_raw_parts;
+ let grantors = from_raw_parts(descGrantors(erased_desc), descNumGrantors(erased_desc));
+ let fds = from_raw_parts(descHandleFDs(erased_desc), descHandleNumFDs(erased_desc));
+ let ints = from_raw_parts(descHandleInts(erased_desc), descHandleNumInts(erased_desc));
+ let quantum = descQuantum(erased_desc);
+ let flags = descFlags(erased_desc);
+ (grantors, fds, ints, quantum, flags)
+ };
+ let fds = fds.iter().map(scoped_to_parcel_fd).collect();
+ let ints = ints.to_vec();
+ desc.grantors = grantors.iter().map(grantor_to_rust).collect();
+ desc.handle = NativeHandle { fds, ints };
+ desc.quantum = quantum;
+ desc.flags = flags;
+ // SAFETY: we must free the desc returned by dupeDesc; the pointer was
+ // just returned above so we know it is valid.
+ unsafe { freeDesc(erased_desc) };
+ desc
+ }
+
+ /// Begin a write transaction. The returned WriteCompletion can be used to
+ /// write into the region allocated for the transaction.
+ pub fn write(&mut self) -> Option<WriteCompletion<T>> {
+ self.write_many(1)
+ }
+
+ /// Begin a write transaction for multiple items. See `MessageQueue::write`.
+ pub fn write_many(&mut self, n: usize) -> Option<WriteCompletion<T>> {
+ let txn = self.begin_write(n)?;
+ Some(WriteCompletion { inner: txn, queue: self, n_elems: n, n_written: 0 })
+ }
+
+ fn commit_write(&mut self, txn: MemTransaction) -> bool {
+ // SAFETY: simply calls commitWrite with the total number of elements
+ // written across both of the txn's MemRegions.
+ unsafe { self.inner.commitWrite(txn.first.length + txn.second.length) }
+ }
+
+ fn begin_write(&self, n: usize) -> Option<MemTransaction> {
+ let mut txn: MemTransaction = Default::default();
+ // SAFETY: we pass a raw pointer to txn, which is used only during the
+ // call to beginWrite to write the txn's MemRegion fields, which are raw
+ // pointers and lengths pointing into the queue. The pointer to txn is
+ // not stored.
+ unsafe { self.inner.beginWrite(n, addr_of_mut!(txn)) }.then_some(txn)
+ }
+}
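+
+// Illustrative usage sketch (not part of this change): the safe write path. Dropping the
+// completion commits the transaction; a request that does not fit returns `None`.
+//
+//     fn push_all(mq: &mut MessageQueue<i32>, items: &[i32]) -> bool {
+//         match mq.write_many(items.len()) {
+//             Some(mut wc) => {
+//                 for &item in items {
+//                     wc.write(item).expect("completion was sized for items.len()");
+//                 }
+//                 true // the write is committed when `wc` drops here
+//             }
+//             None => false,
+//         }
+//     }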
+
+#[inline(always)]
+fn ptr<T: Share>(txn: &MemTransaction, idx: usize) -> *mut T {
+ let (base, region_idx) = if idx < txn.first.length {
+ (txn.first.address, idx)
+ } else {
+ (txn.second.address, idx - txn.first.length)
+ };
+ let byte_count: usize = region_idx.checked_mul(MessageQueue::<T>::type_size()).unwrap();
+ base.wrapping_byte_offset(byte_count.try_into().unwrap()) as *mut T
+}
+
+#[inline(always)]
+fn contiguous_count(txn: &MemTransaction, idx: usize, n_elems: usize) -> usize {
+ if idx >= n_elems {
+ return 0;
+ }
+ if idx < txn.first.length {
+ // Remaining contiguous elements in the first region.
+ txn.first.length - idx
+ } else {
+ // The index falls in the second region; count from its offset within that region.
+ txn.second.length - (idx - txn.first.length)
+ }
+}
+
+/** A read completion from the MessageQueue::read() method.
+
+This completion mutably borrows the MessageQueue to prevent concurrent reads;
+these must be forbidden because the underlying AidlMessageQueue only stores the
+number of outstanding reads, not which have and have not completed, so they
+must complete in order. */
+#[must_use]
+pub struct ReadCompletion<'a, T: Share> {
+ inner: MemTransaction,
+ queue: &'a mut MessageQueue<T>,
+ n_elems: usize,
+ n_read: usize,
+}
+
+impl<'a, T: Share> ReadCompletion<'a, T> {
+ /// Obtain a pointer to the location at which the idx'th item is located.
+ ///
+ /// The returned pointer is only valid while `self` has not been dropped and
+ /// is invalidated by any call to `self.read`. The pointer should be used
+ /// with `std::ptr::read` or a DMA API before calling `assume_read` to
+ /// indicate how many elements were read.
+ ///
+ /// It is only permitted to access at most `contiguous_count(idx)` items
+ /// via offsets from the returned address.
+ ///
+ /// Calling this method with a greater `idx` may return a pointer into a
+ /// different memory region, whose size may differ from the first.
+ pub fn ptr(&self, idx: usize) -> *mut T {
+ if idx >= self.n_elems {
+ panic!(
+ "indexing out of bound: ReadCompletion for {} elements but idx {} accessed",
+ self.n_elems, idx
+ )
+ }
+ ptr(&self.inner, idx)
+ }
+
+ /// Return the number of contiguous elements available starting at the given
+ /// index in the backing buffer.
+ ///
+ /// Intended for use with the `ptr` method.
+ ///
+ /// Returns 0 if `idx` is greater than or equal to the completion's element
+ /// count.
+ pub fn contiguous_count(&self, idx: usize) -> usize {
+ contiguous_count(&self.inner, idx, self.n_elems)
+ }
+
+ /// Returns how many elements still must be read from `self` before dropping
+ /// it.
+ pub fn unread_elements(&self) -> usize {
+ assert!(self.n_read <= self.n_elems);
+ self.n_elems - self.n_read
+ }
+
+ /// Read one item from `self`. Returns `None` if `self` is empty.
+ pub fn read(&mut self) -> Option<T> {
+ if self.unread_elements() > 0 {
+ // SAFETY: `self.ptr(self.n_read)` is known to be filled with a valid
+ // instance of type `T`.
+ let data = unsafe { self.ptr(self.n_read).read() };
+ self.n_read += 1;
+ Some(data)
+ } else {
+ None
+ }
+ }
+
+ /// Promise to the `ReadCompletion` that `n_newly_read` elements have
+ /// been read with unsafe code or DMA from the pointer returned by the
+ /// `ptr` method.
+ ///
+ /// Panics if `n_newly_read` exceeds the number of elements still unread.
+ ///
+ /// Calling this method without actually reading the elements will result
+ /// in them being leaked without destructors (if any) running.
+ pub fn assume_read(&mut self, n_newly_read: usize) {
+ assert!(n_newly_read <= self.unread_elements());
+ self.n_read += n_newly_read;
+ }
+}
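+
+// Illustrative drain sketch (not part of this change): read every element covered by the
+// completion before it drops, so committing the read does not discard unread data.
+//
+//     fn drain(rc: &mut ReadCompletion<'_, i32>) -> Vec<i32> {
+//         let mut out = Vec::with_capacity(rc.unread_elements());
+//         while let Some(item) = rc.read() {
+//             out.push(item);
+//         }
+//         out
+//     }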
+
+impl<'a, T: Share> Drop for ReadCompletion<'a, T> {
+ fn drop(&mut self) {
+ if self.n_read < self.n_elems {
+ error!(
+ "ReadCompletion dropped without reading all elements ({}/{} read)",
+ self.n_read, self.n_elems
+ );
+ }
+ let txn = std::mem::take(&mut self.inner);
+ self.queue.commit_read(txn);
+ }
+}
+
+impl<T: Share> MessageQueue<T> {
+ /// Begin a read transaction. The returned `ReadCompletion` can be used to
+ /// read from the region allocated for the transaction.
+ pub fn read(&mut self) -> Option<ReadCompletion<T>> {
+ self.read_many(1)
+ }
+
+ /// Begin a read transaction for multiple items. See `MessageQueue::read`.
+ pub fn read_many(&mut self, n: usize) -> Option<ReadCompletion<T>> {
+ let txn = self.begin_read(n)?;
+ Some(ReadCompletion { inner: txn, queue: self, n_elems: n, n_read: 0 })
+ }
+
+ fn commit_read(&mut self, txn: MemTransaction) -> bool {
+ // SAFETY: simply calls commitRead with the total number of elements
+ // read across both of the txn's MemRegions.
+ unsafe { self.inner.commitRead(txn.first.length + txn.second.length) }
+ }
+
+ fn begin_read(&self, n: usize) -> Option<MemTransaction> {
+ let mut txn: MemTransaction = Default::default();
+ // SAFETY: we pass a raw pointer to txn, which is used only during the
+ // call to beginRead to write the txn's MemRegion fields, which are raw
+ // pointers and lengths pointing into the queue. The pointer to txn is
+ // not stored.
+ unsafe { self.inner.beginRead(n, addr_of_mut!(txn)) }.then_some(txn)
+ }
+}
diff --git a/tests/Android.bp b/tests/Android.bp
index 55eb1c7..64e4ba5 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -23,16 +23,55 @@
main: "fmq_test.py",
srcs: ["fmq_test.py"],
test_config: "fmq_test.xml",
- target_required: [
+ data_device_bins_both: [
"android.hardware.tests.msgq@1.0-service-test",
+ "android.hardware.tests.msgq@1.0-rust-service-test",
"fmq_test_client",
+ "fmq_rust_test_client",
],
- test_suites: ["general-tests", "vts"],
+ test_suites: [
+ "general-tests",
+ "vts",
+ ],
test_options: {
unit_test: false,
},
}
+rust_test {
+ name: "fmq_rust_test_client",
+ team: "trendy_team_android_kernel",
+ srcs: ["msgq_rust_test_client.rs"],
+ crate_name: "fmq_rust_test_client",
+ rustlibs: [
+ "android.fmq.test-rust",
+ "android.hardware.common.fmq-V1-rust",
+ "libbinder_rs",
+ "libfmq_rust",
+ "liblibc",
+ ],
+ shared_libs: [
+ "libutils",
+ "libcutils",
+ "libbase",
+ "libfmq",
+ ],
+ compile_multilib: "both",
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+ test_suites: [
+ "general-tests",
+ "vts",
+ ],
+ auto_gen_config: false,
+}
+
cc_test {
name: "fmq_test_client",
tidy_timeout_srcs: ["msgq_test_client.cpp"],
@@ -67,7 +106,7 @@
],
whole_static_libs: [
"android.hardware.tests.msgq@1.0-impl",
- "android.fmq.test-impl"
+ "android.fmq.test-impl",
],
compile_multilib: "both",
@@ -79,12 +118,37 @@
suffix: "64",
},
},
- test_suites: ["general-tests", "vts"],
+ test_suites: [
+ "general-tests",
+ "vts",
+ ],
auto_gen_config: false,
}
+// C++-usable FFI library so we can test the Rust wrapper against C++ clients
+rust_ffi_static {
+ name: "libfmq_rust_test",
+ host_supported: true,
+ crate_name: "fmq_rust_test",
+ srcs: ["fmq_test.rs"],
+ edition: "2021",
+ shared_libs: [
+ "libbase",
+ "liblog",
+ "libcutils",
+ "libfmq",
+ "android.hardware.common.fmq-V1-ndk",
+ ],
+ rustlibs: [
+ "libfmq_rust",
+ "android.hardware.common.fmq-V1-rust",
+ ],
+ proc_macros: [],
+}
+
cc_test {
name: "fmq_unit_tests",
+ host_supported: true,
tidy_timeout_srcs: ["fmq_unit_tests.cpp"],
srcs: ["fmq_unit_tests.cpp"],
@@ -98,6 +162,7 @@
],
static_libs: [
"android.hardware.common.fmq-V1-ndk",
+ "libfmq_rust_test",
],
cflags: [
diff --git a/tests/aidl/Android.bp b/tests/aidl/Android.bp
index e210fda..7345d3b 100644
--- a/tests/aidl/Android.bp
+++ b/tests/aidl/Android.bp
@@ -24,6 +24,9 @@
},
cpp: {
enabled: false,
- }
+ },
+ rust: {
+ enabled: true,
+ },
},
}
diff --git a/tests/aidl/default/Android.bp b/tests/aidl/default/Android.bp
index 35bd043..7e1cc5d 100644
--- a/tests/aidl/default/Android.bp
+++ b/tests/aidl/default/Android.bp
@@ -17,3 +17,60 @@
"TestAidlMsgQ.cpp",
],
}
+
+rust_library {
+ edition: "2021",
+ name: "libfmq_test_service_rust_impl",
+ srcs: ["TestAidlMsgQ.rs"],
+
+ rustlibs: [
+ "libfmq_rust",
+ ],
+ shared_libs: [
+ "libbase",
+ "libcutils",
+ "libutils",
+ ],
+ rlibs: [
+ "android.hardware.common.fmq-V1-rust",
+ "android.fmq.test-rust",
+ ],
+
+ crate_name: "fmq_test_service_rust_impl",
+ host_supported: true,
+ compile_multilib: "both",
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+}
+
+rust_binary {
+ name: "android.hardware.tests.msgq@1.0-rust-service-test",
+ team: "trendy_team_android_kernel",
+ srcs: ["mq_test_service.rs"],
+ edition: "2021",
+ prefer_rlib: true,
+
+ rustlibs: [
+ "libfmq_test_service_rust_impl",
+ "android.fmq.test-rust",
+ "android.hardware.common.fmq-V1-rust",
+ "liblog_rust",
+ ],
+
+ host_supported: true,
+ compile_multilib: "both",
+ multilib: {
+ lib32: {
+ suffix: "32",
+ },
+ lib64: {
+ suffix: "64",
+ },
+ },
+}
diff --git a/tests/aidl/default/TestAidlMsgQ.cpp b/tests/aidl/default/TestAidlMsgQ.cpp
index 485e75b..d230993 100644
--- a/tests/aidl/default/TestAidlMsgQ.cpp
+++ b/tests/aidl/default/TestAidlMsgQ.cpp
@@ -60,7 +60,14 @@
*_aidl_return = false;
} else {
*mqDesc = std::move(mFmqUnsynchronized->dupeDesc());
- *_aidl_return = true;
+ // set write-protection so readers can't mmap and write
+ int res = ashmem_set_prot_region(mqDesc->handle.fds[0].get(), PROT_READ);
+ if (res == -1) {
+ ALOGE("Failed to set write protection: %s", strerror(errno));
+ *_aidl_return = false;
+ } else {
+ *_aidl_return = true;
+ }
}
return ndk::ScopedAStatus::ok();
diff --git a/tests/aidl/default/TestAidlMsgQ.rs b/tests/aidl/default/TestAidlMsgQ.rs
new file mode 100644
index 0000000..cbfcead
--- /dev/null
+++ b/tests/aidl/default/TestAidlMsgQ.rs
@@ -0,0 +1,149 @@
+//! This module implements the ITestAidlMsgQ AIDL interface
+
+/*
+* Copyright (C) 2024 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+use android_fmq_test::aidl::android::fmq::test::ITestAidlMsgQ::ITestAidlMsgQ;
+use android_fmq_test::binder::{self, Interface, Result as BinderResult};
+
+/// Struct implementing the ITestAidlMsgQ AIDL interface
+#[derive(Default)]
+pub struct MsgQTestService {
+ queue_sync: std::sync::Mutex<Option<fmq::MessageQueue<i32>>>,
+}
+
+impl Interface for MsgQTestService {}
+
+use android_hardware_common_fmq::aidl::android::hardware::common::fmq::{
+ MQDescriptor::MQDescriptor, SynchronizedReadWrite::SynchronizedReadWrite,
+ UnsynchronizedWrite::UnsynchronizedWrite,
+};
+
+impl ITestAidlMsgQ for MsgQTestService {
+ /**
+ * This method requests the service to set up a synchronous read/write
+ * wait-free FMQ using the input descriptor with the client as reader.
+ *
+ * @param mqDesc This structure describes the FMQ that was set up by the
+ * client. Server uses this descriptor to set up a FMQ object at its end.
+ *
+ * @return True if the setup is successful.
+ */
+ fn configureFmqSyncReadWrite(
+ &self,
+ mq_desc: &MQDescriptor<i32, SynchronizedReadWrite>,
+ ) -> BinderResult<bool> {
+ *self.queue_sync.lock().unwrap() = Some(fmq::MessageQueue::from_desc(mq_desc, false));
+ /* TODO(b/339999649) in C++ we set the EventFlag word with bit FMQ_NOT_FULL: */
+ /*auto evFlagWordPtr = mFmqSynchronized->getEventFlagWord();
+ if (evFlagWordPtr != nullptr) {
+ std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(EventFlagBits::FMQ_NOT_FULL));
+ }*/
+
+ Ok(true)
+ }
+
+ /**
+ * This method requests the service to read from the synchronized read/write
+ * FMQ.
+ *
+ * @param count Number of messages to read.
+ *
+ * @return True if the read operation was successful.
+ */
+ fn requestReadFmqSync(&self, count: i32) -> BinderResult<bool> {
+ let mut queue_guard = self.queue_sync.lock().unwrap();
+ let Some(ref mut mq) = *queue_guard else {
+ return Err(binder::Status::new_service_specific_error_str(107, Some("no fmq set up")));
+ };
+ let rc = mq.read_many(count.try_into().unwrap());
+ match rc {
+ Some(mut rc) => {
+ for _ in 0..count {
+ rc.read().unwrap();
+ }
+ Ok(true)
+ }
+ None => {
+ eprintln!("failed to read_many({count})");
+ Ok(false)
+ }
+ }
+ }
+
+ /**
+ * This method requests the service to write into the synchronized read/write
+ * flavor of the FMQ.
+ *
+ * @param count Number of messages to write.
+ *
+ * @return True if the write operation was successful.
+ */
+ fn requestWriteFmqSync(&self, count: i32) -> BinderResult<bool> {
+ let mut queue_guard = self.queue_sync.lock().unwrap();
+ let Some(ref mut mq) = *queue_guard else {
+ return Err(binder::Status::new_service_specific_error_str(107, Some("no fmq set up")));
+ };
+ let wc = mq.write_many(count.try_into().unwrap());
+ match wc {
+ Some(mut wc) => {
+ for i in 0..count {
+ wc.write(i).unwrap();
+ }
+ drop(wc);
+ Ok(true)
+ }
+ None => {
+ eprintln!("failed to write_many({count})");
+ Ok(false)
+ }
+ }
+ }
+
+ fn getFmqUnsyncWrite(
+ &self,
+ _: bool,
+ _: bool,
+ _: &mut MQDescriptor<i32, UnsynchronizedWrite>,
+ ) -> BinderResult<bool> {
+ // The Rust interface to FMQ does not support `UnsynchronizedWrite`.
+ Ok(false)
+ }
+
+ /**
+ * This method requests the service to trigger a blocking read.
+ *
+ * @param count Number of messages to read.
+ *
+ */
+ fn requestBlockingRead(&self, _: i32) -> BinderResult<()> {
+ todo!("b/339999649")
+ }
+ fn requestBlockingReadDefaultEventFlagBits(&self, _: i32) -> BinderResult<()> {
+ todo!("b/339999649")
+ }
+ fn requestBlockingReadRepeat(&self, _: i32, _: i32) -> BinderResult<()> {
+ todo!("b/339999649")
+ }
+ fn requestReadFmqUnsync(&self, _: i32) -> BinderResult<bool> {
+ // The Rust interface to FMQ does not support `UnsynchronizedWrite`.
+ Ok(false)
+ }
+ fn requestWriteFmqUnsync(&self, _: i32) -> BinderResult<bool> {
+ // The Rust interface to FMQ does not support `UnsynchronizedWrite`.
+ Ok(false)
+ }
+}
diff --git a/tests/aidl/default/mq_test_service.rs b/tests/aidl/default/mq_test_service.rs
new file mode 100644
index 0000000..fe53f5c
--- /dev/null
+++ b/tests/aidl/default/mq_test_service.rs
@@ -0,0 +1,41 @@
+//! Test service implementation for FMQ.
+
+/*
+* Copyright (C) 2024 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+use android_fmq_test::aidl::android::fmq::test::ITestAidlMsgQ::BnTestAidlMsgQ;
+
+use fmq_test_service_rust_impl::MsgQTestService;
+
+use android_fmq_test::binder;
+
+fn main() {
+ binder::ProcessState::start_thread_pool();
+
+ let service: MsgQTestService = Default::default();
+
+ const SERVICE_IDENTIFIER: &str = "android.fmq.test.ITestAidlMsgQ/default";
+ log::info!("instance: {SERVICE_IDENTIFIER}");
+
+ // Register AIDL service
+ let test_service_binder =
+ BnTestAidlMsgQ::new_binder(service, binder::BinderFeatures::default());
+ binder::add_service(SERVICE_IDENTIFIER, test_service_binder.as_binder())
+ .expect("Failed to register service");
+ binder::ProcessState::join_thread_pool();
+
+ std::process::exit(1); // Should not be reached
+}
diff --git a/tests/fmq_test.py b/tests/fmq_test.py
index 68114ad..ce5de63 100644
--- a/tests/fmq_test.py
+++ b/tests/fmq_test.py
@@ -29,13 +29,16 @@
class TestFmq(unittest.TestCase):
pass
-def make_test(client, server):
+def make_test(client, server, client_filter=None):
def test(self):
try:
run_cmd("adb shell killall %s >/dev/null 2>&1" % client, ignore_error=True)
run_cmd("adb shell killall %s >/dev/null 2>&1" % server, ignore_error=True)
run_cmd("adb shell \"( %s ) </dev/null >/dev/null 2>&1 &\"" % server)
- run_cmd("adb shell %s" % client)
+ client_cmd = client
+ if client_filter:
+ client_cmd += " --gtest_filter=" + client_filter
+ run_cmd("adb shell %s" % client_cmd)
finally:
run_cmd("adb shell killall %s >/dev/null 2>&1" % client, ignore_error=True)
run_cmd("adb shell killall %s >/dev/null 2>&1" % server, ignore_error=True)
@@ -48,26 +51,46 @@
clients = []
servers = []
+ def add_clients_and_servers(clients: list[str], servers: list[str], base: str):
+ clients += [
+ base + "/fmq_test_client/fmq_test_client",
+ base + "/fmq_rust_test_client/fmq_rust_test_client",
+ ]
+ servers += [base + "/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test"]
+ servers += [base + "/android.hardware.tests.msgq@1.0-rust-service-test/android.hardware.tests.msgq@1.0-rust-service-test"]
+
if has_bitness(32):
- clients += ["/data/nativetest/fmq_test_client/fmq_test_client"]
- servers += ["/data/nativetest/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test"]
+ add_clients_and_servers(clients, servers, "/data/nativetest")
if has_bitness(64):
- clients += ["/data/nativetest64/fmq_test_client/fmq_test_client"]
- servers += ["/data/nativetest64/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test"]
+ add_clients_and_servers(clients, servers, "/data/nativetest64")
assert len(clients) > 0
assert len(servers) > 0
- def short_name(binary):
- if "64" in binary:
+ def bitness(binary_path: str) -> str:
+ if "64" in binary_path:
return "64"
return "32"
+ def short_name(binary_path: str) -> str:
+ base = "rust" if "rust" in binary_path else ""
+ return base + bitness(binary_path)
+
for client in clients:
for server in servers:
test_name = 'test_%s_to_%s' % (short_name(client), short_name(server))
- test = make_test(client, server)
+ # Tests in the C++ test client that are fully supported by the Rust test server
+ rust_tests = ":".join([
+ # Only run AIDL tests 0 and 2, not HIDL tests 1 and 3
+ "SynchronizedReadWriteClient/0.*",
+ "SynchronizedReadWriteClient/2.*",
+ # Skip blocking tests until the Rust FMQ interface supports them: TODO(b/339999649)
+ "-*Blocking*",
+ ])
+ # Enable a subset of tests when testing the C++ client against the Rust server
+ gtest_filter = rust_tests if "rust" in server and "rust" not in client else None
+ test = make_test(client, server, gtest_filter)
setattr(TestFmq, test_name, test)
suite = unittest.TestLoader().loadTestsFromTestCase(TestFmq)
diff --git a/tests/fmq_test.rs b/tests/fmq_test.rs
new file mode 100644
index 0000000..149e3a6
--- /dev/null
+++ b/tests/fmq_test.rs
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Crate to wrap tests of libfmq rust bindings with a trivial C-ABI interface
+//! to test them from C++.
+
+use fmq::MessageQueue;
+
+macro_rules! assert_return {
+ ($e: expr) => {
+ if !$e {
+ eprintln!(stringify!($e));
+ return false;
+ }
+ };
+ ($e: expr, $msg: expr) => {
+ if !$e {
+ eprintln!($msg);
+ return false;
+ }
+ };
+}
+
+fn test_body() -> bool {
+ let mut mq = MessageQueue::<u8>::new(500, false);
+
+ match mq.write_many(4) {
+ Some(mut wc) => {
+ wc.write(200).unwrap();
+ wc.write(201).unwrap();
+ wc.write(202).unwrap();
+ wc.write(203).unwrap();
+ }
+ None => {
+ eprintln!("failed to write_many(4)");
+ return false;
+ }
+ };
+
+ let desc = mq.dupe_desc();
+ let join_handle = std::thread::spawn(move || {
+ let mut mq2 = MessageQueue::from_desc(&desc, false);
+ match mq2.read_many(1) {
+ Some(mut rc) => {
+ assert_return!(rc.read() == Some(200));
+ }
+ None => {
+ eprintln!("failed to read_many(1)");
+ return false;
+ }
+ };
+ true
+ });
+
+ assert_return!(join_handle.join().ok() == Some(true));
+
+ match mq.read_many(3) {
+ Some(mut rc) => {
+ assert_return!(rc.read() == Some(201));
+ assert_return!(rc.read() == Some(202));
+ assert_return!(rc.read() == Some(203));
+ drop(rc);
+ }
+ None => {
+ eprintln!("failed to read_many(4)");
+ return false;
+ }
+ };
+
+ true
+}
+
+/// Test fmq from Rust. Returns 0 on failure, 1 on success.
+#[no_mangle]
+pub extern "C" fn fmq_rust_test() -> u8 {
+ test_body() as u8
+}
diff --git a/tests/fmq_test.xml b/tests/fmq_test.xml
index f7b1f16..fa9841a 100644
--- a/tests/fmq_test.xml
+++ b/tests/fmq_test.xml
@@ -25,9 +25,13 @@
<option name="push" value="fmq_test_client32->/data/nativetest/fmq_test_client/fmq_test_client" />
<option name="push" value="fmq_test_client64->/data/nativetest64/fmq_test_client/fmq_test_client" />
+ <option name="push" value="fmq_rust_test_client32->/data/nativetest/fmq_rust_test_client/fmq_rust_test_client" />
+ <option name="push" value="fmq_rust_test_client64->/data/nativetest64/fmq_rust_test_client/fmq_rust_test_client" />
<option name="push" value="android.hardware.tests.msgq@1.0-service-test32->/data/nativetest/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test" />
<option name="push" value="android.hardware.tests.msgq@1.0-service-test64->/data/nativetest64/android.hardware.tests.msgq@1.0-service-test/android.hardware.tests.msgq@1.0-service-test" />
+ <option name="push" value="android.hardware.tests.msgq@1.0-rust-service-test32->/data/nativetest/android.hardware.tests.msgq@1.0-rust-service-test/android.hardware.tests.msgq@1.0-rust-service-test" />
+ <option name="push" value="android.hardware.tests.msgq@1.0-rust-service-test64->/data/nativetest64/android.hardware.tests.msgq@1.0-rust-service-test/android.hardware.tests.msgq@1.0-rust-service-test" />
<option name="cleanup" value="true" />
</target_preparer>
diff --git a/tests/fmq_unit_tests.cpp b/tests/fmq_unit_tests.cpp
index 33972b8..e1c962e 100644
--- a/tests/fmq_unit_tests.cpp
+++ b/tests/fmq_unit_tests.cpp
@@ -24,6 +24,7 @@
#include <sys/resource.h>
#include <atomic>
#include <cstdlib>
+#include <filesystem>
#include <sstream>
#include <thread>
@@ -412,19 +413,26 @@
close(ashmemFd);
}
-// If this test fails and we do leak FDs, the next test will cause a crash
+long numFds() {
+ return std::distance(std::filesystem::directory_iterator("/proc/self/fd"),
+ std::filesystem::directory_iterator{});
+}
+
TEST_F(AidlOnlyBadQueueConfig, LookForLeakedFds) {
- size_t numElementsInQueue = SIZE_MAX / sizeof(uint32_t) - PAGE_SIZE - 1;
- struct rlimit rlim;
- ASSERT_EQ(getrlimit(RLIMIT_NOFILE, &rlim), 0);
- for (int i = 0; i <= rlim.rlim_cur + 1; i++) {
+ // Create and destroy a large number of queues so that, if we were leaking
+ // FDs, we could detect it by looking at the number of FDs opened by this
+ // test process.
+ constexpr uint32_t kNumQueues = 100;
+ const size_t kPageSize = getpagesize();
+ size_t numElementsInQueue = SIZE_MAX / sizeof(uint32_t) - kPageSize - 1;
+ long numFdsBefore = numFds();
+ for (int i = 0; i < kNumQueues; i++) {
android::AidlMessageQueue<uint32_t, SynchronizedReadWrite> fmq(numElementsInQueue);
ASSERT_FALSE(fmq.isValid());
}
- // try to get another FD
- int fd = ashmem_create_region("test", 100);
- ASSERT_NE(fd, -1);
- close(fd);
+ long numFdsAfter = numFds();
+ EXPECT_LT(numFdsAfter, kNumQueues);
+ EXPECT_EQ(numFdsAfter, numFdsBefore);
}
TEST_F(Hidl2AidlOperation, ConvertDescriptorsSync) {
@@ -732,6 +740,78 @@
}
/*
+ * Test EventFlag wait on a waked flag with a short timeout.
+ */
+TYPED_TEST(BlockingReadWrites, ShortEventFlagWaitWithWakeTest) {
+ std::atomic<uint32_t> eventFlagWord;
+ std::atomic_init(&eventFlagWord, static_cast<uint32_t>(kFmqNotFull));
+ android::hardware::EventFlag* efGroup = nullptr;
+ android::status_t status =
+ android::hardware::EventFlag::createEventFlag(&eventFlagWord, &efGroup);
+ ASSERT_EQ(android::NO_ERROR, status);
+ ASSERT_NE(nullptr, efGroup);
+
+ status = efGroup->wake(kFmqNotEmpty);
+ ASSERT_EQ(android::NO_ERROR, status);
+
+ uint32_t efState = 0;
+ android::status_t ret = efGroup->wait(kFmqNotEmpty, &efState, 1 /* ns */, true /* retry */);
+ ASSERT_EQ(android::NO_ERROR, ret);
+
+ status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
+ ASSERT_EQ(android::NO_ERROR, status);
+}
+
+/*
+ * Test on an EventFlag with no wakeup, short timeout.
+ */
+TYPED_TEST(BlockingReadWrites, ShortEventFlagWaitWithoutWakeTest) {
+ std::atomic<uint32_t> eventFlagWord;
+ std::atomic_init(&eventFlagWord, static_cast<uint32_t>(kFmqNotFull));
+ android::hardware::EventFlag* efGroup = nullptr;
+ android::status_t status =
+ android::hardware::EventFlag::createEventFlag(&eventFlagWord, &efGroup);
+ ASSERT_EQ(android::NO_ERROR, status);
+ ASSERT_NE(nullptr, efGroup);
+
+ uint32_t efState = 0;
+ android::status_t ret = efGroup->wait(kFmqNotEmpty, &efState, 1 /* ns */, true /* retry */);
+ ASSERT_EQ(android::TIMED_OUT, ret);
+
+ status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
+ ASSERT_EQ(android::NO_ERROR, status);
+}
+
+/*
+ * Test FMQ write and read with event flag wait.
+ */
+TYPED_TEST(BlockingReadWrites, FmqWriteAndReadWithShortEventFlagWaitTest) {
+ android::hardware::EventFlag* efGroup = nullptr;
+ android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);
+ ASSERT_EQ(android::NO_ERROR, status);
+ ASSERT_NE(nullptr, efGroup);
+
+ /*
+ * After waiting for some time write into the FMQ
+ * and call Wake on kFmqNotEmpty.
+ */
+ const size_t dataLen = 16;
+ uint8_t dataW[dataLen] = {0};
+ uint8_t dataR[dataLen] = {0};
+ ASSERT_TRUE(this->mQueue->write(dataW, dataLen));
+ status = efGroup->wake(kFmqNotEmpty);
+ ASSERT_EQ(android::NO_ERROR, status);
+
+ ASSERT_TRUE(this->mQueue->readBlocking(dataR, dataLen, static_cast<uint32_t>(kFmqNotEmpty),
+ static_cast<uint32_t>(kFmqNotFull), 1 /* timeOutNanos */,
+ efGroup));
+ ASSERT_EQ(0, memcmp(dataW, dataR, dataLen));
+
+ status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
+ ASSERT_EQ(android::NO_ERROR, status);
+}
+
+/*
* Test that odd queue sizes do not cause unaligned error
* on access to EventFlag object.
*/
@@ -1239,3 +1319,21 @@
ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
ASSERT_EQ(data, readData);
}
+
+/*
+ * Ensure that the template specialization of MessageQueueBase to element types
+ * other than MQErased exposes its static knowledge of element size.
+ */
+TEST(MessageQueueErasedTest, MQErasedCompiles) {
+ auto txn = AidlMessageQueueSync::MemRegion();
+ txn.getLengthInBytes();
+}
+
+extern "C" uint8_t fmq_rust_test(void);
+
+/*
+ * Test using the FMQ from Rust.
+ */
+TEST(RustInteropTest, Simple) {
+ ASSERT_EQ(fmq_rust_test(), 1);
+}
diff --git a/tests/msgq_rust_test_client.rs b/tests/msgq_rust_test_client.rs
new file mode 100644
index 0000000..bad57a7
--- /dev/null
+++ b/tests/msgq_rust_test_client.rs
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+use android_fmq_test::aidl::android::fmq::test::ITestAidlMsgQ::ITestAidlMsgQ;
+use binder::Strong;
+use fmq::MessageQueue;
+
+fn wait_get_test_service() -> Result<Strong<dyn ITestAidlMsgQ>, String> {
+ const SERVICE_IDENTIFIER: &str = "android.fmq.test.ITestAidlMsgQ/default";
+ let service = binder::get_interface::<dyn ITestAidlMsgQ>(SERVICE_IDENTIFIER)
+ .map_err(|e| format!("Failed to connect to service {SERVICE_IDENTIFIER}: {e}"))?;
+ Ok(service)
+}
+
+fn setup_test_service() -> (MessageQueue<i32>, Strong<dyn ITestAidlMsgQ>) {
+ let service = wait_get_test_service().expect("failed to obtain test service");
+
+ /* SAFETY: `sysconf` simply returns an integer. */
+ let page_size: usize = unsafe { libc::sysconf(libc::_SC_PAGESIZE) }.try_into().unwrap();
+ let num_elements_in_sync_queue: usize = (page_size - 16) / std::mem::size_of::<i32>();
+
+ /* Create a queue on the client side. */
+ let mut mq = MessageQueue::<i32>::new(
+ num_elements_in_sync_queue,
+ true, /* configure event flag word */
+ );
+ let desc = mq.dupe_desc();
+
+ let result = service.configureFmqSyncReadWrite(&desc);
+ assert!(result.is_ok(), "configuring event queue failed");
+
+ (mq, service)
+}
+
+mod synchronized_read_write_client {
+ use super::*;
+
+ /*
+ * Write a small number of messages to FMQ. Request
+ * mService to read and verify that the write was successful.
+ */
+ #[test]
+ fn small_input_writer_test() {
+ let (mut mq, service) = setup_test_service();
+ const DATA_LEN: usize = 16;
+
+ let data: [i32; DATA_LEN] = init_data();
+ let mut wc = mq.write_many(DATA_LEN).expect("write_many(DATA_LEN) failed");
+ for x in data {
+ wc.write(x).expect("writing i32 failed");
+ }
+ drop(wc);
+ let ret = service.requestReadFmqSync(DATA_LEN as _);
+ assert!(ret.is_ok());
+ }
+}
+
+fn init_data<const N: usize>() -> [i32; N] {
+ let mut data = [0; N];
+ for (i, elem) in data.iter_mut().enumerate() {
+ *elem = i as _;
+ }
+ data
+}
diff --git a/tests/msgq_test_client.cpp b/tests/msgq_test_client.cpp
index 1ff9d50..53a971e 100644
--- a/tests/msgq_test_client.cpp
+++ b/tests/msgq_test_client.cpp
@@ -44,6 +44,7 @@
using android::hardware::tests::msgq::V1_0::ITestMsgQ;
// libhidl
+using android::hardware::isHidlSupported;
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;
using android::hardware::MessageQueue;
@@ -61,7 +62,8 @@
typedef android::hardware::MessageQueue<int32_t, kSynchronizedReadWrite> MessageQueueSync;
typedef android::hardware::MessageQueue<int32_t, kUnsynchronizedWrite> MessageQueueUnsync;
static const std::string kServiceName = "BnTestAidlMsgQ";
-static constexpr size_t kNumElementsInSyncQueue = (PAGE_SIZE - 16) / sizeof(int32_t);
+static const size_t kPageSize = getpagesize();
+static const size_t kNumElementsInSyncQueue = (kPageSize - 16) / sizeof(int32_t);
enum class SetupType {
SINGLE_FD,
@@ -97,6 +99,7 @@
static std::shared_ptr<ITestAidlMsgQ> waitGetTestService() {
const std::string instance = std::string() + ITestAidlMsgQ::descriptor + "/default";
ndk::SpAIBinder binder(AServiceManager_getService(instance.c_str()));
+ CHECK(nullptr != binder);
return ITestAidlMsgQ::fromBinder(binder);
}
bool configureFmqSyncReadWrite(AidlMessageQueueSync* mq) {
@@ -123,11 +126,17 @@
class ClientSyncTestBase<MessageQueueSync> : public ::testing::Test {
protected:
static sp<ITestMsgQ> waitGetTestService() {
- android::hardware::details::setTrebleTestingOverride(true);
- // waitForHwService is required because ITestMsgQ is not in manifest.xml.
- // "Real" HALs shouldn't be doing this.
- waitForHwService(ITestMsgQ::descriptor, "default");
- return ITestMsgQ::getService();
+ if (isHidlSupported()) {
+ android::hardware::details::setTrebleTestingOverride(true);
+ // waitForHwService is required because ITestMsgQ is not in manifest.xml.
+ // "Real" HALs shouldn't be doing this.
+ waitForHwService(ITestMsgQ::descriptor, "default");
+ sp<ITestMsgQ> service = ITestMsgQ::getService();
+ CHECK(nullptr != service);
+ return service;
+ } else {
+ return nullptr;
+ }
}
bool configureFmqSyncReadWrite(MessageQueueSync* mq) {
auto ret = mService->configureFmqSyncReadWrite(*mq->getDesc());
@@ -155,6 +164,7 @@
static std::shared_ptr<ITestAidlMsgQ> waitGetTestService() {
const std::string instance = std::string() + ITestAidlMsgQ::descriptor + "/default";
ndk::SpAIBinder binder(AServiceManager_getService(instance.c_str()));
+ CHECK(nullptr != binder);
return ITestAidlMsgQ::fromBinder(binder);
}
bool getFmqUnsyncWrite(bool configureFmq, bool userFd, std::shared_ptr<ITestAidlMsgQ> service,
@@ -162,7 +172,7 @@
bool result = false;
aidl::android::hardware::common::fmq::MQDescriptor<int32_t, UnsynchronizedWrite> desc;
auto ret = service->getFmqUnsyncWrite(configureFmq, userFd, &desc, &result);
- *queue = new (std::nothrow) AidlMessageQueueUnsync(desc);
+ *queue = new (std::nothrow) AidlMessageQueueUnsync(desc, false);
return result && ret.isOk();
}
@@ -186,7 +196,7 @@
}
AidlMessageQueueUnsync* newQueue() {
if (mQueue->isValid())
- return new (std::nothrow) AidlMessageQueueUnsync(mQueue->dupeDesc());
+ return new (std::nothrow) AidlMessageQueueUnsync(mQueue->dupeDesc(), false);
else
return nullptr;
}
@@ -200,11 +210,17 @@
class ClientUnsyncTestBase<MessageQueueUnsync> : public ::testing::Test {
protected:
static sp<ITestMsgQ> waitGetTestService() {
- android::hardware::details::setTrebleTestingOverride(true);
- // waitForHwService is required because ITestMsgQ is not in manifest.xml.
- // "Real" HALs shouldn't be doing this.
- waitForHwService(ITestMsgQ::descriptor, "default");
- return ITestMsgQ::getService();
+ if (isHidlSupported()) {
+ android::hardware::details::setTrebleTestingOverride(true);
+ // waitForHwService is required because ITestMsgQ is not in manifest.xml.
+ // "Real" HALs shouldn't be doing this.
+ waitForHwService(ITestMsgQ::descriptor, "default");
+ sp<ITestMsgQ> service = ITestMsgQ::getService();
+ CHECK(nullptr != service);
+ return service;
+ } else {
+ return nullptr;
+ }
}
bool getFmqUnsyncWrite(bool configureFmq, bool userFd, sp<ITestMsgQ> service,
MessageQueueUnsync** queue) {
@@ -214,7 +230,7 @@
service->getFmqUnsyncWrite(configureFmq, userFd,
[queue](bool ret, const MQDescriptorUnsync<int32_t>& in) {
ASSERT_TRUE(ret);
- *queue = new (std::nothrow) MessageQueueUnsync(in);
+ *queue = new (std::nothrow) MessageQueueUnsync(in, false);
});
return true;
}
@@ -236,7 +252,7 @@
}
MessageQueueUnsync* newQueue() {
- return new (std::nothrow) MessageQueueUnsync(*mQueue->getDesc());
+ return new (std::nothrow) MessageQueueUnsync(*mQueue->getDesc(), false);
}
sp<ITestMsgQ> mService;
@@ -257,7 +273,7 @@
virtual void SetUp() {
this->mService = this->waitGetTestService();
- ASSERT_NE(this->mService, nullptr);
+ if (this->mService == nullptr) GTEST_SKIP() << "HIDL is not supported";
ASSERT_TRUE(this->mService->isRemote());
static constexpr size_t kSyncElementSizeBytes = sizeof(int32_t);
android::base::unique_fd ringbufferFd;
@@ -288,7 +304,7 @@
virtual void SetUp() {
this->mService = this->waitGetTestService();
- ASSERT_NE(this->mService, nullptr);
+ if (this->mService == nullptr) GTEST_SKIP() << "HIDL is not supported";
ASSERT_TRUE(this->mService->isRemote());
this->getFmqUnsyncWrite(true, false, this->mService, &this->mQueue);
ASSERT_NE(nullptr, this->mQueue);
@@ -425,7 +441,7 @@
ASSERT_TRUE(ret);
}
{
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
ret = this->mQueue->writeBlocking(
data.data(), data.size(),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
@@ -464,7 +480,7 @@
* kNumElementsInSyncQueue will succeed.
*/
{
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
ret = this->mQueue->writeBlocking(data.data(), data.size(), 5000000000 /* timeOutNanos */);
ASSERT_TRUE(ret);
}
@@ -505,7 +521,7 @@
* blocking write should be successful.
*/
{
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
ret = this->mQueue->writeBlocking(
data.data(), data.size(),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
@@ -551,7 +567,7 @@
* blocking write should be successful.
*/
{
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
ret = this->mQueue->writeBlocking(
data.data(), data.size(),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
@@ -596,7 +612,7 @@
* blocking write should be successful.
*/
{
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
ret = this->mQueue->writeBlocking(
data.data(), data.size(),
static_cast<uint32_t>(ITestMsgQ::EventFlagBits::FMQ_NOT_FULL),
@@ -789,7 +805,7 @@
* to read and verify the messages in the FMQ.
*/
TYPED_TEST(SynchronizedReadWriteClient, WriteWhenFull) {
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
initData(data.data(), data.size());
ASSERT_TRUE(this->mQueue->write(data.data(), data.size()));
ASSERT_EQ(0UL, this->mQueue->availableToWrite());
@@ -836,7 +852,7 @@
*/
TYPED_TEST(SynchronizedReadWriteClient, LargeInputTest3) {
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
initData(data.data(), data.size());
ASSERT_TRUE(this->mQueue->write(data.data(), data.size()));
ASSERT_EQ(0UL, this->mQueue->availableToWrite());
@@ -894,7 +910,7 @@
*/
TYPED_TEST(SynchronizedReadWriteClient, ReadWriteWrapAround) {
size_t numMessages = kNumElementsInSyncQueue / 2;
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
initData(data.data(), data.size());
ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
bool ret = this->requestReadFmqSync(numMessages);
@@ -914,7 +930,7 @@
*/
TYPED_TEST(SynchronizedReadWriteClient, ReadWriteWrapAround2) {
size_t numMessages = kNumElementsInSyncQueue / 2;
- std::array<int32_t, kNumElementsInSyncQueue> data = {0};
+ std::vector<int32_t> data(kNumElementsInSyncQueue, 0);
initData(data.data(), data.size());
ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
auto ret = this->requestReadFmqSync(numMessages);
@@ -960,11 +976,8 @@
TYPED_TEST(UnsynchronizedWriteClient, SmallInputWriterTest1) {
const size_t dataLen = 16;
ASSERT_LE(dataLen, this->mNumMessagesMax);
- int32_t data[dataLen];
- initData(data, dataLen);
- ASSERT_TRUE(this->mQueue->write(data, dataLen));
- bool ret = this->requestReadFmqUnsync(dataLen, this->mService);
- ASSERT_TRUE(ret);
+ ASSERT_TRUE(this->requestWriteFmqUnsync(dataLen, this->mService));
+ ASSERT_TRUE(this->requestReadFmqUnsync(dataLen, this->mService));
}
/*
@@ -990,9 +1003,9 @@
TYPED_TEST(UnsynchronizedWriteClient, WriteWhenFull) {
std::vector<int32_t> data(this->mNumMessagesMax);
initData(&data[0], this->mNumMessagesMax);
- ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
+ ASSERT_TRUE(this->requestWriteFmqUnsync(this->mNumMessagesMax, this->mService));
ASSERT_EQ(0UL, this->mQueue->availableToWrite());
- ASSERT_TRUE(this->mQueue->write(&data[0], 1));
+ ASSERT_TRUE(this->requestWriteFmqUnsync(1, this->mService));
bool ret = this->requestReadFmqUnsync(this->mNumMessagesMax, this->mService);
ASSERT_FALSE(ret);
}
@@ -1027,23 +1040,20 @@
/*
* Write until FMQ is full.
- * Verify that the number of messages available to write
- * is equal to this->mNumMessagesMax.
* Verify that another write attempt is successful.
- * Request this->mService to read. Verify that read is unsuccessful.
+ * Request this->mService to read. Verify that read is unsuccessful
+ * because of the write rollover.
* Perform another write and verify that the read is successful
* to check if the reader process can recover from the error condition.
*/
TYPED_TEST(UnsynchronizedWriteClient, LargeInputTest3) {
- std::vector<int32_t> data(this->mNumMessagesMax);
- initData(&data[0], this->mNumMessagesMax);
- ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
+ ASSERT_TRUE(this->requestWriteFmqUnsync(this->mNumMessagesMax, this->mService));
ASSERT_EQ(0UL, this->mQueue->availableToWrite());
- ASSERT_TRUE(this->mQueue->write(&data[0], 1));
+ ASSERT_TRUE(this->requestWriteFmqUnsync(1, this->mService));
bool ret = this->requestReadFmqUnsync(this->mNumMessagesMax, this->mService);
ASSERT_FALSE(ret);
- ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
+ ASSERT_TRUE(this->requestWriteFmqUnsync(this->mNumMessagesMax, this->mService));
ret = this->requestReadFmqUnsync(this->mNumMessagesMax, this->mService);
ASSERT_TRUE(ret);
@@ -1079,13 +1089,15 @@
const size_t chunkNum = 5;
const size_t numMessages = chunkSize * chunkNum;
ASSERT_LE(numMessages, this->mNumMessagesMax);
- int32_t data[numMessages];
- initData(data, numMessages);
for (size_t i = 0; i < chunkNum; i++) {
- ASSERT_TRUE(this->mQueue->write(data + i * chunkSize, chunkSize));
+ ASSERT_TRUE(this->requestWriteFmqUnsync(chunkSize, this->mService));
}
- bool ret = this->requestReadFmqUnsync(numMessages, this->mService);
- ASSERT_TRUE(ret);
+ ASSERT_EQ(numMessages, this->mQueue->availableToRead());
+ int32_t readData[numMessages] = {};
+ ASSERT_TRUE(this->mQueue->read(readData, numMessages));
+ // verify that data is filled by the service - the messages will contain
+ // 'chunkSize' because that's the value we passed to the service each write.
+ ASSERT_TRUE(verifyData(readData, chunkSize));
}
/*
@@ -1096,14 +1108,10 @@
*/
TYPED_TEST(UnsynchronizedWriteClient, ReadWriteWrapAround) {
size_t numMessages = this->mNumMessagesMax / 2;
- std::vector<int32_t> data(this->mNumMessagesMax);
- initData(&data[0], this->mNumMessagesMax);
- ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
- bool ret = this->requestReadFmqUnsync(numMessages, this->mService);
- ASSERT_TRUE(ret);
- ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
- ret = this->requestReadFmqUnsync(this->mNumMessagesMax, this->mService);
- ASSERT_TRUE(ret);
+ ASSERT_TRUE(this->requestWriteFmqUnsync(numMessages, this->mService));
+ ASSERT_TRUE(this->requestReadFmqUnsync(numMessages, this->mService));
+ ASSERT_TRUE(this->requestWriteFmqUnsync(this->mNumMessagesMax, this->mService));
+ ASSERT_TRUE(this->requestReadFmqUnsync(this->mNumMessagesMax, this->mService));
}
/*