Merge changes Iae9f692f,Ib43f1cb6,I9d4f0c30,I7d06ea35

* changes:
  Move NN HandleError from utils/common to 1.0/utils
  Relocate NN ProtectCallback to 1.0/utils
  Move NN memory utils from utils/common to 1.0/utils
  Remove hal::utils::countNumberOfConsumers
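
For code consuming the HIDL utils, the migration is mechanical: includes that previously
came from utils/common now come from 1.0/utils, and the HANDLE_HAL_STATUS macro is renamed
to HANDLE_STATUS_HIDL. A minimal before/after sketch (illustrative only; the actual call
sites are updated in the hunks below, and the exact ErrorStatus type varies by HAL version):

    // Before: headers lived in utils/common and the macro was HANDLE_HAL_STATUS.
    //   #include <nnapi/hal/HandleError.h>
    //   #include <nnapi/hal/ProtectCallback.h>
    // After: headers live in 1.0/utils and the macro is HANDLE_STATUS_HIDL.
    #include <nnapi/hal/1.0/HandleError.h>
    #include <nnapi/hal/1.0/ProtectCallback.h>

    nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
                                                             const Capabilities& capabilities) {
        // Converts a non-NONE HAL status into an early-returned NN_ERROR.
        HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status);
        return nn::convert(capabilities);
    }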
diff --git a/neuralnetworks/1.0/utils/Android.bp b/neuralnetworks/1.0/utils/Android.bp
index 8c51c67..31cdded 100644
--- a/neuralnetworks/1.0/utils/Android.bp
+++ b/neuralnetworks/1.0/utils/Android.bp
@@ -31,6 +31,7 @@
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
     static_libs: [
+        "libarect",
         "neuralnetworks_types",
         "neuralnetworks_utils_hal_common",
     ],
@@ -40,6 +41,11 @@
     export_static_lib_headers: [
         "neuralnetworks_utils_hal_common",
     ],
+    target: {
+        android: {
+            shared_libs: ["libnativewindow"],
+        },
+    },
 }
 
 cc_test {
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
index 3b32e1d..1ab9dcb 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Callbacks.h
@@ -24,9 +24,10 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
+#include "nnapi/hal/1.0/ProtectCallback.h"
+
 // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
 // lifetimes across processes and for protecting asynchronous calls across HIDL.
 
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
index 5d4bdbc..a770d06 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Conversions.h
@@ -36,6 +36,7 @@
 GeneralResult<Operation> unvalidatedConvert(const hal::V1_0::Operation& operation);
 GeneralResult<Model::OperandValues> unvalidatedConvert(
         const hardware::hidl_vec<uint8_t>& operandValues);
+GeneralResult<SharedHandle> unvalidatedConvert(const hardware::hidl_handle& handle);
 GeneralResult<SharedMemory> unvalidatedConvert(const hardware::hidl_memory& memory);
 GeneralResult<Model> unvalidatedConvert(const hal::V1_0::Model& model);
 GeneralResult<Request::Argument> unvalidatedConvert(
@@ -65,6 +66,7 @@
 nn::GeneralResult<Operation> unvalidatedConvert(const nn::Operation& operation);
 nn::GeneralResult<hidl_vec<uint8_t>> unvalidatedConvert(
         const nn::Model::OperandValues& operandValues);
+nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle);
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory);
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model);
 nn::GeneralResult<RequestArgument> unvalidatedConvert(const nn::Request::Argument& requestArgument);
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
index db3b2ad..0a6ca3e 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Device.h
@@ -24,7 +24,8 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
+
+#include "nnapi/hal/1.0/ProtectCallback.h"
 
 #include <functional>
 #include <memory>
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
index e201e25..66497c2 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Execution.h
@@ -22,9 +22,9 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include "PreparedModel.h"
+#include "nnapi/hal/1.0/ProtectCallback.h"
 
 #include <memory>
 #include <utility>
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/HandleError.h
similarity index 79%
rename from neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
rename to neuralnetworks/1.0/utils/include/nnapi/hal/1.0/HandleError.h
index e51f916..8e02633 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/HandleError.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/HandleError.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_HANDLE_ERROR_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_HANDLE_ERROR_H
 
 #include <android/hidl/base/1.0/IBase.h>
 #include <hidl/HidlSupport.h>
@@ -27,7 +27,7 @@
 namespace android::hardware::neuralnetworks::utils {
 
 template <typename Type>
-nn::GeneralResult<Type> handleTransportError(const hardware::Return<Type>& ret) {
+nn::GeneralResult<Type> handleTransportError(const Return<Type>& ret) {
     if (ret.isDeadObject()) {
         return nn::error(nn::ErrorStatus::DEAD_OBJECT)
                << "Return<>::isDeadObject returned true: " << ret.description();
@@ -52,13 +52,13 @@
         std::move(result).value();                                                           \
     })
 
-#define HANDLE_HAL_STATUS(status)                                       \
-    if (const auto canonical = ::android::nn::convert(status).value_or( \
-                ::android::nn::ErrorStatus::GENERAL_FAILURE);           \
-        canonical == ::android::nn::ErrorStatus::NONE) {                \
-    } else                                                              \
+#define HANDLE_STATUS_HIDL(status)                                                            \
+    if (const ::android::nn::ErrorStatus canonical = ::android::nn::convert(status).value_or( \
+                ::android::nn::ErrorStatus::GENERAL_FAILURE);                                 \
+        canonical == ::android::nn::ErrorStatus::NONE) {                                      \
+    } else                                                                                    \
         return NN_ERROR(canonical)
 
 }  // namespace android::hardware::neuralnetworks::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_HANDLE_ERROR_H
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_HANDLE_ERROR_H
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
index 48be595..bdb5b54 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/PreparedModel.h
@@ -22,7 +22,8 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
+
+#include "nnapi/hal/1.0/ProtectCallback.h"
 
 #include <memory>
 #include <tuple>
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ProtectCallback.h
similarity index 93%
rename from neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h
rename to neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ProtectCallback.h
index 05110bc..7418cfa 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/ProtectCallback.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ProtectCallback.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
-#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
+#ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PROTECT_CALLBACK_H
+#define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PROTECT_CALLBACK_H
 
 #include <android-base/scopeguard.h>
 #include <android-base/thread_annotations.h>
@@ -98,4 +98,4 @@
 
 }  // namespace android::hardware::neuralnetworks::utils
 
-#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_PROTECT_CALLBACK_H
+#endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_0_UTILS_PROTECT_CALLBACK_H
diff --git a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
index 360b338..5c1480e 100644
--- a/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
+++ b/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/Utils.h
@@ -25,7 +25,6 @@
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
-#include <nnapi/hal/HandleError.h>
 
 namespace android::hardware::neuralnetworks::V1_0::utils {
 
diff --git a/neuralnetworks/1.0/utils/src/Callbacks.cpp b/neuralnetworks/1.0/utils/src/Callbacks.cpp
index ea3ea56..7b478ae 100644
--- a/neuralnetworks/1.0/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.0/utils/src/Callbacks.cpp
@@ -17,7 +17,9 @@
 #include "Callbacks.h"
 
 #include "Conversions.h"
+#include "HandleError.h"
 #include "PreparedModel.h"
+#include "ProtectCallback.h"
 #include "Utils.h"
 
 #include <android/hardware/neuralnetworks/1.0/IExecutionCallback.h>
@@ -27,8 +29,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <utility>
@@ -40,19 +40,19 @@
 
 nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
         ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
-    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "get supported operations failed with " << toString(status);
     return supportedOperations;
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
     return NN_TRY(PreparedModel::create(preparedModel));
 }
 
 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
         ErrorStatus status) {
-    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
     return {};
 }
 
diff --git a/neuralnetworks/1.0/utils/src/Conversions.cpp b/neuralnetworks/1.0/utils/src/Conversions.cpp
index c0498eb..daa10fd 100644
--- a/neuralnetworks/1.0/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.0/utils/src/Conversions.cpp
@@ -37,6 +37,11 @@
 
 #include "Utils.h"
 
+#ifdef __ANDROID__
+#include <android/hardware_buffer.h>
+#include <vndk/hardware_buffer.h>
+#endif  // __ANDROID__
+
 namespace {
 
 template <typename Type>
@@ -49,6 +54,7 @@
 namespace android::nn {
 namespace {
 
+using hardware::hidl_handle;
 using hardware::hidl_memory;
 using hardware::hidl_vec;
 
@@ -74,6 +80,121 @@
     return canonical;
 }
 
+nn::GeneralResult<nn::Memory::Unknown::Handle> unknownHandleFromNativeHandle(
+        const native_handle_t* handle) {
+    if (handle == nullptr) {
+        return NN_ERROR() << "unknownHandleFromNativeHandle failed because handle is nullptr";
+    }
+
+    std::vector<base::unique_fd> fds =
+            NN_TRY(nn::dupFds(handle->data + 0, handle->data + handle->numFds));
+
+    std::vector<int> ints(handle->data + handle->numFds,
+                          handle->data + handle->numFds + handle->numInts);
+
+    return nn::Memory::Unknown::Handle{.fds = std::move(fds), .ints = std::move(ints)};
+}
+
+nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
+    CHECK_LE(memory.size(), std::numeric_limits<size_t>::max());
+    if (!memory.valid()) {
+        return NN_ERROR() << "Unable to convert invalid hidl_memory";
+    }
+
+    if (memory.name() == "ashmem") {
+        if (memory.handle()->numFds != 1) {
+            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
+                              << memory.handle()->numFds << " numFds, but expected 1";
+        }
+        if (memory.handle()->numInts != 0) {
+            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
+                              << memory.handle()->numInts << " numInts, but expected 0";
+        }
+        auto handle = nn::Memory::Ashmem{
+                .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])),
+                .size = static_cast<size_t>(memory.size()),
+        };
+        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
+    }
+
+    if (memory.name() == "mmap_fd") {
+        if (memory.handle()->numFds != 1) {
+            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
+                              << memory.handle()->numFds << " numFds, but expected 1";
+        }
+        if (memory.handle()->numInts != 3) {
+            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
+                              << memory.handle()->numInts << " numInts, but expected 3";
+        }
+
+        const int fd = memory.handle()->data[0];
+        const int prot = memory.handle()->data[1];
+        const int lower = memory.handle()->data[2];
+        const int higher = memory.handle()->data[3];
+        const size_t offset = nn::getOffsetFromInts(lower, higher);
+
+        return nn::createSharedMemoryFromFd(static_cast<size_t>(memory.size()), prot, fd, offset);
+    }
+
+    if (memory.name() != "hardware_buffer_blob") {
+        auto handle = nn::Memory::Unknown{
+                .handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle())),
+                .size = static_cast<size_t>(memory.size()),
+                .name = memory.name(),
+        };
+        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
+    }
+
+#ifdef __ANDROID__
+    constexpr auto roundUpToMultiple = [](uint32_t value, uint32_t multiple) -> uint32_t {
+        return (value + multiple - 1) / multiple * multiple;
+    };
+
+    const auto size = memory.size();
+    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
+    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
+    const uint32_t width = size;
+    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
+    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
+
+    // AHardwareBuffer_createFromHandle() might fail because an allocator
+    // expects a specific stride value. In that case, we try to guess it by
+    // aligning the width to small powers of 2.
+    // TODO(b/174120849): Avoid stride assumptions.
+    AHardwareBuffer* hardwareBuffer = nullptr;
+    status_t status = UNKNOWN_ERROR;
+    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
+        const uint32_t stride = roundUpToMultiple(width, alignment);
+        AHardwareBuffer_Desc desc{
+                .width = width,
+                .height = height,
+                .layers = layers,
+                .format = format,
+                .usage = usage,
+                .stride = stride,
+        };
+        status = AHardwareBuffer_createFromHandle(&desc, memory.handle(),
+                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
+                                                  &hardwareBuffer);
+        if (status == NO_ERROR) {
+            break;
+        }
+    }
+    if (status != NO_ERROR) {
+        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
+               << "Can't create AHardwareBuffer from handle. Error: " << status;
+    }
+
+    return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true);
+#else   // __ANDROID__
+    LOG(FATAL) << "nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const "
+                  "hidl_memory& memory): Not Available on Host Build";
+    return (NN_ERROR() << "createSharedMemoryFromHidlMemory failed")
+            .
+            operator nn::GeneralResult<nn::SharedMemory>();
+#endif  // __ANDROID__
+}
+
 }  // anonymous namespace
 
 GeneralResult<OperandType> unvalidatedConvert(const hal::V1_0::OperandType& operandType) {
@@ -146,8 +267,20 @@
     return Model::OperandValues(operandValues.data(), operandValues.size());
 }
 
+GeneralResult<SharedHandle> unvalidatedConvert(const hidl_handle& handle) {
+    if (handle.getNativeHandle() == nullptr) {
+        return nullptr;
+    }
+    if (handle->numFds != 1 || handle->numInts != 0) {
+        return NN_ERROR()
+               << "unvalidatedConvert failed because handle does not only hold a single fd";
+    }
+    auto duplicatedFd = NN_TRY(nn::dupFd(handle->data[0]));
+    return std::make_shared<const Handle>(std::move(duplicatedFd));
+}
+
 GeneralResult<SharedMemory> unvalidatedConvert(const hidl_memory& memory) {
-    return hal::utils::createSharedMemoryFromHidlMemory(memory);
+    return createSharedMemoryFromHidlMemory(memory);
 }
 
 GeneralResult<Model> unvalidatedConvert(const hal::V1_0::Model& model) {
@@ -155,7 +288,7 @@
 
     // Verify number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(model.operands.size(), operations));
+            NN_TRY(countNumberOfConsumers(model.operands.size(), operations));
     CHECK(model.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < model.operands.size(); ++i) {
         if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
@@ -260,6 +393,82 @@
     return utils::unvalidatedConvert(canonical);
 }
 
+nn::GeneralResult<hidl_handle> createNativeHandleFrom(std::vector<base::unique_fd> fds,
+                                                      const std::vector<int32_t>& ints) {
+    constexpr size_t kIntMax = std::numeric_limits<int>::max();
+    CHECK_LE(fds.size(), kIntMax);
+    CHECK_LE(ints.size(), kIntMax);
+    native_handle_t* nativeHandle =
+            native_handle_create(static_cast<int>(fds.size()), static_cast<int>(ints.size()));
+    if (nativeHandle == nullptr) {
+        return NN_ERROR() << "Failed to create native_handle";
+    }
+
+    for (size_t i = 0; i < fds.size(); ++i) {
+        nativeHandle->data[i] = fds[i].release();
+    }
+    std::copy(ints.begin(), ints.end(), nativeHandle->data + nativeHandle->numFds);
+
+    hidl_handle handle;
+    handle.setTo(nativeHandle, /*shouldOwn=*/true);
+    return handle;
+}
+
+nn::GeneralResult<hidl_handle> createNativeHandleFrom(base::unique_fd fd,
+                                                      const std::vector<int32_t>& ints) {
+    std::vector<base::unique_fd> fds;
+    fds.push_back(std::move(fd));
+    return createNativeHandleFrom(std::move(fds), ints);
+}
+
+nn::GeneralResult<hidl_handle> createNativeHandleFrom(const nn::Memory::Unknown::Handle& handle) {
+    std::vector<base::unique_fd> fds = NN_TRY(nn::dupFds(handle.fds.begin(), handle.fds.end()));
+    return createNativeHandleFrom(std::move(fds), handle.ints);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Ashmem& memory) {
+    auto fd = NN_TRY(nn::dupFd(memory.fd));
+    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {}));
+    return hidl_memory("ashmem", std::move(handle), memory.size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Fd& memory) {
+    auto fd = NN_TRY(nn::dupFd(memory.fd));
+
+    const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset);
+    const std::vector<int> ints = {memory.prot, lowOffsetBits, highOffsetBits};
+
+    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints));
+    return hidl_memory("mmap_fd", std::move(handle), memory.size);
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) {
+#ifdef __ANDROID__
+    const auto* ahwb = memory.handle.get();
+    AHardwareBuffer_Desc bufferDesc;
+    AHardwareBuffer_describe(ahwb, &bufferDesc);
+
+    const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB;
+    const size_t size = isBlob ? bufferDesc.width : 0;
+    const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer";
+
+    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
+    const hidl_handle hidlHandle(nativeHandle);
+    hidl_handle copiedHandle(hidlHandle);
+
+    return hidl_memory(name, std::move(copiedHandle), size);
+#else   // __ANDROID__
+    LOG(FATAL) << "nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const "
+                  "nn::Memory::HardwareBuffer& memory): Not Available on Host Build";
+    (void)memory;
+    return (NN_ERROR() << "createHidlMemoryFrom failed").operator nn::GeneralResult<hidl_memory>();
+#endif  // __ANDROID__
+}
+
+nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Unknown& memory) {
+    return hidl_memory(memory.name, NN_TRY(createNativeHandleFrom(memory.handle)), memory.size);
+}
+
 }  // anonymous namespace
 
 nn::GeneralResult<OperandType> unvalidatedConvert(const nn::OperandType& operandType) {
@@ -332,8 +541,19 @@
     return hidl_vec<uint8_t>(operandValues.data(), operandValues.data() + operandValues.size());
 }
 
+nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
+    if (handle == nullptr) {
+        return {};
+    }
+    base::unique_fd fd = NN_TRY(nn::dupFd(handle->get()));
+    return createNativeHandleFrom(std::move(fd), {});
+}
+
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
-    return hal::utils::createHidlMemoryFromSharedMemory(memory);
+    if (memory == nullptr) {
+        return NN_ERROR() << "Memory must be non-empty";
+    }
+    return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle);
 }
 
 nn::GeneralResult<Model> unvalidatedConvert(const nn::Model& model) {
@@ -346,7 +566,7 @@
 
     // Update number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), model.main.operations));
+            NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations));
     CHECK(operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < operands.size(); ++i) {
         operands[i].numberOfConsumers = numberOfConsumers[i];
diff --git a/neuralnetworks/1.0/utils/src/Device.cpp b/neuralnetworks/1.0/utils/src/Device.cpp
index 93bd81a..49913a2 100644
--- a/neuralnetworks/1.0/utils/src/Device.cpp
+++ b/neuralnetworks/1.0/utils/src/Device.cpp
@@ -18,6 +18,8 @@
 
 #include "Callbacks.h"
 #include "Conversions.h"
+#include "HandleError.h"
+#include "ProtectCallback.h"
 #include "Utils.h"
 
 #include <android/hardware/neuralnetworks/1.0/IDevice.h>
@@ -29,8 +31,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <functional>
@@ -47,7 +47,7 @@
 
 nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
                                                          const Capabilities& capabilities) {
-    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status);
     return nn::convert(capabilities);
 }
 
@@ -156,7 +156,7 @@
 
     const auto ret = kDevice->prepareModel(hidlModel, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.0/utils/src/Execution.cpp b/neuralnetworks/1.0/utils/src/Execution.cpp
index 7a3216b..6e105a6 100644
--- a/neuralnetworks/1.0/utils/src/Execution.cpp
+++ b/neuralnetworks/1.0/utils/src/Execution.cpp
@@ -18,6 +18,8 @@
 
 #include "Callbacks.h"
 #include "Conversions.h"
+#include "HandleError.h"
+#include "ProtectCallback.h"
 #include "Utils.h"
 
 #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
@@ -27,8 +29,6 @@
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <memory>
 #include <utility>
diff --git a/neuralnetworks/1.0/utils/src/PreparedModel.cpp b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
index 3060c65..00e7d22 100644
--- a/neuralnetworks/1.0/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.0/utils/src/PreparedModel.cpp
@@ -20,6 +20,8 @@
 #include "Callbacks.h"
 #include "Conversions.h"
 #include "Execution.h"
+#include "HandleError.h"
+#include "ProtectCallback.h"
 #include "Utils.h"
 
 #include <android/hardware/neuralnetworks/1.0/IPreparedModel.h>
@@ -28,8 +30,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <memory>
 #include <tuple>
@@ -84,7 +84,7 @@
 
     const auto ret = kPreparedModel->execute(request, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
 
     auto result = NN_TRY(cb->get());
     if (relocation.output) {
diff --git a/neuralnetworks/utils/common/src/ProtectCallback.cpp b/neuralnetworks/1.0/utils/src/ProtectCallback.cpp
similarity index 98%
rename from neuralnetworks/utils/common/src/ProtectCallback.cpp
rename to neuralnetworks/1.0/utils/src/ProtectCallback.cpp
index 18e1f3b..89539b5 100644
--- a/neuralnetworks/utils/common/src/ProtectCallback.cpp
+++ b/neuralnetworks/1.0/utils/src/ProtectCallback.cpp
@@ -22,7 +22,8 @@
 #include <android/hidl/base/1.0/IBase.h>
 #include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
-#include <nnapi/hal/HandleError.h>
+
+#include "HandleError.h"
 
 #include <algorithm>
 #include <functional>
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
index 5e224b5..d6bd36a 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Device.h
@@ -23,8 +23,8 @@
 #include <nnapi/OperandTypes.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <functional>
 #include <memory>
diff --git a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
index 09d9fe8..4660ff7 100644
--- a/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
+++ b/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/Utils.h
@@ -26,7 +26,6 @@
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
-#include <nnapi/hal/HandleError.h>
 
 namespace android::hardware::neuralnetworks::V1_1::utils {
 
diff --git a/neuralnetworks/1.1/utils/src/Conversions.cpp b/neuralnetworks/1.1/utils/src/Conversions.cpp
index 467ceb3..5bdbe31 100644
--- a/neuralnetworks/1.1/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.1/utils/src/Conversions.cpp
@@ -100,7 +100,7 @@
 
     // Verify number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(model.operands.size(), operations));
+            NN_TRY(countNumberOfConsumers(model.operands.size(), operations));
     CHECK(model.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < model.operands.size(); ++i) {
         if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
@@ -223,7 +223,7 @@
 
     // Update number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), model.main.operations));
+            NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations));
     CHECK(operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < operands.size(); ++i) {
         operands[i].numberOfConsumers = numberOfConsumers[i];
diff --git a/neuralnetworks/1.1/utils/src/Device.cpp b/neuralnetworks/1.1/utils/src/Device.cpp
index 3197ef4..7d54cab 100644
--- a/neuralnetworks/1.1/utils/src/Device.cpp
+++ b/neuralnetworks/1.1/utils/src/Device.cpp
@@ -29,9 +29,9 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Callbacks.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <functional>
 #include <memory>
@@ -47,7 +47,7 @@
 
 nn::GeneralResult<nn::Capabilities> capabilitiesCallback(V1_0::ErrorStatus status,
                                                          const Capabilities& capabilities) {
-    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status);
     return nn::convert(capabilities);
 }
 
@@ -157,7 +157,7 @@
 
     const auto ret = kDevice->prepareModel_1_1(hidlModel, hidlPreference, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
index ba3c1ba..6dd8138 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Callbacks.h
@@ -27,8 +27,8 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Callbacks.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
index 272cee7..c3348aa 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Conversions.h
@@ -45,7 +45,6 @@
 GeneralResult<Extension> unvalidatedConvert(const hal::V1_2::Extension& extension);
 GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
         const hal::V1_2::Extension::OperandTypeInformation& operandTypeInformation);
-GeneralResult<SharedHandle> unvalidatedConvert(const hardware::hidl_handle& handle);
 
 GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType);
 GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities);
@@ -86,7 +85,6 @@
 nn::GeneralResult<Extension> unvalidatedConvert(const nn::Extension& extension);
 nn::GeneralResult<Extension::OperandTypeInformation> unvalidatedConvert(
         const nn::Extension::OperandTypeInformation& operandTypeInformation);
-nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle);
 
 nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
 nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
index b4bef5e..e7ac172 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Device.h
@@ -23,8 +23,8 @@
 #include <nnapi/OperandTypes.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <functional>
 #include <memory>
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
index 9c66446..867f181 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Execution.h
@@ -21,8 +21,8 @@
 #include <nnapi/IExecution.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include "PreparedModel.h"
 
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
index dae1ff3..8078693 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstController.h
@@ -32,8 +32,8 @@
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <atomic>
 #include <chrono>
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h
index f7926f5..500aa0c 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstServer.h
@@ -29,7 +29,7 @@
 #include <nnapi/IBurst.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
-#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 
 #include <atomic>
 #include <chrono>
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h
index c662bc3..c081305 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ExecutionBurstUtils.h
@@ -23,7 +23,7 @@
 #include <hidl/MQDescriptor.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
-#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 
 #include <atomic>
 #include <chrono>
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
index 35abd79..1150e5e 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/PreparedModel.h
@@ -22,8 +22,8 @@
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <memory>
 #include <tuple>
diff --git a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
index 5c3b8a7..23e336a 100644
--- a/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
+++ b/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/Utils.h
@@ -28,7 +28,6 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.1/Conversions.h>
 #include <nnapi/hal/1.1/Utils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <limits>
 
diff --git a/neuralnetworks/1.2/utils/src/Callbacks.cpp b/neuralnetworks/1.2/utils/src/Callbacks.cpp
index 01b5e12..cb61f21 100644
--- a/neuralnetworks/1.2/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.2/utils/src/Callbacks.cpp
@@ -29,10 +29,10 @@
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Callbacks.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/HandleError.h>
 #include <nnapi/hal/1.0/PreparedModel.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <utility>
@@ -62,7 +62,7 @@
 
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
     return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
 }
 
@@ -74,7 +74,7 @@
         return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
                << "execution failed with " << toString(status);
     }
-    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
     return convertExecutionGeneralResultsHelper(outputShapes, timing);
 }
 
diff --git a/neuralnetworks/1.2/utils/src/Conversions.cpp b/neuralnetworks/1.2/utils/src/Conversions.cpp
index 6a80b42..838d9c4 100644
--- a/neuralnetworks/1.2/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.2/utils/src/Conversions.cpp
@@ -28,7 +28,6 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.1/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <algorithm>
 #include <functional>
@@ -187,7 +186,7 @@
 
     // Verify number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(model.operands.size(), operations));
+            NN_TRY(countNumberOfConsumers(model.operands.size(), operations));
     CHECK(model.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < model.operands.size(); ++i) {
         if (model.operands[i].numberOfConsumers != numberOfConsumers[i]) {
@@ -264,14 +263,6 @@
     };
 }
 
-GeneralResult<SharedHandle> unvalidatedConvert(const hidl_handle& hidlHandle) {
-    if (hidlHandle.getNativeHandle() == nullptr) {
-        return nullptr;
-    }
-    auto handle = NN_TRY(hal::utils::sharedHandleFromNativeHandle(hidlHandle.getNativeHandle()));
-    return std::make_shared<const Handle>(std::move(handle));
-}
-
 GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {
     return validatedConvert(deviceType);
 }
@@ -334,6 +325,10 @@
     return V1_0::utils::unvalidatedConvert(operandValues);
 }
 
+nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
+    return V1_0::utils::unvalidatedConvert(handle);
+}
+
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
     return V1_0::utils::unvalidatedConvert(memory);
 }
@@ -481,7 +476,7 @@
 
     // Update number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), model.main.operations));
+            NN_TRY(countNumberOfConsumers(operands.size(), model.main.operations));
     CHECK(operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < operands.size(); ++i) {
         operands[i].numberOfConsumers = numberOfConsumers[i];
@@ -544,13 +539,6 @@
     };
 }
 
-nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
-    if (handle == nullptr) {
-        return {};
-    }
-    return hal::utils::hidlHandleFromSharedHandle(*handle);
-}
-
 nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {
     return validatedConvert(deviceType);
 }
diff --git a/neuralnetworks/1.2/utils/src/Device.cpp b/neuralnetworks/1.2/utils/src/Device.cpp
index 9fe0de2..f12669a 100644
--- a/neuralnetworks/1.2/utils/src/Device.cpp
+++ b/neuralnetworks/1.2/utils/src/Device.cpp
@@ -30,10 +30,10 @@
 #include <nnapi/OperandTypes.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/1.1/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <functional>
 #include <memory>
@@ -49,31 +49,31 @@
 
 nn::GeneralResult<nn::Capabilities> capabilitiesCallback(V1_0::ErrorStatus status,
                                                          const Capabilities& capabilities) {
-    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status);
     return nn::convert(capabilities);
 }
 
 nn::GeneralResult<std::string> versionStringCallback(V1_0::ErrorStatus status,
                                                      const hidl_string& versionString) {
-    HANDLE_HAL_STATUS(status) << "getVersionString failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getVersionString failed with " << toString(status);
     return versionString;
 }
 
 nn::GeneralResult<nn::DeviceType> deviceTypeCallback(V1_0::ErrorStatus status,
                                                      DeviceType deviceType) {
-    HANDLE_HAL_STATUS(status) << "getDeviceType failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getDeviceType failed with " << toString(status);
     return nn::convert(deviceType);
 }
 
 nn::GeneralResult<std::vector<nn::Extension>> supportedExtensionsCallback(
         V1_0::ErrorStatus status, const hidl_vec<Extension>& extensions) {
-    HANDLE_HAL_STATUS(status) << "getExtensions failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getExtensions failed with " << toString(status);
     return nn::convert(extensions);
 }
 
 nn::GeneralResult<std::pair<uint32_t, uint32_t>> numberOfCacheFilesNeededCallback(
         V1_0::ErrorStatus status, uint32_t numModelCache, uint32_t numDataCache) {
-    HANDLE_HAL_STATUS(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getNumberOfCacheFilesNeeded failed with " << toString(status);
     if (numModelCache > nn::kMaxNumberOfCacheFiles) {
         return NN_ERROR() << "getNumberOfCacheFilesNeeded returned numModelCache files greater "
                              "than allowed max ("
@@ -254,7 +254,7 @@
     const auto ret = kDevice->prepareModel_1_2(hidlModel, hidlPreference, hidlModelCache,
                                                hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
@@ -271,7 +271,7 @@
 
     const auto ret = kDevice->prepareModelFromCache(hidlModelCache, hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation from cache failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.2/utils/src/Execution.cpp b/neuralnetworks/1.2/utils/src/Execution.cpp
index 18d1c90..320b0e1 100644
--- a/neuralnetworks/1.2/utils/src/Execution.cpp
+++ b/neuralnetworks/1.2/utils/src/Execution.cpp
@@ -29,7 +29,6 @@
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <memory>
 #include <utility>
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
index 2746965..a8ded9e 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstController.cpp
@@ -28,9 +28,9 @@
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <algorithm>
@@ -82,8 +82,8 @@
 
 nn::GeneralResult<sp<IBurstContext>> executionBurstResultCallback(
         V1_0::ErrorStatus status, const sp<IBurstContext>& burstContext) {
-    HANDLE_HAL_STATUS(status) << "IPreparedModel::configureExecutionBurst failed with status "
-                              << toString(status);
+    HANDLE_STATUS_HIDL(status) << "IPreparedModel::configureExecutionBurst failed with status "
+                               << toString(status);
     if (burstContext == nullptr) {
         return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
                << "IPreparedModel::configureExecutionBurst returned nullptr for burst";
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
index 65ec7f5..f30b662 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstServer.cpp
@@ -27,8 +27,8 @@
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/1.0/Conversions.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <algorithm>
@@ -50,7 +50,7 @@
 
 nn::GeneralResult<std::vector<nn::SharedMemory>> getMemoriesCallback(
         V1_0::ErrorStatus status, const hidl_vec<hidl_memory>& memories) {
-    HANDLE_HAL_STATUS(status) << "getting burst memories failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getting burst memories failed with " << toString(status);
     std::vector<nn::SharedMemory> canonicalMemories;
     canonicalMemories.reserve(memories.size());
     for (const auto& memory : memories) {
diff --git a/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp b/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp
index 1bdde1e..e0d029a 100644
--- a/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp
+++ b/neuralnetworks/1.2/utils/src/ExecutionBurstUtils.cpp
@@ -27,7 +27,7 @@
 #include <hidl/MQDescriptor.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
-#include <nnapi/hal/ProtectCallback.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 
 #include <atomic>
 #include <chrono>
diff --git a/neuralnetworks/1.2/utils/src/PreparedModel.cpp b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
index c261184..b8a5ae0 100644
--- a/neuralnetworks/1.2/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.2/utils/src/PreparedModel.cpp
@@ -31,9 +31,9 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <chrono>
 #include <memory>
@@ -82,7 +82,7 @@
     const auto ret = kPreparedModel->execute_1_2(request, measure, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
     if (status != V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
-        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+        HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
     }
 
     return cb->get();
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
index 643172e..4b8ddc1 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Callbacks.h
@@ -30,8 +30,8 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Callbacks.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
index b677c62..ec1e530 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Conversions.h
@@ -113,6 +113,9 @@
 nn::GeneralResult<V1_2::MeasureTiming> convert(const nn::MeasureTiming& measureTiming);
 nn::GeneralResult<V1_2::Timing> convert(const nn::Timing& timing);
 
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+        const std::vector<nn::SyncFence>& fences);
+
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_1_3_CONVERSIONS_H
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
index 84f606a..c3c6fc4 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Device.h
@@ -23,8 +23,8 @@
 #include <nnapi/OperandTypes.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <functional>
 #include <memory>
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
index 5acba71..480438d 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/PreparedModel.h
@@ -21,8 +21,8 @@
 #include <nnapi/IPreparedModel.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <memory>
 #include <tuple>
diff --git a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
index 28525bd..2812db2 100644
--- a/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
+++ b/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/Utils.h
@@ -30,7 +30,6 @@
 #include <nnapi/hal/1.1/Utils.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/1.2/Utils.h>
-#include <nnapi/hal/HandleError.h>
 
 namespace android::hardware::neuralnetworks::V1_3::utils {
 
diff --git a/neuralnetworks/1.3/utils/src/Buffer.cpp b/neuralnetworks/1.3/utils/src/Buffer.cpp
index ada5265..34925ea 100644
--- a/neuralnetworks/1.3/utils/src/Buffer.cpp
+++ b/neuralnetworks/1.3/utils/src/Buffer.cpp
@@ -25,7 +25,7 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Conversions.h>
-#include <nnapi/hal/HandleError.h>
+#include <nnapi/hal/1.0/HandleError.h>
 
 #include "Conversions.h"
 #include "Utils.h"
@@ -66,7 +66,7 @@
 
     const auto ret = kBuffer->copyTo(hidlDst);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "IBuffer::copyTo failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "IBuffer::copyTo failed with " << toString(status);
 
     return {};
 }
@@ -78,7 +78,7 @@
 
     const auto ret = kBuffer->copyFrom(hidlSrc, hidlDimensions);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "IBuffer::copyFrom failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "IBuffer::copyFrom failed with " << toString(status);
 
     return {};
 }
diff --git a/neuralnetworks/1.3/utils/src/Callbacks.cpp b/neuralnetworks/1.3/utils/src/Callbacks.cpp
index 156216f..f063862 100644
--- a/neuralnetworks/1.3/utils/src/Callbacks.cpp
+++ b/neuralnetworks/1.3/utils/src/Callbacks.cpp
@@ -30,13 +30,13 @@
 #include <nnapi/Types.h>
 #include <nnapi/hal/1.0/Callbacks.h>
 #include <nnapi/hal/1.0/Conversions.h>
+#include <nnapi/hal/1.0/HandleError.h>
 #include <nnapi/hal/1.0/PreparedModel.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/1.2/Callbacks.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/1.2/PreparedModel.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 #include <nnapi/hal/TransferValue.h>
 
 #include <utility>
@@ -71,13 +71,13 @@
 
 nn::GeneralResult<std::vector<bool>> supportedOperationsCallback(
         ErrorStatus status, const hidl_vec<bool>& supportedOperations) {
-    HANDLE_HAL_STATUS(status) << "get supported operations failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "get supported operations failed with " << toString(status);
     return supportedOperations;
 }
 
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
     return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
 }
 
@@ -90,7 +90,7 @@
         return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
                << "execution failed with " << toString(status);
     }
-    HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
     return convertExecutionGeneralResultsHelper(outputShapes, timing);
 }
 
diff --git a/neuralnetworks/1.3/utils/src/Conversions.cpp b/neuralnetworks/1.3/utils/src/Conversions.cpp
index b35b2cd..a1d414c 100644
--- a/neuralnetworks/1.3/utils/src/Conversions.cpp
+++ b/neuralnetworks/1.3/utils/src/Conversions.cpp
@@ -28,7 +28,6 @@
 #include <nnapi/hal/1.0/Conversions.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <algorithm>
 #include <chrono>
@@ -194,7 +193,7 @@
 
     // Verify number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(subgraph.operands.size(), operations));
+            NN_TRY(countNumberOfConsumers(subgraph.operands.size(), operations));
     CHECK(subgraph.operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < subgraph.operands.size(); ++i) {
         if (subgraph.operands[i].numberOfConsumers != numberOfConsumers[i]) {
@@ -380,7 +379,7 @@
 }
 
 nn::GeneralResult<hidl_handle> unvalidatedConvert(const nn::SharedHandle& handle) {
-    return V1_2::utils::unvalidatedConvert(handle);
+    return V1_0::utils::unvalidatedConvert(handle);
 }
 
 nn::GeneralResult<hidl_memory> unvalidatedConvert(const nn::SharedMemory& memory) {
@@ -543,7 +542,7 @@
 
     // Update number of consumers.
     const auto numberOfConsumers =
-            NN_TRY(hal::utils::countNumberOfConsumers(operands.size(), subgraph.operations));
+            NN_TRY(countNumberOfConsumers(operands.size(), subgraph.operations));
     CHECK(operands.size() == numberOfConsumers.size());
     for (size_t i = 0; i < operands.size(); ++i) {
         operands[i].numberOfConsumers = numberOfConsumers[i];
@@ -727,4 +726,13 @@
     return V1_2::utils::convert(timing);
 }
 
+nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
+        const std::vector<nn::SyncFence>& syncFences) {
+    std::vector<nn::SharedHandle> handles;
+    handles.reserve(syncFences.size());
+    std::transform(syncFences.begin(), syncFences.end(), std::back_inserter(handles),
+                   [](const nn::SyncFence& syncFence) { return syncFence.getSharedHandle(); });
+    return convert(handles);
+}
+
 }  // namespace android::hardware::neuralnetworks::V1_3::utils
diff --git a/neuralnetworks/1.3/utils/src/Device.cpp b/neuralnetworks/1.3/utils/src/Device.cpp
index d710b85..a73ce82 100644
--- a/neuralnetworks/1.3/utils/src/Device.cpp
+++ b/neuralnetworks/1.3/utils/src/Device.cpp
@@ -33,13 +33,13 @@
 #include <nnapi/OperandTypes.h>
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/1.1/Conversions.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/1.2/Device.h>
 #include <nnapi/hal/1.2/Utils.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <any>
 #include <functional>
@@ -72,7 +72,7 @@
 
 nn::GeneralResult<nn::Capabilities> capabilitiesCallback(ErrorStatus status,
                                                          const Capabilities& capabilities) {
-    HANDLE_HAL_STATUS(status) << "getting capabilities failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "getting capabilities failed with " << toString(status);
     return nn::convert(capabilities);
 }
 
@@ -89,7 +89,7 @@
 
 nn::GeneralResult<nn::SharedBuffer> allocationCallback(ErrorStatus status,
                                                        const sp<IBuffer>& buffer, uint32_t token) {
-    HANDLE_HAL_STATUS(status) << "IDevice::allocate failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "IDevice::allocate failed with " << toString(status);
     return Buffer::create(buffer, static_cast<nn::Request::MemoryDomainToken>(token));
 }
 
@@ -208,7 +208,7 @@
             kDevice->prepareModel_1_3(hidlModel, hidlPreference, hidlPriority, hidlDeadline,
                                       hidlModelCache, hidlDataCache, hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
 
     return cb->get();
 }
@@ -227,7 +227,7 @@
     const auto ret = kDevice->prepareModelFromCache_1_3(hidlDeadline, hidlModelCache, hidlDataCache,
                                                         hidlToken, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
-    HANDLE_HAL_STATUS(status) << "model preparation from cache failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "model preparation from cache failed with " << toString(status);
 
     return cb->get();
 }
diff --git a/neuralnetworks/1.3/utils/src/Execution.cpp b/neuralnetworks/1.3/utils/src/Execution.cpp
index 4dc0ddf..0ec7f56 100644
--- a/neuralnetworks/1.3/utils/src/Execution.cpp
+++ b/neuralnetworks/1.3/utils/src/Execution.cpp
@@ -29,7 +29,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <memory>
 #include <utility>
@@ -73,7 +72,7 @@
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> Execution::computeFenced(
         const std::vector<nn::SyncFence>& waitFor, const nn::OptionalTimePoint& deadline,
         const nn::OptionalDuration& timeoutDurationAfterFence) const {
-    const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
+    const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
     const auto hidlDeadline = NN_TRY(convert(deadline));
     const auto hidlTimeoutDurationAfterFence = NN_TRY(convert(timeoutDurationAfterFence));
     return kPreparedModel->executeFencedInternal(kRequest, hidlWaitFor, kMeasure, hidlDeadline,
diff --git a/neuralnetworks/1.3/utils/src/PreparedModel.cpp b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
index d5dee9d..2c81cb2 100644
--- a/neuralnetworks/1.3/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/1.3/utils/src/PreparedModel.cpp
@@ -30,12 +30,12 @@
 #include <nnapi/Result.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
+#include <nnapi/hal/1.0/HandleError.h>
+#include <nnapi/hal/1.0/ProtectCallback.h>
 #include <nnapi/hal/1.2/Conversions.h>
 #include <nnapi/hal/1.2/ExecutionBurstController.h>
 #include <nnapi/hal/1.2/ExecutionBurstUtils.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <memory>
 #include <tuple>
@@ -50,14 +50,14 @@
 
 nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionCallbackResults(
         ErrorStatus status, const V1_2::Timing& timingLaunched, const V1_2::Timing& timingFenced) {
-    HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "fenced execution callback info failed with " << toString(status);
     return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
 }
 
 nn::GeneralResult<std::pair<nn::SyncFence, nn::ExecuteFencedInfoCallback>> fencedExecutionCallback(
         ErrorStatus status, const hidl_handle& syncFence,
         const sp<IFencedExecutionCallback>& callback) {
-    HANDLE_HAL_STATUS(status) << "fenced execution failed with " << toString(status);
+    HANDLE_STATUS_HIDL(status) << "fenced execution failed with " << toString(status);
 
     auto resultSyncFence = nn::SyncFence::createAsSignaled();
     if (syncFence.getNativeHandle() != nullptr) {
@@ -127,7 +127,7 @@
             kPreparedModel->execute_1_3(request, measure, deadline, loopTimeoutDuration, cb);
     const auto status = HANDLE_TRANSPORT_FAILURE(ret);
     if (status != ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
-        HANDLE_HAL_STATUS(status) << "execution failed with " << toString(status);
+        HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
     }
 
     return cb->get();
@@ -186,7 +186,7 @@
             &maybeRequestInShared, &relocation));
 
     const auto hidlRequest = NN_TRY(convert(requestInShared));
-    const auto hidlWaitFor = NN_TRY(hal::utils::convertSyncFences(waitFor));
+    const auto hidlWaitFor = NN_TRY(convertSyncFences(waitFor));
     const auto hidlMeasure = NN_TRY(convert(measure));
     const auto hidlDeadline = NN_TRY(convert(deadline));
     const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));
diff --git a/neuralnetworks/aidl/utils/Android.bp b/neuralnetworks/aidl/utils/Android.bp
index cb67b84..2ff7534 100644
--- a/neuralnetworks/aidl/utils/Android.bp
+++ b/neuralnetworks/aidl/utils/Android.bp
@@ -40,7 +40,6 @@
     shared_libs: [
         "android.hardware.neuralnetworks-V2-ndk",
         "libbinder_ndk",
-        "libhidlbase",
     ],
     target: {
         android: {
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
index 8651912..168264b 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Callbacks.h
@@ -32,8 +32,7 @@
 namespace aidl::android::hardware::neuralnetworks::utils {
 
 // An AIDL callback class to receive the results of IDevice::prepareModel* asynchronously.
-class PreparedModelCallback final : public BnPreparedModelCallback,
-                                    public hal::utils::IProtectedCallback {
+class PreparedModelCallback final : public BnPreparedModelCallback, public IProtectedCallback {
   public:
     using Data = nn::GeneralResult<nn::SharedPreparedModel>;
 
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h
index ab1108c..92ed1cd 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ProtectCallback.h
@@ -23,7 +23,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <functional>
 #include <mutex>
@@ -34,19 +33,39 @@
 
 namespace aidl::android::hardware::neuralnetworks::utils {
 
+class IProtectedCallback {
+  public:
+    /**
+     * Marks this object as a dead object.
+     */
+    virtual void notifyAsDeadObject() = 0;
+
+    // Public virtual destructor to allow objects to be stored (and destroyed) as smart pointers.
+    // E.g., std::unique_ptr<IProtectedCallback>.
+    virtual ~IProtectedCallback() = default;
+
+  protected:
+    // Protect the non-destructor special member functions to prevent object slicing.
+    IProtectedCallback() = default;
+    IProtectedCallback(const IProtectedCallback&) = default;
+    IProtectedCallback(IProtectedCallback&&) noexcept = default;
+    IProtectedCallback& operator=(const IProtectedCallback&) = default;
+    IProtectedCallback& operator=(IProtectedCallback&&) noexcept = default;
+};
+
 // Thread safe class
 class DeathMonitor final {
   public:
     static void serviceDied(void* cookie);
     void serviceDied();
     // Precondition: `killable` must be non-null.
-    void add(hal::utils::IProtectedCallback* killable) const;
+    void add(IProtectedCallback* killable) const;
     // Precondition: `killable` must be non-null.
-    void remove(hal::utils::IProtectedCallback* killable) const;
+    void remove(IProtectedCallback* killable) const;
 
   private:
     mutable std::mutex mMutex;
-    mutable std::vector<hal::utils::IProtectedCallback*> mObjects GUARDED_BY(mMutex);
+    mutable std::vector<IProtectedCallback*> mObjects GUARDED_BY(mMutex);
 };
 
 class DeathHandler final {
@@ -62,7 +81,7 @@
     using Cleanup = std::function<void()>;
     // Precondition: `killable` must be non-null.
     [[nodiscard]] ::android::base::ScopeGuard<Cleanup> protectCallback(
-            hal::utils::IProtectedCallback* killable) const;
+            IProtectedCallback* killable) const;
 
     std::shared_ptr<DeathMonitor> getDeathMonitor() const { return kDeathMonitor; }
 
diff --git a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
index 1b149e4..3a45f8d 100644
--- a/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
+++ b/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/Utils.h
@@ -24,7 +24,6 @@
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
-#include <nnapi/hal/HandleError.h>
 
 namespace aidl::android::hardware::neuralnetworks::utils {
 
@@ -75,6 +74,13 @@
     for (const auto status = handleTransportError(ret); !status.ok();) \
     return NN_ERROR(status.error().code) << status.error().message << ": "
 
+#define HANDLE_STATUS_AIDL(status)                                                            \
+    if (const ::android::nn::ErrorStatus canonical = ::android::nn::convert(status).value_or( \
+                ::android::nn::ErrorStatus::GENERAL_FAILURE);                                 \
+        canonical == ::android::nn::ErrorStatus::NONE) {                                      \
+    } else                                                                                    \
+        return NN_ERROR(canonical)
+
 }  // namespace aidl::android::hardware::neuralnetworks::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_AIDL_UTILS_H
diff --git a/neuralnetworks/aidl/utils/src/Burst.cpp b/neuralnetworks/aidl/utils/src/Burst.cpp
index c59c10b..fb00b26 100644
--- a/neuralnetworks/aidl/utils/src/Burst.cpp
+++ b/neuralnetworks/aidl/utils/src/Burst.cpp
@@ -26,7 +26,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <memory>
 #include <mutex>
diff --git a/neuralnetworks/aidl/utils/src/Callbacks.cpp b/neuralnetworks/aidl/utils/src/Callbacks.cpp
index 8055665..a321477 100644
--- a/neuralnetworks/aidl/utils/src/Callbacks.cpp
+++ b/neuralnetworks/aidl/utils/src/Callbacks.cpp
@@ -38,7 +38,7 @@
 // nn::Version::ANDROID_S. On failure, this function returns with the appropriate nn::GeneralError.
 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
         ErrorStatus status, const std::shared_ptr<IPreparedModel>& preparedModel) {
-    HANDLE_HAL_STATUS(status) << "model preparation failed with " << toString(status);
+    HANDLE_STATUS_AIDL(status) << "model preparation failed with " << toString(status);
     return NN_TRY(PreparedModel::create(preparedModel));
 }
 
diff --git a/neuralnetworks/aidl/utils/src/Conversions.cpp b/neuralnetworks/aidl/utils/src/Conversions.cpp
index ddff3f2..45628c8 100644
--- a/neuralnetworks/aidl/utils/src/Conversions.cpp
+++ b/neuralnetworks/aidl/utils/src/Conversions.cpp
@@ -34,7 +34,6 @@
 #include <nnapi/Types.h>
 #include <nnapi/Validation.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <algorithm>
 #include <chrono>
diff --git a/neuralnetworks/aidl/utils/src/Execution.cpp b/neuralnetworks/aidl/utils/src/Execution.cpp
index 13d4f32..94edd90 100644
--- a/neuralnetworks/aidl/utils/src/Execution.cpp
+++ b/neuralnetworks/aidl/utils/src/Execution.cpp
@@ -25,7 +25,6 @@
 #include <nnapi/Result.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <memory>
 #include <utility>
diff --git a/neuralnetworks/aidl/utils/src/PreparedModel.cpp b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
index 0769016..f25c2c8 100644
--- a/neuralnetworks/aidl/utils/src/PreparedModel.cpp
+++ b/neuralnetworks/aidl/utils/src/PreparedModel.cpp
@@ -30,7 +30,6 @@
 #include <nnapi/TypeUtils.h>
 #include <nnapi/Types.h>
 #include <nnapi/hal/CommonUtils.h>
-#include <nnapi/hal/HandleError.h>
 
 #include <memory>
 #include <tuple>
@@ -51,7 +50,7 @@
 nn::GeneralResult<std::pair<nn::Timing, nn::Timing>> convertFencedExecutionResults(
         ErrorStatus status, const aidl_hal::Timing& timingLaunched,
         const aidl_hal::Timing& timingFenced) {
-    HANDLE_HAL_STATUS(status) << "fenced execution callback info failed with " << toString(status);
+    HANDLE_STATUS_AIDL(status) << "fenced execution callback info failed with " << toString(status);
     return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));
 }
 
diff --git a/neuralnetworks/aidl/utils/src/ProtectCallback.cpp b/neuralnetworks/aidl/utils/src/ProtectCallback.cpp
index 124641c..54a673c 100644
--- a/neuralnetworks/aidl/utils/src/ProtectCallback.cpp
+++ b/neuralnetworks/aidl/utils/src/ProtectCallback.cpp
@@ -22,7 +22,6 @@
 #include <android/binder_auto_utils.h>
 #include <android/binder_interface_utils.h>
 #include <nnapi/Result.h>
-#include <nnapi/hal/ProtectCallback.h>
 
 #include <algorithm>
 #include <functional>
@@ -37,7 +36,7 @@
 void DeathMonitor::serviceDied() {
     std::lock_guard guard(mMutex);
     std::for_each(mObjects.begin(), mObjects.end(),
-                  [](hal::utils::IProtectedCallback* killable) { killable->notifyAsDeadObject(); });
+                  [](IProtectedCallback* killable) { killable->notifyAsDeadObject(); });
 }
 
 void DeathMonitor::serviceDied(void* cookie) {
@@ -45,13 +44,13 @@
     deathMonitor->serviceDied();
 }
 
-void DeathMonitor::add(hal::utils::IProtectedCallback* killable) const {
+void DeathMonitor::add(IProtectedCallback* killable) const {
     CHECK(killable != nullptr);
     std::lock_guard guard(mMutex);
     mObjects.push_back(killable);
 }
 
-void DeathMonitor::remove(hal::utils::IProtectedCallback* killable) const {
+void DeathMonitor::remove(IProtectedCallback* killable) const {
     CHECK(killable != nullptr);
     std::lock_guard guard(mMutex);
     const auto removedIter = std::remove(mObjects.begin(), mObjects.end(), killable);
@@ -102,7 +101,7 @@
 }
 
 [[nodiscard]] ::android::base::ScopeGuard<DeathHandler::Cleanup> DeathHandler::protectCallback(
-        hal::utils::IProtectedCallback* killable) const {
+        IProtectedCallback* killable) const {
     CHECK(killable != nullptr);
     kDeathMonitor->add(killable);
     return ::android::base::make_scope_guard(
diff --git a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
index 698c054..3f3e225 100644
--- a/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
+++ b/neuralnetworks/aidl/vts/functional/ValidateModel.cpp
@@ -1315,8 +1315,8 @@
 
 void validateModel(const std::shared_ptr<IDevice>& device, const Model& model) {
     const auto numberOfConsumers =
-            nn::countNumberOfConsumers(model.main.operands.size(),
-                                       nn::unvalidatedConvert(model.main.operations).value())
+            countNumberOfConsumers(model.main.operands.size(),
+                                   nn::unvalidatedConvert(model.main.operations).value())
                     .value();
     mutateExecutionOrderTest(device, model, numberOfConsumers);
     mutateOperandTypeTest(device, model);
diff --git a/neuralnetworks/utils/adapter/src/PreparedModel.cpp b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
index 7397def..40c0888 100644
--- a/neuralnetworks/utils/adapter/src/PreparedModel.cpp
+++ b/neuralnetworks/utils/adapter/src/PreparedModel.cpp
@@ -36,7 +36,6 @@
 #include <nnapi/hal/1.2/Utils.h>
 #include <nnapi/hal/1.3/Conversions.h>
 #include <nnapi/hal/1.3/Utils.h>
-#include <nnapi/hal/HandleError.h>
 #include <sys/types.h>
 
 #include <memory>
diff --git a/neuralnetworks/utils/common/Android.bp b/neuralnetworks/utils/common/Android.bp
index f88e407..e02a202 100644
--- a/neuralnetworks/utils/common/Android.bp
+++ b/neuralnetworks/utils/common/Android.bp
@@ -30,20 +30,8 @@
     local_include_dirs: ["include/nnapi/hal"],
     export_include_dirs: ["include"],
     cflags: ["-Wthread-safety"],
-    static_libs: [
-        "libarect",
-        "neuralnetworks_types",
-    ],
-    shared_libs: [
-        "android.hardware.neuralnetworks-V2-ndk",
-        "libhidlbase",
-        "libbinder_ndk",
-    ],
-    target: {
-        android: {
-            shared_libs: ["libnativewindow"],
-        },
-    },
+    static_libs: ["neuralnetworks_types"],
+    shared_libs: ["libbinder_ndk"],
 }
 
 cc_test {
@@ -51,7 +39,6 @@
     host_supported: true,
     srcs: ["test/*.cpp"],
     static_libs: [
-        "android.hardware.neuralnetworks@1.0",
         "libgmock",
         "libneuralnetworks_common",
         "neuralnetworks_types",
diff --git a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
index 702ee92..ae0d092 100644
--- a/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
+++ b/neuralnetworks/utils/common/include/nnapi/hal/CommonUtils.h
@@ -17,8 +17,6 @@
 #ifndef ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
 #define ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
 
-#include <cutils/native_handle.h>
-#include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/Types.h>
@@ -125,18 +123,6 @@
         const nn::Request* request, uint32_t alignment, uint32_t padding,
         std::optional<nn::Request>* maybeRequestInSharedOut, RequestRelocation* relocationOut);
 
-nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
-        size_t numberOfOperands, const std::vector<nn::Operation>& operations);
-
-nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory);
-nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory);
-
-nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle);
-nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle);
-
-nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
-        const std::vector<nn::SyncFence>& fences);
-
 }  // namespace android::hardware::neuralnetworks::utils
 
 #endif  // ANDROID_HARDWARE_INTERFACES_NEURALNETWORKS_UTILS_COMMON_COMMON_UTILS_H
diff --git a/neuralnetworks/utils/common/src/CommonUtils.cpp b/neuralnetworks/utils/common/src/CommonUtils.cpp
index 235ba29..b249881 100644
--- a/neuralnetworks/utils/common/src/CommonUtils.cpp
+++ b/neuralnetworks/utils/common/src/CommonUtils.cpp
@@ -16,11 +16,7 @@
 
 #include "CommonUtils.h"
 
-#include "HandleError.h"
-
 #include <android-base/logging.h>
-#include <android-base/unique_fd.h>
-#include <hidl/HidlSupport.h>
 #include <nnapi/Result.h>
 #include <nnapi/SharedMemory.h>
 #include <nnapi/TypeUtils.h>
@@ -34,11 +30,6 @@
 #include <variant>
 #include <vector>
 
-#ifdef __ANDROID__
-#include <android/hardware_buffer.h>
-#include <vndk/hardware_buffer.h>
-#endif  // __ANDROID__
-
 namespace android::hardware::neuralnetworks::utils {
 namespace {
 
@@ -92,97 +83,6 @@
                   });
 }
 
-nn::GeneralResult<hidl_handle> createNativeHandleFrom(std::vector<base::unique_fd> fds,
-                                                      const std::vector<int32_t>& ints) {
-    constexpr size_t kIntMax = std::numeric_limits<int>::max();
-    CHECK_LE(fds.size(), kIntMax);
-    CHECK_LE(ints.size(), kIntMax);
-    native_handle_t* nativeHandle =
-            native_handle_create(static_cast<int>(fds.size()), static_cast<int>(ints.size()));
-    if (nativeHandle == nullptr) {
-        return NN_ERROR() << "Failed to create native_handle";
-    }
-
-    for (size_t i = 0; i < fds.size(); ++i) {
-        nativeHandle->data[i] = fds[i].release();
-    }
-    std::copy(ints.begin(), ints.end(), nativeHandle->data + nativeHandle->numFds);
-
-    hidl_handle handle;
-    handle.setTo(nativeHandle, /*shouldOwn=*/true);
-    return handle;
-}
-
-nn::GeneralResult<hidl_handle> createNativeHandleFrom(base::unique_fd fd,
-                                                      const std::vector<int32_t>& ints) {
-    std::vector<base::unique_fd> fds;
-    fds.push_back(std::move(fd));
-    return createNativeHandleFrom(std::move(fds), ints);
-}
-
-nn::GeneralResult<hidl_handle> createNativeHandleFrom(const nn::Memory::Unknown::Handle& handle) {
-    std::vector<base::unique_fd> fds = NN_TRY(nn::dupFds(handle.fds.begin(), handle.fds.end()));
-    return createNativeHandleFrom(std::move(fds), handle.ints);
-}
-
-nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Ashmem& memory) {
-    auto fd = NN_TRY(nn::dupFd(memory.fd));
-    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), {}));
-    return hidl_memory("ashmem", std::move(handle), memory.size);
-}
-
-nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Fd& memory) {
-    auto fd = NN_TRY(nn::dupFd(memory.fd));
-
-    const auto [lowOffsetBits, highOffsetBits] = nn::getIntsFromOffset(memory.offset);
-    const std::vector<int> ints = {memory.prot, lowOffsetBits, highOffsetBits};
-
-    auto handle = NN_TRY(createNativeHandleFrom(std::move(fd), ints));
-    return hidl_memory("mmap_fd", std::move(handle), memory.size);
-}
-
-nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::HardwareBuffer& memory) {
-#ifdef __ANDROID__
-    const auto* ahwb = memory.handle.get();
-    AHardwareBuffer_Desc bufferDesc;
-    AHardwareBuffer_describe(ahwb, &bufferDesc);
-
-    const bool isBlob = bufferDesc.format == AHARDWAREBUFFER_FORMAT_BLOB;
-    const size_t size = isBlob ? bufferDesc.width : 0;
-    const char* const name = isBlob ? "hardware_buffer_blob" : "hardware_buffer";
-
-    const native_handle_t* nativeHandle = AHardwareBuffer_getNativeHandle(ahwb);
-    const hidl_handle hidlHandle(nativeHandle);
-    hidl_handle copiedHandle(hidlHandle);
-
-    return hidl_memory(name, std::move(copiedHandle), size);
-#else   // __ANDROID__
-    LOG(FATAL) << "nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const "
-                  "nn::Memory::HardwareBuffer& memory): Not Available on Host Build";
-    (void)memory;
-    return (NN_ERROR() << "createHidlMemoryFrom failed").operator nn::GeneralResult<hidl_memory>();
-#endif  // __ANDROID__
-}
-
-nn::GeneralResult<hidl_memory> createHidlMemoryFrom(const nn::Memory::Unknown& memory) {
-    return hidl_memory(memory.name, NN_TRY(createNativeHandleFrom(memory.handle)), memory.size);
-}
-
-nn::GeneralResult<nn::Memory::Unknown::Handle> unknownHandleFromNativeHandle(
-        const native_handle_t* handle) {
-    if (handle == nullptr) {
-        return NN_ERROR() << "unknownHandleFromNativeHandle failed because handle is nullptr";
-    }
-
-    std::vector<base::unique_fd> fds =
-            NN_TRY(nn::dupFds(handle->data + 0, handle->data + handle->numFds));
-
-    std::vector<int> ints(handle->data + handle->numFds,
-                          handle->data + handle->numFds + handle->numInts);
-
-    return nn::Memory::Unknown::Handle{.fds = std::move(fds), .ints = std::move(ints)};
-}
-
 }  // anonymous namespace
 
 nn::Capabilities::OperandPerformanceTable makeQuantized8PerformanceConsistentWithP(
@@ -331,147 +231,4 @@
     return **maybeRequestInSharedOut;
 }
 
-nn::GeneralResult<std::vector<uint32_t>> countNumberOfConsumers(
-        size_t numberOfOperands, const std::vector<nn::Operation>& operations) {
-    return nn::countNumberOfConsumers(numberOfOperands, operations);
-}
-
-nn::GeneralResult<hidl_memory> createHidlMemoryFromSharedMemory(const nn::SharedMemory& memory) {
-    if (memory == nullptr) {
-        return NN_ERROR() << "Memory must be non-empty";
-    }
-    return std::visit([](const auto& x) { return createHidlMemoryFrom(x); }, memory->handle);
-}
-
-#ifdef __ANDROID__
-static uint32_t roundUpToMultiple(uint32_t value, uint32_t multiple) {
-    return (value + multiple - 1) / multiple * multiple;
-}
-#endif  // __ANDROID__
-
-nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const hidl_memory& memory) {
-    CHECK_LE(memory.size(), std::numeric_limits<size_t>::max());
-    if (!memory.valid()) {
-        return NN_ERROR() << "Unable to convert invalid hidl_memory";
-    }
-
-    if (memory.name() == "ashmem") {
-        if (memory.handle()->numFds != 1) {
-            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
-                              << memory.handle()->numFds << " numFds, but expected 1";
-        }
-        if (memory.handle()->numInts != 0) {
-            return NN_ERROR() << "Unable to convert invalid ashmem memory object with "
-                              << memory.handle()->numInts << " numInts, but expected 0";
-        }
-        auto handle = nn::Memory::Ashmem{
-                .fd = NN_TRY(nn::dupFd(memory.handle()->data[0])),
-                .size = static_cast<size_t>(memory.size()),
-        };
-        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
-    }
-
-    if (memory.name() == "mmap_fd") {
-        if (memory.handle()->numFds != 1) {
-            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
-                              << memory.handle()->numFds << " numFds, but expected 1";
-        }
-        if (memory.handle()->numInts != 3) {
-            return NN_ERROR() << "Unable to convert invalid mmap_fd memory object with "
-                              << memory.handle()->numInts << " numInts, but expected 3";
-        }
-
-        const int fd = memory.handle()->data[0];
-        const int prot = memory.handle()->data[1];
-        const int lower = memory.handle()->data[2];
-        const int higher = memory.handle()->data[3];
-        const size_t offset = nn::getOffsetFromInts(lower, higher);
-
-        return nn::createSharedMemoryFromFd(static_cast<size_t>(memory.size()), prot, fd, offset);
-    }
-
-    if (memory.name() != "hardware_buffer_blob") {
-        auto handle = nn::Memory::Unknown{
-                .handle = NN_TRY(unknownHandleFromNativeHandle(memory.handle())),
-                .size = static_cast<size_t>(memory.size()),
-                .name = memory.name(),
-        };
-        return std::make_shared<const nn::Memory>(nn::Memory{.handle = std::move(handle)});
-    }
-
-#ifdef __ANDROID__
-    const auto size = memory.size();
-    const auto format = AHARDWAREBUFFER_FORMAT_BLOB;
-    const auto usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN;
-    const uint32_t width = size;
-    const uint32_t height = 1;  // height is always 1 for BLOB mode AHardwareBuffer.
-    const uint32_t layers = 1;  // layers is always 1 for BLOB mode AHardwareBuffer.
-
-    // AHardwareBuffer_createFromHandle() might fail because an allocator
-    // expects a specific stride value. In that case, we try to guess it by
-    // aligning the width to small powers of 2.
-    // TODO(b/174120849): Avoid stride assumptions.
-    AHardwareBuffer* hardwareBuffer = nullptr;
-    status_t status = UNKNOWN_ERROR;
-    for (uint32_t alignment : {1, 4, 32, 64, 128, 2, 8, 16}) {
-        const uint32_t stride = roundUpToMultiple(width, alignment);
-        AHardwareBuffer_Desc desc{
-                .width = width,
-                .height = height,
-                .layers = layers,
-                .format = format,
-                .usage = usage,
-                .stride = stride,
-        };
-        status = AHardwareBuffer_createFromHandle(&desc, memory.handle(),
-                                                  AHARDWAREBUFFER_CREATE_FROM_HANDLE_METHOD_CLONE,
-                                                  &hardwareBuffer);
-        if (status == NO_ERROR) {
-            break;
-        }
-    }
-    if (status != NO_ERROR) {
-        return NN_ERROR(nn::ErrorStatus::GENERAL_FAILURE)
-               << "Can't create AHardwareBuffer from handle. Error: " << status;
-    }
-
-    return nn::createSharedMemoryFromAHWB(hardwareBuffer, /*takeOwnership=*/true);
-#else   // __ANDROID__
-    LOG(FATAL) << "nn::GeneralResult<nn::SharedMemory> createSharedMemoryFromHidlMemory(const "
-                  "hidl_memory& memory): Not Available on Host Build";
-    return (NN_ERROR() << "createSharedMemoryFromHidlMemory failed")
-            .
-            operator nn::GeneralResult<nn::SharedMemory>();
-#endif  // __ANDROID__
-}
-
-nn::GeneralResult<hidl_handle> hidlHandleFromSharedHandle(const nn::Handle& handle) {
-    base::unique_fd fd = NN_TRY(nn::dupFd(handle.get()));
-    return createNativeHandleFrom(std::move(fd), {});
-}
-
-nn::GeneralResult<nn::Handle> sharedHandleFromNativeHandle(const native_handle_t* handle) {
-    if (handle == nullptr) {
-        return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle is nullptr";
-    }
-    if (handle->numFds != 1 || handle->numInts != 0) {
-        return NN_ERROR() << "sharedHandleFromNativeHandle failed because handle does not only "
-                             "hold a single fd";
-    }
-    return nn::dupFd(handle->data[0]);
-}
-
-nn::GeneralResult<hidl_vec<hidl_handle>> convertSyncFences(
-        const std::vector<nn::SyncFence>& syncFences) {
-    hidl_vec<hidl_handle> handles(syncFences.size());
-    for (size_t i = 0; i < syncFences.size(); ++i) {
-        const auto& handle = syncFences[i].getSharedHandle();
-        if (handle == nullptr) {
-            return NN_ERROR() << "convertSyncFences failed because sync fence is empty";
-        }
-        handles[i] = NN_TRY(hidlHandleFromSharedHandle(*handle));
-    }
-    return handles;
-}
-
 }  // namespace android::hardware::neuralnetworks::utils