[lavapipe] Support for VK_FUCHSIA_buffer_collection
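
Adds the VK_FUCHSIA_buffer_collection entry points (create/destroy collection, set
buffer/image constraints, query properties) and teaches vkAllocateMemory to import
VMOs from a sysmem buffer collection. A rough sketch of the client-side flow these
entry points serve (illustrative only; it assumes `device`, a sysmem
BufferCollectionToken handle `token`, a filled-in VkImageCreateInfo `image_info`,
and an allocation `size` are already set up, and it omits error handling):

  VkBufferCollectionCreateInfoFUCHSIA create_info = {
     .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA,
     .collectionToken = token,  /* zx_handle_t to a sysmem BufferCollectionToken */
  };
  VkBufferCollectionFUCHSIA collection;
  vkCreateBufferCollectionFUCHSIA(device, &create_info, NULL, &collection);

  /* Constraints must be set before querying properties or allocating,
     otherwise sysmem allocation never completes. */
  VkSysmemColorSpaceFUCHSIA color_space = {
     .sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
     .colorSpace = 1,  /* assumed: fuchsia.sysmem ColorSpaceType.SRGB */
  };
  VkImageFormatConstraintsInfoFUCHSIA format_constraints = {
     .sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
     .imageCreateInfo = image_info,
     .requiredFormatFeatures = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT,
     .colorSpaceCount = 1,
     .pColorSpaces = &color_space,
  };
  VkImageConstraintsInfoFUCHSIA constraints_info = {
     .sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
     .formatConstraintsCount = 1,
     .pFormatConstraints = &format_constraints,
     .bufferCollectionConstraints = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
        .minBufferCount = 1,
     },
  };
  vkSetBufferCollectionImageConstraintsFUCHSIA(device, collection, &constraints_info);

  VkBufferCollectionPropertiesFUCHSIA properties = {
     .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES_FUCHSIA,
  };
  vkGetBufferCollectionPropertiesFUCHSIA(device, collection, &properties);

  VkImportMemoryBufferCollectionFUCHSIA import_info = {
     .sType = VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA,
     .collection = collection,
     .index = 0,
  };
  VkMemoryAllocateInfo alloc_info = {
     .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
     .pNext = &import_info,
     .allocationSize = size,
     .memoryTypeIndex = 0,  /* Lavapipe reports memoryTypeBits == 1 */
  };
  VkDeviceMemory memory;
  vkAllocateMemory(device, &alloc_info, NULL, &memory);

  vkDestroyBufferCollectionFUCHSIA(device, collection, NULL);
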
Bug: 319143867
Change-Id: Ie0dc8c75349d0d6022044091b2a1b365e47808b0
Reviewed-on: https://fuchsia-review.googlesource.com/c/third_party/mesa/+/1009656
Commit-Queue: Josh Gargus <jjosh@google.com>
Reviewed-by: John Rosasco <rosasco@google.com>
diff --git a/src/gallium/frontends/lavapipe/BUILD.gn b/src/gallium/frontends/lavapipe/BUILD.gn
index 21962c1..905fc67 100644
--- a/src/gallium/frontends/lavapipe/BUILD.gn
+++ b/src/gallium/frontends/lavapipe/BUILD.gn
@@ -99,6 +99,7 @@
public_configs = [ ":lavapipe_config" ]
deps = [
+ ":fuchsia_buffer_collection_extension",
":lvp_entrypoints",
"$mesa_build_root/src:sha1",
"$mesa_build_root/src/compiler/nir",
@@ -144,3 +145,43 @@
"lvp_wsi.c",
]
}
+
+# Implements VK_FUCHSIA_buffer_collection.
+#
+# Unlike most ICDs, the Lavapipe ICD runs entirely on the CPU in the client process, with no need
+# to communicate with a privileged system driver. Magma, whose job is to provide a framework for
+# communicating with the system driver, might therefore seem unnecessary here, and for rendering
+# alone it is. However, to support VK_FUCHSIA_buffer_collection, Lavapipe must interact with
+# sysmem, and Magma provides sysmem utilities that make this much easier.
+mesa_source_set("fuchsia_buffer_collection_extension") {
+ sources = [ "lvp_fuchsia_buffer_collection.c" ]
+
+ deps = [
+ # See the target-level comment for why Lavapipe uses Magma.
+ "$magma_build_root/src/libmagma:magma_sysmem",
+ "$mesa_build_root/include:vulkan",
+ "//src/graphics/magma/lib/magma/platform/zircon:logger_for_stderr",
+ ]
+
+ if (target_os == "fuchsia") {
+ deps += [
+ "$mesa_build_root/src/vulkan/runtime:zircon",
+ "$mesa_build_root/src/vulkan/wsi:stub",
+ ]
+ } else {
+ deps += [
+ "$mesa_build_root/src/vulkan/runtime",
+ "$mesa_build_root/src/vulkan/wsi",
+ ]
+ }
+
+ public_deps = [
+ # See the target-level comment for why Lavapipe uses Magma.
+ "//sdk/lib/magma_client:magma_headers",
+ ]
+
+ configs = [
+ ":lavapipe_config",
+ "//third_party/mesa:LLVMHeader_config",
+ ]
+}
diff --git a/src/gallium/frontends/lavapipe/lvp_device.c b/src/gallium/frontends/lavapipe/lvp_device.c
index 9acd022..847ed8d 100644
--- a/src/gallium/frontends/lavapipe/lvp_device.c
+++ b/src/gallium/frontends/lavapipe/lvp_device.c
@@ -47,6 +47,8 @@
#if defined(VK_USE_PLATFORM_FUCHSIA)
#include <zircon/process.h>
#include <zircon/syscalls.h>
+#include "lvp_fuchsia.h"
+#include "lvp_fuchsia_buffer_collection.h"
#include "lvp_fuchsia_memory.h"
#include "vulkan/runtime/vk_zircon_syncobj.h"
#endif
@@ -201,8 +203,13 @@
.GOOGLE_decorate_string = true,
.GOOGLE_hlsl_functionality1 = true,
#if defined(PIPE_MEMORY_FUCHSIA)
+ // TODO(https://fxbug.dev/331684866): vkext test requires EXT_queue_family_foreign. It seems
+ // sufficient merely to claim support here (with no other changes). However, this may result
+ // in bugs that our tests don't catch.
+ .EXT_queue_family_foreign = true,
.FUCHSIA_external_memory = true,
.FUCHSIA_external_semaphore = true,
+ .FUCHSIA_buffer_collection = true,
#endif
};
@@ -1656,6 +1663,14 @@
device->pscreen = physical_device->pscreen;
+#ifdef VK_USE_PLATFORM_FUCHSIA
+ result = lvp_fuchsia_device_init(device);
+ if (result != VK_SUCCESS) {
+ vk_free(&device->vk.alloc, device);
+ return result;
+ }
+#endif
+
assert(pCreateInfo->queueCreateInfoCount == 1);
assert(pCreateInfo->pQueueCreateInfos[0].queueFamilyIndex == 0);
assert(pCreateInfo->pQueueCreateInfos[0].queueCount == 1);
@@ -1735,6 +1750,7 @@
ASSERTED const VkImportMemoryFdInfoKHR *import_info = NULL;
#ifdef VK_USE_PLATFORM_FUCHSIA
const VkImportMemoryZirconHandleInfoFUCHSIA *zircon_handle_info = NULL;
+ const VkImportMemoryBufferCollectionFUCHSIA* fuchsia_buffer_collection = NULL;
#endif
const VkImportMemoryHostPointerInfoEXT *host_ptr_info = NULL;
VkResult error = VK_ERROR_OUT_OF_DEVICE_MEMORY;
@@ -1770,6 +1786,10 @@
zircon_handle_info = (void *)ext;
assert(zircon_handle_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA);
break;
+
+ case VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA:
+ fuchsia_buffer_collection = (void *)ext;
+ break;
#endif
default:
@@ -1831,32 +1851,29 @@
else if (zircon_handle_info) {
assert(zircon_handle_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA);
- /* TODO(https://fxbug.dev/321122796):
- Verify whether Lavapipe needs allocations to be page-aligned. */
VkDeviceSize aligned_alloc_size =
align_u64(pAllocateInfo->allocationSize, zx_system_get_page_size());
- uint64_t vmo_size = 0;
- error = lvp_fuchsia_get_vmo_size(zircon_handle_info->handle, &vmo_size);
+ error = lvp_fuchsia_import_vmo(zircon_handle_info->handle, aligned_alloc_size, mem);
if (error != VK_SUCCESS)
goto fail;
- assert(vmo_size >= aligned_alloc_size);
+ mem->type_index = pAllocateInfo->memoryTypeIndex;
+ }
+ else if (fuchsia_buffer_collection) {
+ LVP_FROM_HANDLE(lvp_buffer_collection, buffer_collection, fuchsia_buffer_collection->collection);
- zx_vaddr_t vmo_ptr = 0;
- error = lvp_fuchsia_map_vmo(zircon_handle_info->handle, aligned_alloc_size, &vmo_ptr);
+ VkDeviceSize aligned_alloc_size =
+ align_u64(pAllocateInfo->allocationSize, zx_system_get_page_size());
+
+ error = fuchsia_allocate_memory_from_buffer_collection(
+ device, buffer_collection, fuchsia_buffer_collection->index, aligned_alloc_size, mem);
if (error != VK_SUCCESS)
goto fail;
- mem->pmem = (void*)vmo_ptr;
- mem->memory_type = LVP_DEVICE_MEMORY_TYPE_ZIRCON_VMO;
- mem->map_size = aligned_alloc_size;
- mem->vmo_handle = zircon_handle_info->handle;
mem->type_index = pAllocateInfo->memoryTypeIndex;
}
else if (export_info && export_info->handleTypes) {
- /* TODO(https://fxbug.dev/321122796):
- Verify whether Lavapipe needs allocations to be page-aligned. */
VkDeviceSize aligned_alloc_size =
align_u64(pAllocateInfo->allocationSize, zx_system_get_page_size());
diff --git a/src/gallium/frontends/lavapipe/lvp_formats.c b/src/gallium/frontends/lavapipe/lvp_formats.c
index 0a25157..8b91aa1 100644
--- a/src/gallium/frontends/lavapipe/lvp_formats.c
+++ b/src/gallium/frontends/lavapipe/lvp_formats.c
@@ -58,7 +58,7 @@
}
}
-static void
+void
lvp_physical_device_get_format_properties(struct lvp_physical_device *physical_device,
VkFormat format,
VkFormatProperties3 *out_properties)
diff --git a/src/gallium/frontends/lavapipe/lvp_fuchsia.c b/src/gallium/frontends/lavapipe/lvp_fuchsia.c
index a40dfe1..15d2481 100644
--- a/src/gallium/frontends/lavapipe/lvp_fuchsia.c
+++ b/src/gallium/frontends/lavapipe/lvp_fuchsia.c
@@ -21,11 +21,19 @@
* IN THE SOFTWARE.
*/
+#include "lvp_fuchsia.h"
+
#include "macros.h"
+
#include "os/fuchsia.h"
+
#include "vulkan/vulkan_core.h"
- /* Called by Vulkan-Loader. */
+VkResult lvp_fuchsia_device_init(struct lvp_device *device) {
+ return lvp_fuchsia_get_magma_sysmem_connection(&device->sysmem_connection);
+}
+
+/* Called by Vulkan-Loader. */
typedef VkResult(VKAPI_PTR* PFN_vkOpenInNamespaceAddr)(const char* pName, uint32_t handle);
diff --git a/src/gallium/frontends/lavapipe/lvp_fuchsia.h b/src/gallium/frontends/lavapipe/lvp_fuchsia.h
new file mode 100644
index 0000000..7d17ed3
--- /dev/null
+++ b/src/gallium/frontends/lavapipe/lvp_fuchsia.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2024 Google, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include "lvp_private.h"
+
+VkResult lvp_fuchsia_device_init(struct lvp_device *device);
+
+VkResult lvp_fuchsia_get_magma_sysmem_connection(uint64_t* connection_out);
diff --git a/src/gallium/frontends/lavapipe/lvp_fuchsia_buffer_collection.c b/src/gallium/frontends/lavapipe/lvp_fuchsia_buffer_collection.c
new file mode 100644
index 0000000..62cf8b2
--- /dev/null
+++ b/src/gallium/frontends/lavapipe/lvp_fuchsia_buffer_collection.c
@@ -0,0 +1,613 @@
+/*
+ * Copyright © 2023 Google, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "lvp_fuchsia_buffer_collection.h"
+
+#include "lvp_fuchsia.h"
+#include "lvp_fuchsia_memory.h"
+#include "lvp_private.h"
+
+#include "os/fuchsia.h"
+#include "vk_alloc.h"
+#include "vk_object.h"
+#include "vulkan/vulkan_core.h"
+
+#include <zircon/syscalls.h>
+
+#include <assert.h>
+#include <inttypes.h>
+
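+/* Logs the file/line and error code when `ret` is nonzero; evaluates to `ret` either way. */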
+#define LVP_LOG_VK_ERROR(ret) \
+ (ret != 0 ? mesa_loge("%s:%d Returning error %" PRId64, __FILE__, __LINE__, (int64_t)ret), ret : ret)
+
+VkResult lvp_fuchsia_get_magma_sysmem_connection(uint64_t* connection_out)
+{
+ zx_handle_t client_handle = ZX_HANDLE_INVALID;
+ if (!fuchsia_open("/svc/fuchsia.sysmem.Allocator", &client_handle)) {
+ *connection_out = 0;
+ return LVP_LOG_VK_ERROR(VK_ERROR_NOT_PERMITTED_KHR);
+ }
+
+ magma_status_t status = magma_sysmem_connection_import(client_handle, connection_out);
+ if (status != MAGMA_STATUS_OK) {
+ *connection_out = 0;
+ return LVP_LOG_VK_ERROR(VK_ERROR_UNKNOWN);
+ }
+ return VK_SUCCESS;
+}
+
+VkResult lvp_CreateBufferCollectionFUCHSIA(
+ VkDevice _device,
+ const VkBufferCollectionCreateInfoFUCHSIA* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBufferCollectionFUCHSIA* pCollection)
+{
+ LVP_FROM_HANDLE(lvp_device, device, _device);
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA);
+
+ magma_sysmem_connection_t sysmem_connection = device->sysmem_connection;
+ if (!sysmem_connection) {
+ return LVP_LOG_VK_ERROR(VK_ERROR_UNKNOWN);
+ }
+
+ magma_buffer_collection_t magma_buffer_collection;
+ magma_status_t status = magma_sysmem_connection_import_buffer_collection(
+ sysmem_connection, pCreateInfo->collectionToken, &magma_buffer_collection);
+ if (status != MAGMA_STATUS_OK)
+ return LVP_LOG_VK_ERROR(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+
+ struct lvp_buffer_collection* buffer_collection =
+ vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*buffer_collection), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+ if (!buffer_collection) {
+ magma_buffer_collection_release2(magma_buffer_collection);
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ vk_object_base_init(&device->vk, &buffer_collection->base, VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA);
+
+ buffer_collection->buffer_collection = magma_buffer_collection;
+ buffer_collection->constraints = 0;
+
+ *pCollection = lvp_buffer_collection_to_handle(buffer_collection);
+
+ return VK_SUCCESS;
+}
+
+void lvp_DestroyBufferCollectionFUCHSIA(
+ VkDevice vk_device,
+ VkBufferCollectionFUCHSIA vk_collection,
+ const VkAllocationCallbacks* pAllocator)
+{
+ LVP_FROM_HANDLE(lvp_device, device, vk_device);
+ LVP_FROM_HANDLE(lvp_buffer_collection, buffer_collection, vk_collection);
+
+ if (!device->sysmem_connection) {
+ mesa_loge("%s:%d lvp_DestroyBufferCollectionFUCHSIA: no sysmem connection.", __FILE__, __LINE__);
+ return;
+ }
+
+ if (buffer_collection->constraints) {
+ magma_buffer_constraints_release2(buffer_collection->constraints);
+ }
+
+ magma_buffer_collection_release2(buffer_collection->buffer_collection);
+ vk_free2(&device->vk.alloc, pAllocator, buffer_collection);
+}
+
+static VkFormat sysmem_to_vk_format(uint32_t sysmem_format)
+{
+ switch (sysmem_format) {
+ case MAGMA_FORMAT_BGRA32:
+ return VK_FORMAT_B8G8R8A8_UNORM;
+ case MAGMA_FORMAT_R8G8B8A8:
+ return VK_FORMAT_R8G8B8A8_UNORM;
+ case MAGMA_FORMAT_NV12:
+ return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
+ case MAGMA_FORMAT_I420:
+ return VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
+ case MAGMA_FORMAT_L8:
+ case MAGMA_FORMAT_R8:
+ return VK_FORMAT_R8_UNORM;
+ case MAGMA_FORMAT_R8G8:
+ return VK_FORMAT_R8G8_UNORM;
+ default:
+ return VK_FORMAT_UNDEFINED;
+ }
+}
+
+static VkFormatFeatureFlags
+get_image_format_features(struct lvp_physical_device *physical_device, VkFormat format, VkImageTiling tiling)
+{
+ VkFormatProperties3 format_props;
+ lvp_physical_device_get_format_properties(physical_device, format, &format_props);
+ if (tiling == VK_IMAGE_TILING_LINEAR) {
+ return format_props.linearTilingFeatures;
+ } else if (tiling == VK_IMAGE_TILING_OPTIMAL) {
+ return format_props.optimalTilingFeatures;
+ } else {
+ unreachable("unknown VkImageTiling");
+ return 0;
+ }
+}
+
+VkResult lvp_GetBufferCollectionPropertiesFUCHSIA(
+ VkDevice vk_device,
+ VkBufferCollectionFUCHSIA vk_collection,
+ VkBufferCollectionPropertiesFUCHSIA* pProperties)
+{
+ LVP_FROM_HANDLE(lvp_device, device, vk_device);
+ LVP_FROM_HANDLE(lvp_buffer_collection, buffer_collection, vk_collection);
+
+ if (!device->sysmem_connection)
+ return LVP_LOG_VK_ERROR(VK_ERROR_UNKNOWN);
+
+ magma_collection_info_t collection_info = {0};
+ magma_status_t status = magma_buffer_collection_get_collection_info(
+ buffer_collection->buffer_collection,
+ &collection_info);
+ if (status != MAGMA_STATUS_OK)
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ uint32_t count = 0;
+ status = magma_collection_info_get_buffer_count(collection_info, &count);
+ if (status != MAGMA_STATUS_OK) {
+ magma_collection_info_release(collection_info);
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ uint32_t sysmem_format = MAGMA_FORMAT_INVALID;
+ status = magma_collection_info_get_format(collection_info, &sysmem_format);
+ if (status != MAGMA_STATUS_OK) {
+ magma_collection_info_release(collection_info);
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ pProperties->sysmemPixelFormat = sysmem_format;
+
+ magma_bool_t has_format_modifier = false;
+ uint64_t format_modifier = 0;
+ status = magma_collection_info_get_format_modifier(collection_info, &has_format_modifier,
+ &format_modifier);
+ if (status != MAGMA_STATUS_OK) {
+ magma_collection_info_release(collection_info);
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ uint32_t color_space = MAGMA_COLORSPACE_INVALID;
+ magma_collection_info_get_color_space(collection_info, &color_space);
+ // Colorspace may be invalid for non-images, so ignore error.
+
+ pProperties->sysmemColorSpaceIndex.colorSpace = color_space;
+ pProperties->samplerYcbcrConversionComponents.r = VK_COMPONENT_SWIZZLE_IDENTITY;
+ pProperties->samplerYcbcrConversionComponents.g = VK_COMPONENT_SWIZZLE_IDENTITY;
+ pProperties->samplerYcbcrConversionComponents.b = VK_COMPONENT_SWIZZLE_IDENTITY;
+ pProperties->samplerYcbcrConversionComponents.a = VK_COMPONENT_SWIZZLE_IDENTITY;
+
+ pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
+ pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW;
+ pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_COSITED_EVEN;
+ pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
+
+ switch (color_space) {
+ case MAGMA_COLORSPACE_REC601_NTSC:
+ case MAGMA_COLORSPACE_REC601_PAL:
+ pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601;
+ break;
+
+ case MAGMA_COLORSPACE_REC601_NTSC_FULL_RANGE:
+ case MAGMA_COLORSPACE_REC601_PAL_FULL_RANGE:
+ pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601;
+ pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
+ break;
+
+ case MAGMA_COLORSPACE_REC709:
+ pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709;
+ break;
+
+ case MAGMA_COLORSPACE_REC2020:
+ pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020;
+ break;
+
+ case MAGMA_COLORSPACE_SRGB:
+ pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
+ pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
+ break;
+
+ default:
+ pProperties->suggestedYcbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
+ pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
+ break;
+ }
+
+ pProperties->createInfoIndex = 0;
+
+ if (buffer_collection->constraints) {
+ magma_bool_t format_valid[MAX_BUFFER_COLLECTION_FORMAT_INDICES];
+ status = magma_collection_info_get_format_index(
+ collection_info, buffer_collection->constraints, format_valid,
+ MAX_BUFFER_COLLECTION_FORMAT_INDICES);
+ if (status != MAGMA_STATUS_OK) {
+ magma_collection_info_release(collection_info);
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ // Choose the first valid format for now.
+ for (uint32_t i = 0; i < MAX_BUFFER_COLLECTION_FORMAT_INDICES; i++) {
+ if (format_valid[i]) {
+ pProperties->createInfoIndex = buffer_collection->format_index_input_index_map[i];
+ break;
+ }
+ }
+ }
+
+ if (!has_format_modifier) {
+ format_modifier = MAGMA_FORMAT_MODIFIER_LINEAR;
+ }
+
+ {
+ VkFormat format = sysmem_to_vk_format(sysmem_format);
+ const struct lvp_physical_device* physical_device = device->physical_device;
+ if (format_modifier == MAGMA_FORMAT_MODIFIER_LINEAR) {
+ pProperties->formatFeatures = get_image_format_features(physical_device,
+ format,
+ VK_IMAGE_TILING_LINEAR);
+ } else {
+ pProperties->formatFeatures = get_image_format_features(physical_device,
+ format,
+ VK_IMAGE_TILING_OPTIMAL);
+ }
+ }
+
+ magma_collection_info_release(collection_info);
+
+ pProperties->bufferCount = count;
+
+ if (pProperties->bufferCount < 1) {
+ pProperties->memoryTypeBits = 0u;
+ } else {
+ /* Lavapipe supports exactly one memory type. */
+ pProperties->memoryTypeBits = 1;
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult lvp_SetBufferCollectionBufferConstraintsFUCHSIA(
+ VkDevice vk_device,
+ VkBufferCollectionFUCHSIA vk_collection,
+ const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo)
+{
+ LVP_FROM_HANDLE(lvp_device, device, vk_device);
+ LVP_FROM_HANDLE(lvp_buffer_collection, buffer_collection, vk_collection);
+
+ magma_sysmem_connection_t sysmem_connection = device->sysmem_connection;
+ if (!sysmem_connection)
+ return LVP_LOG_VK_ERROR(VK_ERROR_UNKNOWN);
+
+ magma_buffer_format_constraints_t format_constraints = {
+ .count = pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCount,
+ .usage = 0,
+ .secure_permitted = false,
+ .secure_required = false,
+ /* TODO(https://fxbug.dev/330572161): only CPU domain is supported. In order to support
+ RAM domain, Lavapipe would need to perform cache clean and invalidate operations on
+ VK_QUEUE_FAMILY_EXTERNAL and VK_QUEUE_FAMILY_FOREIGN. */
+ .ram_domain_supported = false,
+ .cpu_domain_supported = true,
+ .min_size_bytes = pBufferConstraintsInfo->createInfo.size,
+ .options = MAGMA_BUFFER_FORMAT_CONSTRAINT_OPTIONS_EXTRA_COUNTS,
+ .min_buffer_count_for_camping =
+ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
+ .min_buffer_count_for_shared_slack =
+ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack,
+ .min_buffer_count_for_dedicated_slack =
+ pBufferConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
+ .max_buffer_count = pBufferConstraintsInfo->bufferCollectionConstraints.maxBufferCount};
+
+ magma_sysmem_buffer_constraints_t constraints = {0};
+ magma_status_t status = magma_sysmem_connection_create_buffer_constraints(sysmem_connection,
+ &format_constraints,
+ &constraints);
+ if (status != MAGMA_STATUS_OK)
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ status =
+ magma_buffer_collection_set_constraints2(buffer_collection->buffer_collection, constraints);
+
+ magma_buffer_constraints_release2(constraints);
+
+ if (status != MAGMA_STATUS_OK)
+ return LVP_LOG_VK_ERROR(VK_ERROR_FORMAT_NOT_SUPPORTED);
+
+ return VK_SUCCESS;
+}
+
+static VkResult get_image_format_constraints(
+ VkDevice vk_device, VkFormat format, const VkImageCreateInfo* image_create_info,
+ bool aux_buffer, const VkImageFormatConstraintsInfoFUCHSIA* format_constraints,
+ magma_image_format_constraints_t* image_constraints_out)
+{
+ LVP_FROM_HANDLE(lvp_device, device, vk_device);
+
+ /* TODO(https://fxbug.dev/332422422): Revisit these values. */
+ const uint32_t kBytesPerRowDivisor = 4;
+ const uint32_t kMinBytesPerRow = 16;
+
+ assert(image_create_info->extent.width);
+ magma_image_format_constraints_t image_constraints = {.width = image_create_info->extent.width,
+ .height = image_create_info->extent.height,
+ .layers = 1,
+ .bytes_per_row_divisor = kBytesPerRowDivisor,
+ .min_bytes_per_row = kMinBytesPerRow};
+
+ bool is_yuv_format = false;
+ switch (format) {
+ case VK_FORMAT_B8G8R8A8_SINT:
+ case VK_FORMAT_B8G8R8A8_UNORM:
+ case VK_FORMAT_B8G8R8A8_SRGB:
+ case VK_FORMAT_B8G8R8A8_SNORM:
+ case VK_FORMAT_B8G8R8A8_SSCALED:
+ case VK_FORMAT_B8G8R8A8_USCALED:
+ image_constraints.image_format = MAGMA_FORMAT_BGRA32;
+ break;
+ case VK_FORMAT_R8G8B8A8_SINT:
+ case VK_FORMAT_R8G8B8A8_UNORM:
+ case VK_FORMAT_R8G8B8A8_SRGB:
+ case VK_FORMAT_R8G8B8A8_SNORM:
+ case VK_FORMAT_R8G8B8A8_SSCALED:
+ case VK_FORMAT_R8G8B8A8_USCALED:
+ image_constraints.image_format = MAGMA_FORMAT_R8G8B8A8;
+ break;
+ case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
+ image_constraints.image_format = MAGMA_FORMAT_NV12;
+ is_yuv_format = true;
+ break;
+ case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+ image_constraints.image_format = MAGMA_FORMAT_I420;
+ is_yuv_format = true;
+ break;
+ case VK_FORMAT_R8_UNORM:
+ image_constraints.image_format = MAGMA_FORMAT_R8;
+ if (format_constraints && format_constraints->sysmemPixelFormat) {
+ if (format_constraints->sysmemPixelFormat == MAGMA_FORMAT_L8) {
+ image_constraints.image_format = MAGMA_FORMAT_L8;
+ } else if (format_constraints->sysmemPixelFormat != MAGMA_FORMAT_R8) {
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+ }
+ }
+ break;
+ case VK_FORMAT_R8G8_UNORM:
+ image_constraints.image_format = MAGMA_FORMAT_R8G8;
+ break;
+ case VK_FORMAT_R5G6B5_UNORM_PACK16:
+ image_constraints.image_format = MAGMA_FORMAT_RGB565;
+ break;
+ default:
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+ }
+
+ image_constraints.has_format_modifier = false;
+ assert(!aux_buffer);
+
+ *image_constraints_out = image_constraints;
+
+ return VK_SUCCESS;
+}
+
+VkResult lvp_SetBufferCollectionImageConstraintsFUCHSIA(
+ VkDevice vk_device,
+ VkBufferCollectionFUCHSIA vk_collection,
+ const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo)
+{
+ LVP_FROM_HANDLE(lvp_device, device, vk_device);
+ LVP_FROM_HANDLE(lvp_buffer_collection, collection, vk_collection);
+
+ // Can't set constraints twice.
+ if (collection->constraints)
+ return LVP_LOG_VK_ERROR(VK_ERROR_INITIALIZATION_FAILED);
+
+ const magma_sysmem_connection_t sysmem_connection = device->sysmem_connection;
+ if (!sysmem_connection)
+ return LVP_LOG_VK_ERROR(VK_ERROR_UNKNOWN);
+
+ if (pImageConstraintsInfo->formatConstraintsCount < 1) {
+ assert(!(pImageConstraintsInfo->formatConstraintsCount < 1));
+ return LVP_LOG_VK_ERROR(VK_ERROR_FORMAT_NOT_SUPPORTED);
+ }
+
+ const bool have_format_constraints = (pImageConstraintsInfo->pFormatConstraints != NULL);
+
+ // Secure formats not supported.
+ for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; ++i) {
+ bool secure_required =
+ (pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo.flags &
+ VK_IMAGE_CREATE_PROTECTED_BIT);
+
+ if (secure_required) {
+ assert(!secure_required);
+ return LVP_LOG_VK_ERROR(VK_ERROR_FORMAT_NOT_SUPPORTED);
+ }
+ }
+
+ magma_sysmem_buffer_constraints_t buffer_constraints;
+ magma_status_t status = MAGMA_STATUS_OK;
+
+ // Create the buffer constraints.
+ {
+ magma_buffer_format_constraints_t format_constraints = {
+ .count = pImageConstraintsInfo->bufferCollectionConstraints.minBufferCount,
+ .usage = 0,
+ .secure_permitted = false,
+ .secure_required = false,
+ /* TODO(https://fxbug.dev/330572161): only CPU domain is supported. In order to support
+ RAM domain, Lavapipe would need to perform cache clean and invalidate operations on
+ VK_QUEUE_FAMILY_EXTERNAL and VK_QUEUE_FAMILY_FOREIGN. */
+ .ram_domain_supported = false,
+ .cpu_domain_supported = true,
+ .min_size_bytes = 0,
+ .options = MAGMA_BUFFER_FORMAT_CONSTRAINT_OPTIONS_EXTRA_COUNTS,
+ .min_buffer_count_for_camping =
+ pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForCamping,
+ .min_buffer_count_for_shared_slack =
+ pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForSharedSlack,
+ .min_buffer_count_for_dedicated_slack =
+ pImageConstraintsInfo->bufferCollectionConstraints.minBufferCountForDedicatedSlack,
+ .max_buffer_count = pImageConstraintsInfo->bufferCollectionConstraints.maxBufferCount};
+
+ status = magma_sysmem_connection_create_buffer_constraints(sysmem_connection,
+ &format_constraints,
+ &buffer_constraints);
+ if (status != MAGMA_STATUS_OK)
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ uint32_t format_index = 0;
+
+ // Set format slots for each image info.
+ for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount; ++i) {
+ const VkImageCreateInfo* pCreateInfo =
+ &pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
+ VkFormat format = pCreateInfo->format;
+
+ const struct lvp_physical_device* physical_device = device->physical_device;
+ const VkFormatFeatureFlags linear_flags =
+ get_image_format_features(physical_device, format, VK_IMAGE_TILING_LINEAR);
+
+ const VkImageFormatConstraintsInfoFUCHSIA* format_constraints =
+ have_format_constraints ? &pImageConstraintsInfo->pFormatConstraints[i] : NULL;
+
+ const uint32_t color_space_count =
+ format_constraints ? format_constraints->colorSpaceCount : 0;
+ uint32_t color_spaces[color_space_count > 0 ? color_space_count : 1];
+
+ for (uint32_t j = 0; j < color_space_count; ++j) {
+ color_spaces[j] = format_constraints->pColorSpaces[j].colorSpace;
+ }
+
+ magma_image_format_constraints_t image_constraints = {0};
+ bool image_constraints_valid = false;
+
+ VkResult result = get_image_format_constraints(vk_device, format, pCreateInfo,
+ /*aux_buffer*/ false, format_constraints,
+ &image_constraints);
+ if (result != VK_SUCCESS)
+ continue;
+
+ image_constraints_valid =
+ !format_constraints || !(~linear_flags & format_constraints->requiredFormatFeatures);
+
+ if (!image_constraints_valid)
+ continue;
+
+ // Currently every Vulkan format maps to only one sysmem format, so ensure the client is
+ // using the same format.
+ if (format_constraints && format_constraints->sysmemPixelFormat &&
+ (format_constraints->sysmemPixelFormat != image_constraints.image_format)) {
+ continue;
+ }
+
+ collection->format_index_input_index_map[format_index] = i;
+
+ status = magma_buffer_constraints_set_format2(buffer_constraints, format_index,
+ &image_constraints);
+ if (status != MAGMA_STATUS_OK) {
+ magma_buffer_constraints_release2(buffer_constraints);
+ return LVP_LOG_VK_ERROR(VK_ERROR_FORMAT_NOT_SUPPORTED);
+ }
+
+ if (color_space_count) {
+ magma_buffer_constraints_set_colorspaces2(buffer_constraints, format_index,
+ color_space_count, color_spaces);
+ }
+
+ format_index += 1;
+ assert(format_index < MAX_BUFFER_COLLECTION_FORMAT_INDICES);
+ if (format_index >= MAX_BUFFER_COLLECTION_FORMAT_INDICES) {
+ magma_buffer_constraints_release2(buffer_constraints);
+ return LVP_LOG_VK_ERROR(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ }
+
+ if (format_index == 0) {
+ magma_buffer_constraints_release2(buffer_constraints);
+ return LVP_LOG_VK_ERROR(VK_ERROR_FORMAT_NOT_SUPPORTED);
+ }
+
+ status = magma_buffer_collection_set_constraints2(collection->buffer_collection,
+ buffer_constraints);
+ if (status != MAGMA_STATUS_OK) {
+ magma_buffer_constraints_release2(buffer_constraints);
+ return LVP_LOG_VK_ERROR(VK_ERROR_FORMAT_NOT_SUPPORTED);
+ }
+
+ collection->constraints = buffer_constraints;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+fuchsia_allocate_memory_from_buffer_collection(
+ const struct lvp_device* device,
+ const struct lvp_buffer_collection* buffer_collection,
+ uint32_t index,
+ uint64_t alloc_size,
+ struct lvp_device_memory* memory_out)
+{
+ {
+ magma_collection_info_t collection_info;
+ magma_status_t status = magma_buffer_collection_get_collection_info(
+ buffer_collection->buffer_collection, &collection_info);
+ if (status != MAGMA_STATUS_OK)
+ return LVP_LOG_VK_ERROR(VK_ERROR_UNKNOWN);
+
+ uint32_t coherency_domain;
+ status = magma_collection_info_get_coherency_domain(collection_info, &coherency_domain);
+ magma_collection_info_release(collection_info);
+
+ if (status != MAGMA_STATUS_OK)
+ return LVP_LOG_VK_ERROR(VK_ERROR_FORMAT_NOT_SUPPORTED);
+
+ /* TODO(https://fxbug.dev/330572161): only CPU domain is supported. In order to support
+ RAM domain, Lavapipe would need to perform cache clean and invalidate operations on
+ VK_QUEUE_FAMILY_EXTERNAL and VK_QUEUE_FAMILY_FOREIGN. */
+ assert(coherency_domain == MAGMA_COHERENCY_DOMAIN_CPU);
+ }
+
+ uint32_t vmo_handle;
+ {
+ uint32_t offset;
+ if (magma_buffer_collection_get_buffer_handle(buffer_collection->buffer_collection, index,
+ &vmo_handle, &offset) != MAGMA_STATUS_OK) {
+ return LVP_LOG_VK_ERROR(VK_ERROR_UNKNOWN);
+ }
+ }
+
+ VkResult error = lvp_fuchsia_import_vmo(vmo_handle, alloc_size, memory_out);
+ if (error != VK_SUCCESS) {
+ return LVP_LOG_VK_ERROR(error);
+ }
+
+ return VK_SUCCESS;
+}
diff --git a/src/gallium/frontends/lavapipe/lvp_fuchsia_buffer_collection.h b/src/gallium/frontends/lavapipe/lvp_fuchsia_buffer_collection.h
new file mode 100644
index 0000000..93467f0
--- /dev/null
+++ b/src/gallium/frontends/lavapipe/lvp_fuchsia_buffer_collection.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2023 Google, LLC
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#pragma once
+
+#include <zircon/types.h>
+
+#include <lib/magma/magma_sysmem.h>
+
+#include "vulkan/vulkan_core.h"
+#include "vulkan/vulkan_fuchsia.h"
+
+#include "vk_object.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Provided by VK_FUCHSIA_buffer_collection. */
+VkResult lvp_CreateBufferCollectionFUCHSIA(
+ VkDevice device,
+ const VkBufferCollectionCreateInfoFUCHSIA* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkBufferCollectionFUCHSIA* pCollection);
+
+/* Provided by VK_FUCHSIA_buffer_collection. */
+void lvp_DestroyBufferCollectionFUCHSIA(
+ VkDevice device,
+ VkBufferCollectionFUCHSIA collection,
+ const VkAllocationCallbacks* pAllocator);
+
+/* Provided by VK_FUCHSIA_buffer_collection. */
+VkResult lvp_GetBufferCollectionPropertiesFUCHSIA(
+ VkDevice device,
+ VkBufferCollectionFUCHSIA collection,
+ VkBufferCollectionPropertiesFUCHSIA* pProperties);
+
+/* Provided by VK_FUCHSIA_buffer_collection. */
+VkResult lvp_SetBufferCollectionBufferConstraintsFUCHSIA(
+ VkDevice device,
+ VkBufferCollectionFUCHSIA collection,
+ const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo);
+
+/* Provided by VK_FUCHSIA_buffer_collection. */
+VkResult lvp_SetBufferCollectionImageConstraintsFUCHSIA(
+ VkDevice device,
+ VkBufferCollectionFUCHSIA collection,
+ const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo);
+
+#define MAX_BUFFER_COLLECTION_FORMAT_INDICES 128
+
+struct lvp_buffer_collection {
+ struct vk_object_base base;
+
+ magma_buffer_collection_t buffer_collection;
+ magma_sysmem_buffer_constraints_t constraints;
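+
+ /* Maps each sysmem format-constraints slot (filled in by
+ lvp_SetBufferCollectionImageConstraintsFUCHSIA) back to the index of the caller's
+ pFormatConstraints entry, so lvp_GetBufferCollectionPropertiesFUCHSIA can report
+ createInfoIndex. */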
+ uint32_t format_index_input_index_map[MAX_BUFFER_COLLECTION_FORMAT_INDICES];
+};
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(lvp_buffer_collection, base, VkBufferCollectionFUCHSIA,
+ VK_OBJECT_TYPE_BUFFER_COLLECTION_FUCHSIA)
+
+struct lvp_device;
+struct lvp_device_memory;
+
+/* Called by lvp_AllocateMemory(). Obtains a VMO from the buffer_collection and passes it to
+ `lvp_fuchsia_import_vmo()`. Constraints must have been set on the buffer collection before
+ calling this, otherwise it will hang in `WaitForBuffersAllocated()`.
+
+ NOTE: the `type_index` field is left untouched; the caller is responsible for setting it.
+*/
+VkResult
+fuchsia_allocate_memory_from_buffer_collection(
+ const struct lvp_device* device,
+ const struct lvp_buffer_collection* buffer_collection,
+ uint32_t index,
+ uint64_t alloc_size,
+ struct lvp_device_memory* memory_out);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.c b/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.c
index 5cfd146..51d5fb4 100644
--- a/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.c
+++ b/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.c
@@ -29,6 +29,8 @@
#include <assert.h>
#include <stdio.h>
+#include "lvp_private.h"
+
VkResult lvp_fuchsia_map_vmo(zx_handle_t vmo, size_t len, zx_vaddr_t* ptr)
{
zx_status_t status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
@@ -86,3 +88,32 @@
}
return VK_SUCCESS;
}
+
+VkResult lvp_fuchsia_import_vmo(
+ zx_handle_t vmo,
+ uint64_t allocation_size,
+ struct lvp_device_memory* memory_out)
+{
+ uint64_t vmo_size = 0;
+ VkResult error = lvp_fuchsia_get_vmo_size(vmo, &vmo_size);
+ if (error != VK_SUCCESS)
+ return error;
+
+ assert(vmo_size >= allocation_size);
+
+ zx_vaddr_t vmo_ptr = 0;
+ error = lvp_fuchsia_map_vmo(vmo, allocation_size, &vmo_ptr);
+ if (error != VK_SUCCESS)
+ return error;
+
+ memory_out->pmem = (void*)vmo_ptr;
+
+ /* TODO(https://fxbug.dev/321122796): do we need a different memory type to distinguish VMOs
+ obtained from a sysmem buffer collection? */
+ memory_out->memory_type = LVP_DEVICE_MEMORY_TYPE_ZIRCON_VMO;
+ memory_out->map_size = allocation_size;
+ memory_out->vmo_handle = vmo;
+ memory_out->map = (void*)vmo_ptr;
+
+ return VK_SUCCESS;
+}
diff --git a/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.h b/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.h
index ef31e2f..4afd96c 100644
--- a/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.h
+++ b/src/gallium/frontends/lavapipe/lvp_fuchsia_memory.h
@@ -50,6 +50,19 @@
*/
VkResult lvp_fuchsia_get_vmo_size(zx_handle_t vmo, uint64_t* size_out);
+
+struct lvp_device_memory;
+
+/* Called by (or on behalf of) lvp_AllocateMemory(). Maps the provided VMO and populates the
+ `memory_out` struct.
+
+ NOTE: the `type_index` field is left untouched; the caller is responsible for setting it.
+*/
+VkResult lvp_fuchsia_import_vmo(
+ zx_handle_t vmo,
+ uint64_t allocation_size,
+ struct lvp_device_memory* memory_out);
+
#ifdef __cplusplus
}
#endif
diff --git a/src/gallium/frontends/lavapipe/lvp_private.h b/src/gallium/frontends/lavapipe/lvp_private.h
index 52cdbed..f1f10b4 100644
--- a/src/gallium/frontends/lavapipe/lvp_private.h
+++ b/src/gallium/frontends/lavapipe/lvp_private.h
@@ -55,6 +55,10 @@
#include <vulkan/vulkan.h>
#include <vulkan/vk_icd.h>
+#ifdef VK_USE_PLATFORM_FUCHSIA
+#include <vulkan/vulkan_fuchsia.h>
+#endif
+
#include "lvp_entrypoints.h"
#include "vk_device.h"
#include "vk_instance.h"
@@ -184,7 +188,9 @@
struct lvp_physical_device *physical_device;
struct pipe_screen *pscreen;
#ifdef VK_USE_PLATFORM_FUCHSIA
- zx_handle_t sysmem_connection;
+ /* This is really a magma_sysmem_connection_t,
+ but we want to limit the proliferation of Magma types. */
+ uint64_t sysmem_connection;
#endif
bool poison_mem;
};
@@ -620,6 +626,11 @@
}
void
+lvp_physical_device_get_format_properties(struct lvp_physical_device *physical_device,
+ VkFormat format,
+ VkFormatProperties3 *out_properties);
+
+void
lvp_pipeline_destroy(struct lvp_device *device, struct lvp_pipeline *pipeline);
void