blob: f9393257594ef71ce498cabd78099ea9777b37c1 [file] [log] [blame]
/*
* Copyright © 2024 Google, LLC
*
* based in part on lvp_pipe_sync.c which is
* Copyright © 2022 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <zircon/errors.h>
#include <zircon/syscalls.h>
#include <zircon/time.h>
#include <zircon/types.h>
#include "lvp_private.h"
#include "util/log.h"
#include "vk_log.h"
#include "vulkan/vulkan_core.h"
/*
 * Map a zx_status_t to a VkResult and RETURN it from the enclosing function.
 * Note this returns on every path, including ZX_OK (-> VK_SUCCESS); use
 * ZX_STATUS_VK_RTN_ERR below when execution should continue on success.
 * |external| selects VK_ERROR_INVALID_EXTERNAL_HANDLE for bad handles
 * (imported handles) vs. VK_ERROR_UNKNOWN for internal ones.
 * NOTE(review): "%m" relies on the libc printf errno extension and the '0'
 * flag in "%0x" has no effect without a field width - presumably both are
 * intentional; confirm against the platform's vsnprintf.
 */
#define ZX_STATUS_VK_RTN(status, device, handle, message, external) \
switch (status) { \
case ZX_OK: \
return VK_SUCCESS; \
case ZX_ERR_BAD_HANDLE: \
return vk_errorf(device, external ? VK_ERROR_INVALID_EXTERNAL_HANDLE : \
VK_ERROR_UNKNOWN, \
"%s - Bad Zircon handle 0x%0x: %m", message, handle); \
case ZX_ERR_TIMED_OUT: \
return VK_TIMEOUT; \
default: \
return vk_errorf(device, VK_ERROR_UNKNOWN, \
"%s - Unknown Zircon error: %m", message); \
}
/* No-op when status == ZX_OK. */
/*
 * Error-only variant of ZX_STATUS_VK_RTN: on ZX_OK it breaks out of the
 * switch and lets the enclosing function continue; any other status returns
 * the mapped VkResult immediately. |external| has the same meaning as above.
 */
#define ZX_STATUS_VK_RTN_ERR(status, device, handle, message, external) \
switch (status) { \
case ZX_OK: \
break; \
case ZX_ERR_BAD_HANDLE: \
return vk_errorf(device, external ? VK_ERROR_INVALID_EXTERNAL_HANDLE : \
VK_ERROR_UNKNOWN, \
"%s - Bad Zircon handle 0x%0x: %m", message, handle); \
case ZX_ERR_TIMED_OUT: \
return VK_TIMEOUT; \
default: \
return vk_errorf(device, VK_ERROR_UNKNOWN, \
"%s - Unknown Zircon error: %m", message); \
}
/* Non-blocking poll: true iff ZX_EVENT_SIGNALED is currently asserted on
 * |handle|. A zero deadline makes zx_object_wait_one return immediately. */
static bool is_signaled(zx_handle_t handle) {
   return zx_object_wait_one(handle, ZX_EVENT_SIGNALED, 0, NULL) == ZX_OK;
}
/*
 * vk_sync init hook.
 *
 * |initial_value| is overloaded:
 *  - 0: back the sync with a fresh, unsignaled Zircon event.
 *  - 1: back the sync with a fresh Zircon event that starts signaled.
 *  - any other value: treated as a zx_handle_t to adopt (must fit in 32 bits).
 */
static VkResult lvp_pipe_sync_fuchsia_init(UNUSED struct vk_device *vk_device,
                                           struct vk_sync *vk_sync,
                                           uint64_t initial_value) {
   struct lvp_pipe_sync_fuchsia *sync =
      vk_sync_as_lvp_pipe_sync_fuchsia(vk_sync);
   mtx_init(&sync->lock, mtx_plain);
   zx_handle_t handle = (zx_handle_t)initial_value;
   /* |initial_value| is overloaded to convey zx_handle_t handles. */
   /* If it's set and not 1, it's a zx_handle_t. */
   if (initial_value && initial_value != 1) {
      /* Zircon handles are 32 bits wide. */
      assert((initial_value >> 32) == 0);
      /* Close existing handle if it exists.
       * NOTE(review): this reads sync->signaled before init has written it -
       * assumes the vk_sync payload is zero-initialized by the framework;
       * confirm against vk_sync_init. */
      if (sync->signaled != ZX_HANDLE_INVALID) {
         if (zx_handle_close(sync->signaled) != ZX_OK) {
            return vk_errorf(vk_device, VK_ERROR_UNKNOWN,
                             "Init (close) bad handle 0x%x: %m", sync->signaled);
         }
      }
      /* Duplicate and store |handle| into |sync->signaled|.
       * zx_handle_replace consumes |handle| and yields an equivalent one. */
      zx_status_t status =
         zx_handle_replace(handle, ZX_RIGHT_SAME_RIGHTS, &sync->signaled);
      ZX_STATUS_VK_RTN_ERR(status, vk_device, handle, "Init (replace)", false /* external */);
   } else {
      zx_status_t status = zx_event_create(0, &sync->signaled);
      if (status != ZX_OK) {
         assert(status == ZX_ERR_NO_MEMORY);
         return vk_errorf(vk_device, VK_ERROR_OUT_OF_HOST_MEMORY,
                          "Init (event create): %m");
      }
      if (initial_value == 1) {
         /* initial_value == 1 means "create already signaled". */
         zx_status_t sig_status =
            zx_object_signal(sync->signaled, 0u, ZX_EVENT_SIGNALED);
         /* Bug fix: previously used ZX_STATUS_VK_RTN here, which returns
          * VK_SUCCESS immediately on ZX_OK and skipped the sync->fence
          * initialization below, leaving it indeterminate. Also renamed to
          * avoid shadowing |status|. */
         ZX_STATUS_VK_RTN_ERR(sig_status, vk_device, sync->signaled, "Init (signal)", false /* external */);
      }
   }
   sync->fence = NULL;
   return VK_SUCCESS;
}
static void lvp_pipe_sync_fuchsia_finish(struct vk_device *vk_device,
struct vk_sync *vk_sync) {
struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
struct lvp_pipe_sync_fuchsia *sync =
vk_sync_as_lvp_pipe_sync_fuchsia(vk_sync);
mtx_lock(&sync->lock);
if (sync->fence)
device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
if (sync->signaled == ZX_HANDLE_INVALID) {
mtx_unlock(&sync->lock);
vk_logi(VK_LOG_OBJS(device), "Finish - invalid handle\n");
return;
}
zx_status_t status = zx_handle_close(sync->signaled);
if (status == ZX_OK) {
sync->signaled = ZX_HANDLE_INVALID;
mtx_unlock(&sync->lock);
mtx_destroy(&sync->lock);
return;
}
if (status == ZX_ERR_BAD_HANDLE) {
vk_loge(VK_LOG_OBJS(device), "Finish - attempt to close bad Zircon event handle: %u.\n",
sync->signaled);
} else {
vk_loge(VK_LOG_OBJS(device), "Finish - Unknown error.\n");
}
}
void lvp_pipe_sync_fuchsia_signal_with_fence(struct lvp_device *device,
struct lvp_pipe_sync_fuchsia *sync,
struct pipe_fence_handle *fence) {
mtx_lock(&sync->lock);
device->pscreen->fence_reference(device->pscreen, &sync->fence, fence);
if (zx_object_signal(sync->signaled, 0u, ZX_EVENT_SIGNALED) != ZX_OK) {
mesa_loge("Signal handle 0x%x with fence (== NULL) failed.", sync->signaled);
}
mtx_unlock(&sync->lock);
}
/* vk_sync signal hook: a CPU-side signal supersedes any pending pipe fence,
 * so the fence reference is dropped before the event is asserted. */
static VkResult lvp_pipe_sync_fuchsia_signal(struct vk_device *vk_device,
                                             struct vk_sync *vk_sync,
                                             uint64_t value) {
   struct lvp_pipe_sync_fuchsia *sync =
      vk_sync_as_lvp_pipe_sync_fuchsia(vk_sync);
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);

   mtx_lock(&sync->lock);
   if (sync->fence)
      device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
   zx_status_t res = zx_object_signal(sync->signaled, 0u, ZX_EVENT_SIGNALED);
   mtx_unlock(&sync->lock);
   ZX_STATUS_VK_RTN(res, vk_device, sync->signaled, "Signal (signal)", false /* external */);
}
/* vk_sync reset hook: drops any pending pipe fence and clears the
 * ZX_EVENT_SIGNALED bit so the sync reads as unsignaled again. */
static VkResult lvp_pipe_sync_fuchsia_reset(struct vk_device *vk_device,
                                            struct vk_sync *vk_sync) {
   struct lvp_pipe_sync_fuchsia *sync =
      vk_sync_as_lvp_pipe_sync_fuchsia(vk_sync);
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);

   mtx_lock(&sync->lock);
   if (sync->signaled == ZX_HANDLE_INVALID) {
      mtx_unlock(&sync->lock);
      return vk_errorf(
         device, VK_ERROR_UNKNOWN,
         "Reset - Attempt to reset invalid handle: %m");
   }
   if (sync->fence)
      device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
   /* Clear-mask form of zx_object_signal: deassert ZX_EVENT_SIGNALED. */
   zx_status_t res = zx_object_signal(sync->signaled, ZX_EVENT_SIGNALED, 0u);
   mtx_unlock(&sync->lock);
   ZX_STATUS_VK_RTN(res, vk_device, sync->signaled, "Reset (signal)", false /* external */);
}
/* Acquire/release a pair of sync locks together (always in argument order -
 * callers must pass src, dst consistently to avoid lock-order inversion).
 * Bug fix: wrapped in do { } while (0) so each macro expands to a single
 * statement; the previous two-statement form broke under an unbraced
 * `if (...) DUAL_LOCK(...);` (only the first mtx call was conditional). */
#define DUAL_LOCK(a, b)   \
   do {                   \
      mtx_lock(a);        \
      mtx_lock(b);        \
   } while (0)
#define DUAL_UNLOCK(a, b) \
   do {                   \
      mtx_unlock(a);      \
      mtx_unlock(b);      \
   } while (0)
/* This matches the lvp_pipe_sync move implementation. Unlike typical move
 * semantics, it's expected that vk_src->signaled will be used after this
 * move, so src's event is reset (not invalidated) after its state and fence
 * are transferred to dst. */
static VkResult lvp_pipe_sync_fuchsia_move(struct vk_device *vk_device,
                                           struct vk_sync *vk_dst,
                                           struct vk_sync *vk_src) {
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync_fuchsia *dst = vk_sync_as_lvp_pipe_sync_fuchsia(vk_dst);
   struct lvp_pipe_sync_fuchsia *src = vk_sync_as_lvp_pipe_sync_fuchsia(vk_src);
   DUAL_LOCK(&src->lock, &dst->lock);
   if (src->signaled == ZX_HANDLE_INVALID) {
      DUAL_UNLOCK(&src->lock, &dst->lock);
      return vk_errorf(&device->vk, VK_ERROR_UNKNOWN,
                       "Move - Unable to move invalid handle: %m");
   }
   /* Transfer the pipe fence reference from src to dst (src keeps none). */
   struct pipe_fence_handle *fence = src->fence;
   src->fence = NULL;
   if (dst->fence) {
      device->pscreen->fence_reference(device->pscreen, &dst->fence, NULL);
   }
   dst->fence = fence;
   /* Mirror src's signaled bit onto dst's event. */
   zx_status_t status = ZX_ERR_BAD_STATE;
   if (is_signaled(src->signaled)) {
      status = zx_object_signal(dst->signaled, 0u, ZX_EVENT_SIGNALED);
   } else {
      status = zx_object_signal(dst->signaled, ZX_EVENT_SIGNALED, 0u);
   }
   if (status != ZX_OK) {
      DUAL_UNLOCK(&src->lock, &dst->lock);
      return vk_errorf(&device->vk, VK_ERROR_UNKNOWN,
                       "Move - unable to signal handle 0x%x: %m", dst->signaled);
   }
   /* Reset src so it reads as unsignaled after the move. */
   status = zx_object_signal(src->signaled, ZX_EVENT_SIGNALED, 0u);
   DUAL_UNLOCK(&src->lock, &dst->lock);
   /* Bug fix: this status was previously assigned and then silently
    * discarded; a failed reset left src stuck signaled with VK_SUCCESS. */
   if (status != ZX_OK) {
      return vk_errorf(&device->vk, VK_ERROR_UNKNOWN,
                       "Move - unable to signal handle 0x%x: %m", src->signaled);
   }
   return VK_SUCCESS;
}
/* The submit thread is force enabled even when requesting "immediate". A client's wait may */
/* may begin before the submit thread calls signal_with_fence. */
/*
 * vk_sync wait hook (binary only; VK_SYNC_WAIT_ANY is not supported).
 * Phase 1: wait on the Zircon event until it is signaled or a pipe fence is
 * published. Phase 2: if a fence is pending (and this isn't a WAIT_PENDING
 * query), wait on the fence and then mark the event signaled.
 */
static VkResult lvp_pipe_sync_fuchsia_wait(struct vk_device *vk_device,
                                           struct vk_sync *vk_sync,
                                           uint64_t wait_value,
                                           enum vk_sync_wait_flags wait_flags,
                                           uint64_t abs_timeout_ns) {
   struct lvp_device *device = container_of(vk_device, struct lvp_device, vk);
   struct lvp_pipe_sync_fuchsia *sync =
      vk_sync_as_lvp_pipe_sync_fuchsia(vk_sync);
   assert(!(wait_flags & VK_SYNC_WAIT_ANY));
   zx_status_t status = ZX_OK;
   zx_time_t now_ns = zx_clock_get_monotonic();
   /* Clamp to the Zircon infinite deadline sentinel. */
   abs_timeout_ns =
      (abs_timeout_ns > ZX_TIME_INFINITE) ? ZX_TIME_INFINITE : abs_timeout_ns;
   mtx_lock(&sync->lock);
   while (!is_signaled(sync->signaled) && !sync->fence) {
      mtx_unlock(&sync->lock);
      if (now_ns >= abs_timeout_ns)
         return VK_TIMEOUT;
      status = zx_object_wait_one(sync->signaled, ZX_EVENT_SIGNALED,
                                  (zx_time_t)abs_timeout_ns, NULL);
      ZX_STATUS_VK_RTN_ERR(status, device, sync->signaled, "Wait (wait one)", false /* external */);
      mtx_lock(&sync->lock);
      now_ns = zx_clock_get_monotonic();
   }
   if ((!sync->fence && is_signaled(sync->signaled)) ||
       (wait_flags & VK_SYNC_WAIT_PENDING)) {
      mtx_unlock(&sync->lock);
      return VK_SUCCESS;
   }
   /* Grab a reference before we drop the lock. */
   struct pipe_fence_handle *fence = NULL;
   device->pscreen->fence_reference(device->pscreen, &fence, sync->fence);
   /* Bug fix: the lock was previously held across the blocking fence_finish
    * call (despite the comment above), stalling signal/reset/finish. */
   mtx_unlock(&sync->lock);
   uint64_t rel_timeout_ns =
      now_ns >= abs_timeout_ns ? 0 : abs_timeout_ns - now_ns;
   bool complete = device->pscreen->fence_finish(device->pscreen, NULL, fence,
                                                 rel_timeout_ns);
   mtx_lock(&sync->lock);
   /* Only consume sync->fence if it is still the fence we waited on; while
    * sync->fence == fence the sync holds a reference, so the pointer compare
    * is safe even after we drop ours. */
   bool fence_unchanged = (sync->fence == fence);
   device->pscreen->fence_reference(device->pscreen, &fence, NULL);
   if (!complete) {
      mtx_unlock(&sync->lock);
      return VK_TIMEOUT;
   }
   if (fence_unchanged) {
      device->pscreen->fence_reference(device->pscreen, &sync->fence, NULL);
      status = zx_object_signal(sync->signaled, 0u, ZX_EVENT_SIGNALED);
      mtx_unlock(&sync->lock);
      ZX_STATUS_VK_RTN_ERR(status, device, sync->signaled, "Wait (signal)", false /* external */);
      /* Bug fix: on success the original fell through to a second
       * mtx_unlock on an already-unlocked mutex (undefined behavior). */
      return VK_SUCCESS;
   }
   mtx_unlock(&sync->lock);
   return VK_SUCCESS;
}
/* Imports an external Zircon event: takes ownership of |handle| (via
 * zx_handle_replace, which consumes it) and swaps it in as the sync's
 * backing event, closing the previous one. On failure to close the old
 * event the imported handle is released and an error is returned. */
static VkResult lvp_pipe_sync_fuchsia_import_zircon_handle(
   struct vk_device *device, struct vk_sync *vk_sync, uint32_t handle) {
   struct lvp_pipe_sync_fuchsia *sync = vk_sync_as_lvp_pipe_sync_fuchsia(vk_sync);

   zx_handle_t imported = ZX_HANDLE_INVALID;
   zx_status_t res = zx_handle_replace(handle, ZX_RIGHT_SAME_RIGHTS, &imported);
   ZX_STATUS_VK_RTN_ERR(res, device, handle, "Import (replace)", true /* external */);

   mtx_lock(&sync->lock);
   res = zx_handle_close(sync->signaled);
   if (res != ZX_OK) {
      zx_handle_close(imported);
      mtx_unlock(&sync->lock);
      ZX_STATUS_VK_RTN(res, device, imported, "Import (close - signaled)", false /* external */);
   }
   sync->signaled = imported;
   mtx_unlock(&sync->lock);
   return VK_SUCCESS;
}
/* Exports the sync's backing Zircon event by duplicating it into
 * |handle_out|; the caller owns the duplicate. */
static VkResult lvp_pipe_sync_fuchsia_export_zircon_handle(
   struct vk_device *device, struct vk_sync *vk_sync, uint32_t *handle_out) {
   struct lvp_pipe_sync_fuchsia *sync = vk_sync_as_lvp_pipe_sync_fuchsia(vk_sync);

   mtx_lock(&sync->lock);
   zx_handle_t event = sync->signaled;
   if (event == ZX_HANDLE_INVALID) {
      mtx_unlock(&sync->lock);
      return vk_errorf(
         device, VK_ERROR_UNKNOWN,
         "Export - Attempt to export invalid handle: %m");
   }
   zx_status_t res = zx_handle_duplicate(event, ZX_RIGHT_SAME_RIGHTS, handle_out);
   mtx_unlock(&sync->lock);
   ZX_STATUS_VK_RTN(res, device, event, "Export (duplicate)", false /* external */);
}
/* Binary vk_sync implementation backed by a Zircon event plus an optional
 * gallium pipe fence. Hook table consumed by the common vk_sync layer. */
const struct vk_sync_type lvp_pipe_sync_fuchsia_type = {
.size = sizeof(struct lvp_pipe_sync_fuchsia),
.features = VK_SYNC_FEATURE_BINARY | VK_SYNC_FEATURE_GPU_WAIT |
VK_SYNC_FEATURE_GPU_MULTI_WAIT | VK_SYNC_FEATURE_CPU_WAIT |
VK_SYNC_FEATURE_CPU_RESET | VK_SYNC_FEATURE_CPU_SIGNAL |
VK_SYNC_FEATURE_WAIT_PENDING,
.init = lvp_pipe_sync_fuchsia_init,
.finish = lvp_pipe_sync_fuchsia_finish,
.signal = lvp_pipe_sync_fuchsia_signal,
.reset = lvp_pipe_sync_fuchsia_reset,
.move = lvp_pipe_sync_fuchsia_move,
.wait = lvp_pipe_sync_fuchsia_wait,
/* Fuchsia-specific external-handle import/export hooks. */
.import_zircon_handle = lvp_pipe_sync_fuchsia_import_zircon_handle,
.export_zircon_handle = lvp_pipe_sync_fuchsia_export_zircon_handle,
};