/*
* Copyright © 2025 Google, Inc.
* SPDX-License-Identifier: MIT
*/
#include "vk_magma_syncobj.h"
#include "util/detect_os.h"
#include "util/magma/magma_wait.h"
#include "vk_device.h"
#include "vk_log.h"
#include "vk_util.h"
#include "util/log.h"

static const char *kLogTag = "magma_syncobj";

static struct vk_magma_syncobj *
to_magma_syncobj(struct vk_sync *sync)
{
   assert(vk_sync_type_is_magma_syncobj(sync->type));
   return container_of(sync, struct vk_magma_syncobj, base);
}
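
/* Initializes a binary syncobj.  initial_value == 1 creates the semaphore in
 * the signaled state.  Any other nonzero initial_value is interpreted as a
 * magma handle to import, which lets the Fuchsia import path skip a separate
 * create-then-import step.
 */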
static VkResult
vk_magma_syncobj_init(struct vk_device *device,
                      struct vk_sync *sync,
                      uint64_t initial_value)
{
   struct vk_magma_syncobj *sobj = to_magma_syncobj(sync);

   assert((sync->flags & VK_SYNC_IS_TIMELINE) == 0);

   magma_status_t status = MAGMA_STATUS_OK;
   if (initial_value && initial_value != 1) {
      /* Special case: initial_value is a handle; this optimizes import
       * (see vk_common_ImportSemaphoreZirconHandleFUCHSIA).
       */
      assert((initial_value >> 32) == 0);
      magma_handle_t handle = (magma_handle_t)initial_value;
      status = magma_connection_import_semaphore2(
         device->magma_connection.connection, handle, /*flags=*/0,
         &sobj->semaphore, &sobj->id);
   } else {
      status = magma_connection_create_semaphore(
         device->magma_connection.connection, &sobj->semaphore, &sobj->id);
   }

   if (status == MAGMA_STATUS_CONNECTION_LOST) {
      return vk_errorf(device, VK_ERROR_DEVICE_LOST,
                       "Failed to import or create semaphore: %d", status);
   } else if (status != MAGMA_STATUS_OK) {
      return vk_errorf(device, VK_ERROR_OUT_OF_HOST_MEMORY,
                       "Failed to import or create semaphore: %d", status);
   }

   if (initial_value == 1) {
      magma_semaphore_signal(sobj->semaphore);
   }

   return VK_SUCCESS;
}
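
/* Releases the semaphore backing this syncobj. */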
void
vk_magma_syncobj_finish(struct vk_device *device,
                        struct vk_sync *sync)
{
   struct vk_magma_syncobj *sobj = to_magma_syncobj(sync);

   magma_connection_release_semaphore(device->magma_connection.connection,
                                      sobj->semaphore);
}
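
/* Signals the semaphore.  The value parameter is ignored: this sync type
 * only supports binary payloads.
 */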
static VkResult
vk_magma_syncobj_signal(struct vk_device *device,
                        struct vk_sync *sync,
                        uint64_t value)
{
   struct vk_magma_syncobj *sobj = to_magma_syncobj(sync);

   magma_semaphore_signal(sobj->semaphore);

   return VK_SUCCESS;
}
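
/* Returns the semaphore to the unsignaled state. */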
static VkResult
vk_magma_syncobj_reset(struct vk_device *device,
                       struct vk_sync *sync)
{
   struct vk_magma_syncobj *sobj = to_magma_syncobj(sync);

   magma_semaphore_reset(sobj->semaphore);

   return VK_SUCCESS;
}
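
/* CPU-waits on a set of binary syncobjs.  With VK_SYNC_WAIT_ANY the wait
 * completes when any semaphore signals; otherwise it waits for all of them.
 * VK_SYNC_WAIT_PENDING is not supported, and the absolute timeout is clamped
 * because the underlying wait takes a signed timeout.
 */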
static VkResult
vk_magma_syncobj_wait_many(struct vk_device *device,
                           uint32_t wait_count,
                           const struct vk_sync_wait *waits,
                           enum vk_sync_wait_flags wait_flags,
                           uint64_t abs_timeout_ns)
{
   assert((wait_flags & VK_SYNC_WAIT_PENDING) == 0);

   /* Syncobj timeouts are signed */
   abs_timeout_ns = MIN2(abs_timeout_ns, (uint64_t)INT64_MAX);

   STACK_ARRAY(magma_semaphore_t, semaphores, wait_count);

   for (uint32_t i = 0; i < wait_count; i++) {
      semaphores[i] = to_magma_syncobj(waits[i].sync)->semaphore;
   }

   magma_status_t status = MAGMA_STATUS_OK;
   if (wait_count) {
      const bool kWaitAll = (wait_flags & VK_SYNC_WAIT_ANY) == 0;
      status = magma_wait(device->magma_connection.notification_channel,
                          semaphores, wait_count, abs_timeout_ns, kWaitAll,
                          device->magma_connection.notification_callback,
                          &device->magma_connection);
   }

   STACK_ARRAY_FINISH(semaphores);

   switch (status) {
   case MAGMA_STATUS_OK:
      return VK_SUCCESS;
   case MAGMA_STATUS_TIMED_OUT:
      return VK_TIMEOUT;
   case MAGMA_STATUS_CONNECTION_LOST:
      return vk_errorf(device, VK_ERROR_DEVICE_LOST,
                       "magma_wait failed: %d", status);
   default:
      return vk_errorf(device, VK_ERROR_UNKNOWN,
                       "magma_wait failed: %d", status);
   }
}
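
/* Replaces this syncobj's semaphore with one imported from the given magma
 * handle (Fuchsia path).  The previous semaphore is released only after the
 * import succeeds.
 */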
static VkResult
vk_magma_syncobj_import_magma_handle(struct vk_device *device,
                                     struct vk_sync *sync,
                                     uint32_t handle)
{
   struct vk_magma_syncobj *sobj = to_magma_syncobj(sync);

   magma_semaphore_t semaphore = 0;
   magma_semaphore_id_t id = 0;
   magma_status_t status = magma_connection_import_semaphore2(
      device->magma_connection.connection, handle, /*flags=*/0, &semaphore,
      &id);
   if (status != MAGMA_STATUS_OK) {
      return vk_errorf(device, VK_ERROR_UNKNOWN,
                       "magma_connection_import_semaphore2 failed: %d",
                       status);
   }

   magma_connection_release_semaphore(device->magma_connection.connection,
                                      sobj->semaphore);
   sobj->semaphore = semaphore;
   sobj->id = id;

   return VK_SUCCESS;
}
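
/* Exports this syncobj's semaphore as a magma handle (Fuchsia path); the
 * caller takes ownership of the returned handle.
 */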
static VkResult
vk_magma_syncobj_export_magma_handle(struct vk_device *device,
                                     struct vk_sync *sync,
                                     uint32_t *handle_out)
{
   struct vk_magma_syncobj *sobj = to_magma_syncobj(sync);

   magma_handle_t handle = 0;
   magma_status_t status = magma_semaphore_export(sobj->semaphore, &handle);
   if (status != MAGMA_STATUS_OK) {
      return vk_errorf(device, VK_ERROR_UNKNOWN,
                       "magma_semaphore_export failed: %d", status);
   }

   *handle_out = handle;

   return VK_SUCCESS;
}
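
/* Imports a sync file fd as the new payload of this syncobj (Android path).
 * The fd is duplicated so the caller keeps ownership of its descriptor, and
 * the import passes MAGMA_IMPORT_SEMAPHORE_ONE_SHOT to match sync-file
 * semantics.
 */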
static VkResult
vk_magma_syncobj_import_sync_file(struct vk_device *device,
                                  struct vk_sync *sync,
                                  int fd)
{
   struct vk_magma_syncobj *sobj = to_magma_syncobj(sync);

   assert(device->magma_connection.connection);

   magma_semaphore_t semaphore;
   magma_semaphore_id_t id;
   magma_handle_t handle;
   {
      /* Dup the fd: the import consumes the handle, while the caller keeps
       * ownership of the original fd.
       */
      int new_fd = dup(fd);
      if (new_fd < 0) {
         mesa_log(MESA_LOG_ERROR, kLogTag, "dup(%d) failed: %s", fd,
                  strerror(errno));
         return VK_ERROR_INVALID_EXTERNAL_HANDLE;
      }
      handle = new_fd;
   }

   magma_status_t status = magma_connection_import_semaphore2(
      device->magma_connection.connection, handle,
      MAGMA_IMPORT_SEMAPHORE_ONE_SHOT, &semaphore, &id);
   if (status != MAGMA_STATUS_OK) {
      return vk_errorf(device, VK_ERROR_UNKNOWN,
                       "magma_connection_import_semaphore2 failed: %d",
                       status);
   }

   magma_connection_release_semaphore(device->magma_connection.connection,
                                      sobj->semaphore);
   sobj->semaphore = semaphore;
   sobj->id = id;

   return VK_SUCCESS;
}
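
/* Exports the syncobj's state as a sync file fd (Android path), then
 * re-creates the underlying semaphore so the syncobj is independent of the
 * exported payload; see the copy-transference comment below.
 */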
static VkResult
vk_magma_syncobj_export_sync_file(struct vk_device *device,
                                  struct vk_sync *sync,
                                  int *fd)
{
   magma_handle_t handle;
   VkResult result =
      vk_magma_syncobj_export_magma_handle(device, sync, &handle);
   if (result != VK_SUCCESS)
      return result;

   *fd = handle;

   /* Copy transference means that after exporting the syncobj state, we must
    * ensure the exported-from syncobj is independent of the exported state.
    * Since an export with copy transference is treated like a wait operation,
    * it has the side effect of resetting the semaphore state, so it's fine to
    * just create a new syncobj.
    * Independent state also matters because vk_common_GetSemaphoreFdKHR may
    * reset the syncobj immediately after this codepath returns, and we don't
    * want that reset to overwrite a possible signal in the exported state.
    */
   vk_magma_syncobj_finish(device, sync);
   result = vk_magma_syncobj_init(device, sync, /*initial_value=*/0);

   return result;
}
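
/* Returns the vk_sync_type table for magma syncobjs.  Sync-file import and
 * export are wired up on Android; raw magma handle import and export on
 * Fuchsia.
 *
 * A driver would typically expose this type through its physical device's
 * supported sync types, e.g. (hypothetical field names):
 *
 *    pdev->syncobj_type = vk_magma_syncobj_get_type();
 *    pdev->sync_types[0] = &pdev->syncobj_type;
 *    pdev->sync_types[1] = NULL;
 *    pdev->vk.supported_sync_types = pdev->sync_types;
 */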
struct vk_sync_type
vk_magma_syncobj_get_type(void)
{
   struct vk_sync_type type = {
      .size = sizeof(struct vk_magma_syncobj),
      .features = VK_SYNC_FEATURE_BINARY |
                  VK_SYNC_FEATURE_GPU_WAIT |
                  VK_SYNC_FEATURE_GPU_MULTI_WAIT |
                  VK_SYNC_FEATURE_CPU_RESET |
                  VK_SYNC_FEATURE_CPU_SIGNAL |
                  VK_SYNC_FEATURE_CPU_WAIT |
                  VK_SYNC_FEATURE_WAIT_ANY,
      .init = vk_magma_syncobj_init,
      .finish = vk_magma_syncobj_finish,
      .signal = vk_magma_syncobj_signal,
      .reset = vk_magma_syncobj_reset,
      .wait_many = vk_magma_syncobj_wait_many,
#if DETECT_OS_ANDROID
      .import_sync_file = vk_magma_syncobj_import_sync_file,
      .export_sync_file = vk_magma_syncobj_export_sync_file,
#elif DETECT_OS_FUCHSIA
      .import_magma_handle = vk_magma_syncobj_import_magma_handle,
      .export_magma_handle = vk_magma_syncobj_export_magma_handle,
#endif
   };
   return type;
}