/*
* Copyright © 2019 Raspberry Pi Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "v3dv_private.h"
#include "util/u_pack_color.h"
#include "vk_util.h"
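/* Adds a BO to the job's BO set if it is not already present. The job's
 * bo_handle_mask acts as a quick filter: if the BO's handle bit is not set
 * we know the BO can't be in the set yet, so we can skip the set lookup.
 */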
void
v3dv_job_add_bo(struct v3dv_job *job, struct v3dv_bo *bo)
{
if (!bo)
return;
if (job->bo_handle_mask & bo->handle_bit) {
if (_mesa_set_search(job->bos, bo))
return;
}
_mesa_set_add(job->bos, bo);
job->bo_count++;
job->bo_handle_mask |= bo->handle_bit;
}
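/* Adds a BO to the job without checking for duplicates. Only meant for BOs
 * the caller knows can't be in the job's BO set already, such as a BO that
 * was just allocated for this job.
 */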
void
v3dv_job_add_bo_unchecked(struct v3dv_job *job, struct v3dv_bo *bo)
{
assert(bo);
_mesa_set_add(job->bos, bo);
job->bo_count++;
job->bo_handle_mask |= bo->handle_bit;
}
static void
cmd_buffer_init(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_device *device)
{
/* Do not reset the base object! If we are calling this from a command
* buffer reset, that would reset the loader's dispatch table for the
* command buffer, as well as any other relevant info from vk_object_base.
*/
const uint32_t base_size = sizeof(struct vk_command_buffer);
uint8_t *cmd_buffer_driver_start = ((uint8_t *) cmd_buffer) + base_size;
memset(cmd_buffer_driver_start, 0, sizeof(*cmd_buffer) - base_size);
cmd_buffer->device = device;
list_inithead(&cmd_buffer->private_objs);
list_inithead(&cmd_buffer->jobs);
list_inithead(&cmd_buffer->list_link);
cmd_buffer->state.subpass_idx = -1;
cmd_buffer->state.meta.subpass_idx = -1;
cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_INITIALIZED;
}
static VkResult
cmd_buffer_create(struct vk_command_pool *pool,
struct vk_command_buffer **cmd_buffer_out)
{
struct v3dv_device *device =
container_of(pool->base.device, struct v3dv_device, vk);
struct v3dv_cmd_buffer *cmd_buffer;
cmd_buffer = vk_zalloc(&pool->alloc,
sizeof(*cmd_buffer),
8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
/* Here we pass 0 as level because this callback hook doesn't have the level
* info, but that's fine, vk_common_AllocateCommandBuffers will fix it up
* after creation.
*/
VkResult result;
result = vk_command_buffer_init(pool, &cmd_buffer->vk,
&v3dv_cmd_buffer_ops, 0 /* level */);
if (result != VK_SUCCESS) {
vk_free(&pool->alloc, cmd_buffer);
return result;
}
cmd_buffer_init(cmd_buffer, device);
*cmd_buffer_out = &cmd_buffer->vk;
return VK_SUCCESS;
}
static void
job_destroy_gpu_cl_resources(struct v3dv_job *job)
{
assert(job->type == V3DV_JOB_TYPE_GPU_CL ||
job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
v3dv_cl_destroy(&job->bcl);
v3dv_cl_destroy(&job->rcl);
v3dv_cl_destroy(&job->indirect);
/* Since we don't ref BOs when we add them to the command buffer, don't
* unref them here either. BOs will be freed when their corresponding API
* objects are destroyed.
*/
_mesa_set_destroy(job->bos, NULL);
v3dv_bo_free(job->device, job->tile_alloc);
v3dv_bo_free(job->device, job->tile_state);
}
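/* Cloned jobs only own the BO struct copies created by clone_bo_list(), so
 * here we just free those list entries; the underlying buffers belong to
 * the original job and its API objects.
 */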
static void
job_destroy_cloned_gpu_cl_resources(struct v3dv_job *job)
{
assert(job->type == V3DV_JOB_TYPE_GPU_CL);
list_for_each_entry_safe(struct v3dv_bo, bo, &job->bcl.bo_list, list_link) {
list_del(&bo->list_link);
vk_free(&job->device->vk.alloc, bo);
}
list_for_each_entry_safe(struct v3dv_bo, bo, &job->rcl.bo_list, list_link) {
list_del(&bo->list_link);
vk_free(&job->device->vk.alloc, bo);
}
list_for_each_entry_safe(struct v3dv_bo, bo, &job->indirect.bo_list, list_link) {
list_del(&bo->list_link);
vk_free(&job->device->vk.alloc, bo);
}
}
static void
job_destroy_gpu_csd_resources(struct v3dv_job *job)
{
assert(job->type == V3DV_JOB_TYPE_GPU_CSD);
assert(job->cmd_buffer);
v3dv_cl_destroy(&job->indirect);
_mesa_set_destroy(job->bos, NULL);
if (job->csd.shared_memory)
v3dv_bo_free(job->device, job->csd.shared_memory);
}
void
v3dv_job_destroy(struct v3dv_job *job)
{
assert(job);
list_del(&job->list_link);
/* Cloned jobs don't make deep copies of the original jobs, so they don't
* own any of their resources. However, they do allocate clones of BO
* structs, so make sure we free those.
*/
if (!job->is_clone) {
switch (job->type) {
case V3DV_JOB_TYPE_GPU_CL:
case V3DV_JOB_TYPE_GPU_CL_SECONDARY:
job_destroy_gpu_cl_resources(job);
break;
case V3DV_JOB_TYPE_GPU_CSD:
job_destroy_gpu_csd_resources(job);
break;
default:
break;
}
} else {
/* Cloned jobs */
if (job->type == V3DV_JOB_TYPE_GPU_CL)
job_destroy_cloned_gpu_cl_resources(job);
}
vk_free(&job->device->vk.alloc, job);
}
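/* Registers an object with the command buffer so it is destroyed with the
 * provided callback when the command buffer is reset or freed.
 */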
void
v3dv_cmd_buffer_add_private_obj(struct v3dv_cmd_buffer *cmd_buffer,
uint64_t obj,
v3dv_cmd_buffer_private_obj_destroy_cb destroy_cb)
{
struct v3dv_cmd_buffer_private_obj *pobj =
vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(*pobj), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!pobj) {
v3dv_flag_oom(cmd_buffer, NULL);
return;
}
pobj->obj = obj;
pobj->destroy_cb = destroy_cb;
list_addtail(&pobj->list_link, &cmd_buffer->private_objs);
}
static void
cmd_buffer_destroy_private_obj(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_cmd_buffer_private_obj *pobj)
{
assert(pobj && pobj->obj && pobj->destroy_cb);
pobj->destroy_cb(v3dv_device_to_handle(cmd_buffer->device),
pobj->obj,
&cmd_buffer->device->vk.alloc);
list_del(&pobj->list_link);
vk_free(&cmd_buffer->device->vk.alloc, pobj);
}
static void
cmd_buffer_free_resources(struct v3dv_cmd_buffer *cmd_buffer)
{
list_for_each_entry_safe(struct v3dv_job, job,
&cmd_buffer->jobs, list_link) {
v3dv_job_destroy(job);
}
if (cmd_buffer->state.job)
v3dv_job_destroy(cmd_buffer->state.job);
if (cmd_buffer->state.attachments)
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer->state.attachments);
if (cmd_buffer->state.query.end.alloc_count > 0)
vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.query.end.states);
if (cmd_buffer->push_constants_resource.bo)
v3dv_bo_free(cmd_buffer->device, cmd_buffer->push_constants_resource.bo);
list_for_each_entry_safe(struct v3dv_cmd_buffer_private_obj, pobj,
&cmd_buffer->private_objs, list_link) {
cmd_buffer_destroy_private_obj(cmd_buffer, pobj);
}
if (cmd_buffer->state.meta.attachments) {
assert(cmd_buffer->state.meta.attachment_alloc_count > 0);
vk_free(&cmd_buffer->device->vk.alloc, cmd_buffer->state.meta.attachments);
}
}
static void
cmd_buffer_destroy(struct vk_command_buffer *vk_cmd_buffer)
{
struct v3dv_cmd_buffer *cmd_buffer =
container_of(vk_cmd_buffer, struct v3dv_cmd_buffer, vk);
cmd_buffer_free_resources(cmd_buffer);
vk_command_buffer_finish(&cmd_buffer->vk);
vk_free(&cmd_buffer->vk.pool->alloc, cmd_buffer);
}
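/* Returns true if the subpass at subpass_idx can be recorded into the same
 * job as the previous subpass. Among other things, this requires that both
 * subpasses render to the same attachments with the same view mask and
 * don't involve resolves that can't be done in the TLB.
 */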
static bool
cmd_buffer_can_merge_subpass(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t subpass_idx)
{
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
assert(state->pass);
const struct v3dv_physical_device *physical_device =
cmd_buffer->device->pdevice;
if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
return false;
if (!cmd_buffer->state.job)
return false;
if (cmd_buffer->state.job->always_flush)
return false;
if (!physical_device->options.merge_jobs)
return false;
/* Each render pass starts a new job */
if (subpass_idx == 0)
return false;
/* Two subpasses can be merged in the same job if we can emit a single RCL
* for them (since the RCL includes the END_OF_RENDERING command that
* triggers the "render job finished" interrupt). We can do this so long
* as both subpasses render against the same attachments.
*/
assert(state->subpass_idx == subpass_idx - 1);
struct v3dv_subpass *prev_subpass = &state->pass->subpasses[state->subpass_idx];
struct v3dv_subpass *subpass = &state->pass->subpasses[subpass_idx];
if (subpass->ds_attachment.attachment !=
prev_subpass->ds_attachment.attachment)
return false;
if (subpass->color_count != prev_subpass->color_count)
return false;
for (uint32_t i = 0; i < subpass->color_count; i++) {
if (subpass->color_attachments[i].attachment !=
prev_subpass->color_attachments[i].attachment) {
return false;
}
}
/* Don't merge if the subpasses have different view masks, since in that
* case the framebuffer setup is different and we need to emit different
* RCLs.
*/
if (subpass->view_mask != prev_subpass->view_mask)
return false;
/* FIXME: Since some attachment formats can't be resolved using the TLB we
* need to emit separate resolve jobs for them and that would not be
* compatible with subpass merges. We could fix that by testing if any of
* the attachments to resolve doesn't support TLB resolves.
*/
if (prev_subpass->resolve_attachments || subpass->resolve_attachments ||
prev_subpass->resolve_depth || prev_subpass->resolve_stencil ||
subpass->resolve_depth || subpass->resolve_stencil) {
return false;
}
return true;
}
/**
* Computes and sets the job frame tiling information required to set up
* frame binning and rendering.
*/
static struct v3dv_frame_tiling *
job_compute_frame_tiling(struct v3dv_job *job,
uint32_t width,
uint32_t height,
uint32_t layers,
uint32_t render_target_count,
uint8_t max_internal_bpp,
bool msaa,
bool double_buffer)
{
assert(job);
struct v3dv_frame_tiling *tiling = &job->frame_tiling;
tiling->width = width;
tiling->height = height;
tiling->layers = layers;
tiling->render_target_count = render_target_count;
tiling->msaa = msaa;
tiling->internal_bpp = max_internal_bpp;
tiling->double_buffer = double_buffer;
/* Double-buffer is incompatible with MSAA */
assert(!tiling->msaa || !tiling->double_buffer);
v3d_choose_tile_size(render_target_count, max_internal_bpp,
tiling->msaa, tiling->double_buffer,
&tiling->tile_width, &tiling->tile_height);
tiling->draw_tiles_x = DIV_ROUND_UP(width, tiling->tile_width);
tiling->draw_tiles_y = DIV_ROUND_UP(height, tiling->tile_height);
/* Size up our supertiles until we get under the limit */
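/* For example, assuming 64x64 tiles, a 1920x1080 frame has 30x17 = 510
 * tiles; the loop below grows the supertile size to 1x2 (270 supertiles)
 * and then 2x2 (135 supertiles), which is below the limit.
 */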
const uint32_t max_supertiles = 256;
tiling->supertile_width = 1;
tiling->supertile_height = 1;
for (;;) {
tiling->frame_width_in_supertiles =
DIV_ROUND_UP(tiling->draw_tiles_x, tiling->supertile_width);
tiling->frame_height_in_supertiles =
DIV_ROUND_UP(tiling->draw_tiles_y, tiling->supertile_height);
const uint32_t num_supertiles = tiling->frame_width_in_supertiles *
tiling->frame_height_in_supertiles;
if (num_supertiles < max_supertiles)
break;
if (tiling->supertile_width < tiling->supertile_height)
tiling->supertile_width++;
else
tiling->supertile_height++;
}
return tiling;
}
bool
v3dv_job_allocate_tile_state(struct v3dv_job *job)
{
struct v3dv_frame_tiling *tiling = &job->frame_tiling;
const uint32_t layers =
job->allocate_tile_state_for_all_layers ? tiling->layers : 1;
/* The PTB will request the tile alloc initial size per tile at start
* of tile binning.
*/
uint32_t tile_alloc_size = 64 * layers *
tiling->draw_tiles_x *
tiling->draw_tiles_y;
/* The PTB allocates in aligned 4k chunks after the initial setup. */
tile_alloc_size = align(tile_alloc_size, 4096);
/* Include the first two chunk allocations that the PTB does so that
* we definitely clear the OOM condition before triggering one (the HW
* won't trigger OOM during the first allocations).
*/
tile_alloc_size += 8192;
/* For performance, allocate some extra initial memory after the PTB's
* minimal allocations, so that we hopefully don't have to block the
* GPU on the kernel handling an OOM signal.
*/
tile_alloc_size += 512 * 1024;
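/* For example, assuming 64x64 tiles, a single-layer 1920x1080 frame has
 * 30x17 = 510 tiles, so this is 64 * 510 = 32640 bytes, aligned up to
 * 32768, plus 8192 bytes for the initial PTB chunks and 512 KB of slack:
 * 565248 bytes in total.
 */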
job->tile_alloc = v3dv_bo_alloc(job->device, tile_alloc_size,
"tile_alloc", true);
if (!job->tile_alloc) {
v3dv_flag_oom(NULL, job);
return false;
}
v3dv_job_add_bo_unchecked(job, job->tile_alloc);
const uint32_t tsda_per_tile_size = 256;
const uint32_t tile_state_size = layers *
tiling->draw_tiles_x *
tiling->draw_tiles_y *
tsda_per_tile_size;
job->tile_state = v3dv_bo_alloc(job->device, tile_state_size, "TSDA", true);
if (!job->tile_state) {
v3dv_flag_oom(NULL, job);
return false;
}
v3dv_job_add_bo_unchecked(job, job->tile_state);
return true;
}
void
v3dv_job_start_frame(struct v3dv_job *job,
uint32_t width,
uint32_t height,
uint32_t layers,
bool allocate_tile_state_for_all_layers,
bool allocate_tile_state_now,
uint32_t render_target_count,
uint8_t max_internal_bpp,
bool msaa)
{
assert(job);
/* Start by computing frame tiling spec for this job assuming that
* double-buffer mode is disabled.
*/
const struct v3dv_frame_tiling *tiling =
job_compute_frame_tiling(job, width, height, layers,
render_target_count, max_internal_bpp,
msaa, false);
v3dv_cl_ensure_space_with_branch(&job->bcl, 256);
v3dv_return_if_oom(NULL, job);
job->allocate_tile_state_for_all_layers = allocate_tile_state_for_all_layers;
/* For subpass jobs we postpone tile state allocation until we are finishing
* the job and have made a decision about double-buffer.
*/
if (allocate_tile_state_now) {
if (!v3dv_job_allocate_tile_state(job))
return;
}
v3dv_X(job->device, job_emit_binning_prolog)(job, tiling,
allocate_tile_state_for_all_layers ? tiling->layers : 1);
job->ez_state = V3D_EZ_UNDECIDED;
job->first_ez_state = V3D_EZ_UNDECIDED;
}
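/* Heuristic to decide whether a finished job should be switched to
 * double-buffer mode, based on the geometry and render scores accumulated
 * while recording it.
 */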
static bool
job_should_enable_double_buffer(struct v3dv_job *job)
{
/* Incompatible with double-buffer */
if (!job->can_use_double_buffer)
return false;
/* Too much geometry processing */
if (job->double_buffer_score.geom > 2000000)
return false;
/* Too little rendering to make up for tile store latency */
if (job->double_buffer_score.render < 100000)
return false;
return true;
}
static void
cmd_buffer_end_render_pass_frame(struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_job *job = cmd_buffer->state.job;
assert(job);
/* For subpass jobs we always emit the RCL here */
assert(v3dv_cl_offset(&job->rcl) == 0);
/* Decide if we want to enable double-buffer for this job. If we do, then
* we need to rewrite the TILE_BINNING_MODE_CFG packet in the BCL.
*/
if (job_should_enable_double_buffer(job)) {
assert(!job->frame_tiling.double_buffer);
job_compute_frame_tiling(job,
job->frame_tiling.width,
job->frame_tiling.height,
job->frame_tiling.layers,
job->frame_tiling.render_target_count,
job->frame_tiling.internal_bpp,
job->frame_tiling.msaa,
true);
v3dv_X(job->device, job_emit_enable_double_buffer)(job);
}
/* At this point we have decided whether we want to use double-buffer or
* not and the job's frame tiling represents that decision so we can
* allocate the tile state, which we need to do before we emit the RCL.
*/
v3dv_job_allocate_tile_state(job);
v3dv_X(cmd_buffer->device, cmd_buffer_emit_render_pass_rcl)(cmd_buffer);
v3dv_X(cmd_buffer->device, job_emit_binning_flush)(job);
}
struct v3dv_job *
v3dv_cmd_buffer_create_cpu_job(struct v3dv_device *device,
enum v3dv_job_type type,
struct v3dv_cmd_buffer *cmd_buffer,
uint32_t subpass_idx)
{
struct v3dv_job *job = vk_zalloc(&device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job) {
v3dv_flag_oom(cmd_buffer, NULL);
return NULL;
}
v3dv_job_init(job, type, device, cmd_buffer, subpass_idx);
return job;
}
static void
cmd_buffer_add_cpu_jobs_for_pending_state(struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
if (state->query.end.used_count > 0) {
const uint32_t query_count = state->query.end.used_count;
for (uint32_t i = 0; i < query_count; i++) {
assert(i < state->query.end.used_count);
struct v3dv_job *job =
v3dv_cmd_buffer_create_cpu_job(cmd_buffer->device,
V3DV_JOB_TYPE_CPU_END_QUERY,
cmd_buffer, -1);
v3dv_return_if_oom(cmd_buffer, NULL);
job->cpu.query_end = state->query.end.states[i];
list_addtail(&job->list_link, &cmd_buffer->jobs);
}
}
}
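/* Finishes the command buffer's current job and adds it to the command
 * buffer's job list. For render pass jobs this emits the RCL and binning
 * flush (primaries) or ends the BCL with a RETURN_FROM_SUB_LIST
 * (secondaries).
 */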
void
v3dv_cmd_buffer_finish_job(struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_job *job = cmd_buffer->state.job;
if (!job)
return;
/* Always clear BCL state after a job has been finished if we don't have
* a pending graphics barrier that could consume it (BCL barriers only
* apply to graphics jobs). This can happen if the application recorded
* a barrier involving geometry stages but none of the draw calls in the
* job actually required a binning sync.
*/
if (!(cmd_buffer->state.barrier.dst_mask & V3DV_BARRIER_GRAPHICS_BIT)) {
cmd_buffer->state.barrier.bcl_buffer_access = 0;
cmd_buffer->state.barrier.bcl_image_access = 0;
}
if (cmd_buffer->state.oom) {
v3dv_job_destroy(job);
cmd_buffer->state.job = NULL;
return;
}
/* If we have created a job for a command buffer then we should have
* recorded something into it: if the job was started in a render pass, it
* should at least have the start frame commands, otherwise, it should have
* a transfer command. The only exception are secondary command buffers
* inside a render pass.
*/
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
v3dv_cl_offset(&job->bcl) > 0);
/* When we merge multiple subpasses into the same job we must only emit one
* RCL, so we do that here, once we have decided that we need to finish the job.
* Any rendering that happens outside a render pass is never merged, so
* the RCL should have been emitted by the time we got here.
*/
assert(v3dv_cl_offset(&job->rcl) != 0 || cmd_buffer->state.pass);
/* If we are finishing a job inside a render pass we have two scenarios:
*
* 1. It is a regular CL, in which case we will submit the job to the GPU,
* so we may need to generate an RCL and add a binning flush.
*
* 2. It is a partial CL recorded in a secondary command buffer, in which
* case we are not submitting it directly to the GPU but rather branch to
* it from a primary command buffer. In this case we just want to end
* the BCL with a RETURN_FROM_SUB_LIST and the RCL and binning flush
* will be the primary job that branches to this CL.
*/
if (cmd_buffer->state.pass) {
if (job->type == V3DV_JOB_TYPE_GPU_CL) {
cmd_buffer_end_render_pass_frame(cmd_buffer);
} else {
assert(job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
v3dv_X(cmd_buffer->device, cmd_buffer_end_render_pass_secondary)(cmd_buffer);
}
}
list_addtail(&job->list_link, &cmd_buffer->jobs);
cmd_buffer->state.job = NULL;
/* If we have recorded any state with this last GPU job that requires
* emitting CPU jobs after the job is completed, add them now. The only
* exception is secondary command buffers inside a render pass, because in
* that case we want to defer this until we finish recording the primary
* job into which we execute the secondary.
*/
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY ||
!cmd_buffer->state.pass) {
cmd_buffer_add_cpu_jobs_for_pending_state(cmd_buffer);
}
}
bool
v3dv_job_type_is_gpu(struct v3dv_job *job)
{
switch (job->type) {
case V3DV_JOB_TYPE_GPU_CL:
case V3DV_JOB_TYPE_GPU_CL_SECONDARY:
case V3DV_JOB_TYPE_GPU_TFU:
case V3DV_JOB_TYPE_GPU_CSD:
return true;
default:
return false;
}
}
static void
cmd_buffer_serialize_job_if_needed(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_job *job)
{
assert(cmd_buffer && job);
/* Serialization only affects GPU jobs, CPU jobs are always automatically
* serialized.
*/
if (!v3dv_job_type_is_gpu(job))
return;
uint8_t barrier_mask = cmd_buffer->state.barrier.dst_mask;
if (barrier_mask == 0)
return;
uint8_t bit = 0;
uint8_t *src_mask;
if (job->type == V3DV_JOB_TYPE_GPU_CSD) {
assert(!job->is_transfer);
bit = V3DV_BARRIER_COMPUTE_BIT;
src_mask = &cmd_buffer->state.barrier.src_mask_compute;
} else if (job->is_transfer) {
assert(job->type == V3DV_JOB_TYPE_GPU_CL ||
job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY ||
job->type == V3DV_JOB_TYPE_GPU_TFU);
bit = V3DV_BARRIER_TRANSFER_BIT;
src_mask = &cmd_buffer->state.barrier.src_mask_transfer;
} else {
assert(job->type == V3DV_JOB_TYPE_GPU_CL ||
job->type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
bit = V3DV_BARRIER_GRAPHICS_BIT;
src_mask = &cmd_buffer->state.barrier.src_mask_graphics;
}
if (barrier_mask & bit) {
job->serialize = *src_mask;
*src_mask = 0;
cmd_buffer->state.barrier.dst_mask &= ~bit;
}
}
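/* Initializes a job of the given type. GPU jobs get their CL and BO
 * tracking state set up here. If the job is created for a command buffer,
 * all state is flagged dirty so it is re-emitted for the new job, and any
 * pending barrier state is consumed to serialize the job if needed.
 */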
void
v3dv_job_init(struct v3dv_job *job,
enum v3dv_job_type type,
struct v3dv_device *device,
struct v3dv_cmd_buffer *cmd_buffer,
int32_t subpass_idx)
{
assert(job);
/* Make sure we haven't made this new job current before calling here */
assert(!cmd_buffer || cmd_buffer->state.job != job);
job->type = type;
job->device = device;
job->cmd_buffer = cmd_buffer;
list_inithead(&job->list_link);
if (type == V3DV_JOB_TYPE_GPU_CL ||
type == V3DV_JOB_TYPE_GPU_CL_SECONDARY ||
type == V3DV_JOB_TYPE_GPU_CSD) {
job->bos =
_mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
job->bo_count = 0;
v3dv_cl_init(job, &job->indirect);
if (V3D_DBG(ALWAYS_FLUSH))
job->always_flush = true;
}
if (type == V3DV_JOB_TYPE_GPU_CL ||
type == V3DV_JOB_TYPE_GPU_CL_SECONDARY) {
v3dv_cl_init(job, &job->bcl);
v3dv_cl_init(job, &job->rcl);
}
if (cmd_buffer) {
/* Flag all state as dirty. Generally, we need to re-emit state for each
* new job.
*
* FIXME: there may be some exceptions, in which case we could skip some
* bits.
*/
cmd_buffer->state.dirty = ~0;
cmd_buffer->state.dirty_descriptor_stages = ~0;
/* Honor inheritance of occlusion queries in secondaries if requested */
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
cmd_buffer->state.inheritance.occlusion_query_enable) {
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_OCCLUSION_QUERY;
}
/* Keep track of the first subpass that we are recording in this new job.
* We will use this when we emit the RCL to decide how to emit our loads
* and stores.
*/
if (cmd_buffer->state.pass)
job->first_subpass = subpass_idx;
job->is_transfer = cmd_buffer->state.is_transfer;
cmd_buffer_serialize_job_if_needed(cmd_buffer, job);
job->perf = cmd_buffer->state.query.active_query.perf;
}
}
struct v3dv_job *
v3dv_cmd_buffer_start_job(struct v3dv_cmd_buffer *cmd_buffer,
int32_t subpass_idx,
enum v3dv_job_type type)
{
/* Don't create a new job if we can merge the current subpass into
* the current job.
*/
if (cmd_buffer->state.pass &&
subpass_idx != -1 &&
cmd_buffer_can_merge_subpass(cmd_buffer, subpass_idx)) {
cmd_buffer->state.job->is_subpass_finish = false;
return cmd_buffer->state.job;
}
/* Ensure we are not starting a new job without finishing a previous one */
if (cmd_buffer->state.job != NULL)
v3dv_cmd_buffer_finish_job(cmd_buffer);
assert(cmd_buffer->state.job == NULL);
struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job) {
fprintf(stderr, "Error: failed to allocate CPU memory for job\n");
v3dv_flag_oom(cmd_buffer, NULL);
return NULL;
}
v3dv_job_init(job, type, cmd_buffer->device, cmd_buffer, subpass_idx);
cmd_buffer->state.job = job;
return job;
}
static void
cmd_buffer_reset(struct vk_command_buffer *vk_cmd_buffer,
VkCommandBufferResetFlags flags)
{
struct v3dv_cmd_buffer *cmd_buffer =
container_of(vk_cmd_buffer, struct v3dv_cmd_buffer, vk);
vk_command_buffer_reset(&cmd_buffer->vk);
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_INITIALIZED) {
struct v3dv_device *device = cmd_buffer->device;
/* FIXME: For now we always free all resources as if
* VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT was set.
*/
if (cmd_buffer->status != V3DV_CMD_BUFFER_STATUS_NEW)
cmd_buffer_free_resources(cmd_buffer);
cmd_buffer_init(cmd_buffer, device);
}
assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
}
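/* Emits a vkCmdResolveImage2 call to resolve the given aspect of the source
 * attachment into the destination attachment, restricted to the current
 * render area.
 */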
static void
cmd_buffer_emit_resolve(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t dst_attachment_idx,
uint32_t src_attachment_idx,
VkImageAspectFlagBits aspect)
{
struct v3dv_image_view *src_iview =
cmd_buffer->state.attachments[src_attachment_idx].image_view;
struct v3dv_image_view *dst_iview =
cmd_buffer->state.attachments[dst_attachment_idx].image_view;
const VkRect2D *ra = &cmd_buffer->state.render_area;
VkImageResolve2 region = {
.sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2,
.srcSubresource = {
aspect,
src_iview->vk.base_mip_level,
src_iview->vk.base_array_layer,
src_iview->vk.layer_count,
},
.srcOffset = { ra->offset.x, ra->offset.y, 0 },
.dstSubresource = {
aspect,
dst_iview->vk.base_mip_level,
dst_iview->vk.base_array_layer,
dst_iview->vk.layer_count,
},
.dstOffset = { ra->offset.x, ra->offset.y, 0 },
.extent = { ra->extent.width, ra->extent.height, 1 },
};
struct v3dv_image *src_image = (struct v3dv_image *) src_iview->vk.image;
struct v3dv_image *dst_image = (struct v3dv_image *) dst_iview->vk.image;
VkResolveImageInfo2 resolve_info = {
.sType = VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2,
.srcImage = v3dv_image_to_handle(src_image),
.srcImageLayout = VK_IMAGE_LAYOUT_GENERAL,
.dstImage = v3dv_image_to_handle(dst_image),
.dstImageLayout = VK_IMAGE_LAYOUT_GENERAL,
.regionCount = 1,
.pRegions = &region,
};
VkCommandBuffer cmd_buffer_handle = v3dv_cmd_buffer_to_handle(cmd_buffer);
v3dv_CmdResolveImage2KHR(cmd_buffer_handle, &resolve_info);
}
static void
cmd_buffer_subpass_handle_pending_resolves(struct v3dv_cmd_buffer *cmd_buffer)
{
assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
const struct v3dv_render_pass *pass = cmd_buffer->state.pass;
const struct v3dv_subpass *subpass =
&pass->subpasses[cmd_buffer->state.subpass_idx];
if (!subpass->resolve_attachments)
return;
/* At this point we have already ended the current subpass and now we are
* about to emit vkCmdResolveImage calls to get the resolves we can't
* handle in the subpass RCL.
*
* vkCmdResolveImage is not supposed to be called inside a render pass so
* before we call that we need to make sure our command buffer state reflects
* that we are no longer in a subpass by finishing the current job and
* resetting the framebuffer and render pass state temporarily and then
* restoring it after we are done with the resolves.
*/
if (cmd_buffer->state.job)
v3dv_cmd_buffer_finish_job(cmd_buffer);
struct v3dv_framebuffer *restore_fb = cmd_buffer->state.framebuffer;
struct v3dv_render_pass *restore_pass = cmd_buffer->state.pass;
uint32_t restore_subpass_idx = cmd_buffer->state.subpass_idx;
cmd_buffer->state.framebuffer = NULL;
cmd_buffer->state.pass = NULL;
cmd_buffer->state.subpass_idx = -1;
for (uint32_t i = 0; i < subpass->color_count; i++) {
const uint32_t src_attachment_idx =
subpass->color_attachments[i].attachment;
if (src_attachment_idx == VK_ATTACHMENT_UNUSED)
continue;
/* Skip if this attachment doesn't have a resolve or if it was already
* implemented as a TLB resolve.
*/
if (!cmd_buffer->state.attachments[src_attachment_idx].has_resolve ||
cmd_buffer->state.attachments[src_attachment_idx].use_tlb_resolve) {
continue;
}
const uint32_t dst_attachment_idx =
subpass->resolve_attachments[i].attachment;
assert(dst_attachment_idx != VK_ATTACHMENT_UNUSED);
cmd_buffer_emit_resolve(cmd_buffer, dst_attachment_idx, src_attachment_idx,
VK_IMAGE_ASPECT_COLOR_BIT);
}
const uint32_t ds_src_attachment_idx =
subpass->ds_attachment.attachment;
if (ds_src_attachment_idx != VK_ATTACHMENT_UNUSED &&
cmd_buffer->state.attachments[ds_src_attachment_idx].has_resolve &&
!cmd_buffer->state.attachments[ds_src_attachment_idx].use_tlb_resolve) {
assert(subpass->resolve_depth || subpass->resolve_stencil);
const VkImageAspectFlags ds_aspects =
(subpass->resolve_depth ? VK_IMAGE_ASPECT_DEPTH_BIT : 0) |
(subpass->resolve_stencil ? VK_IMAGE_ASPECT_STENCIL_BIT : 0);
const uint32_t ds_dst_attachment_idx =
subpass->ds_resolve_attachment.attachment;
assert(ds_dst_attachment_idx != VK_ATTACHMENT_UNUSED);
cmd_buffer_emit_resolve(cmd_buffer, ds_dst_attachment_idx,
ds_src_attachment_idx, ds_aspects);
}
cmd_buffer->state.framebuffer = restore_fb;
cmd_buffer->state.pass = restore_pass;
cmd_buffer->state.subpass_idx = restore_subpass_idx;
}
static VkResult
cmd_buffer_begin_render_pass_secondary(
struct v3dv_cmd_buffer *cmd_buffer,
const VkCommandBufferInheritanceInfo *inheritance_info)
{
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
assert(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
assert(inheritance_info);
cmd_buffer->state.pass =
v3dv_render_pass_from_handle(inheritance_info->renderPass);
assert(cmd_buffer->state.pass);
cmd_buffer->state.framebuffer =
v3dv_framebuffer_from_handle(inheritance_info->framebuffer);
assert(inheritance_info->subpass < cmd_buffer->state.pass->subpass_count);
cmd_buffer->state.subpass_idx = inheritance_info->subpass;
cmd_buffer->state.inheritance.occlusion_query_enable =
inheritance_info->occlusionQueryEnable;
/* Secondaries that execute inside a render pass won't start subpasses
* so we want to create a job for them here.
*/
struct v3dv_job *job =
v3dv_cmd_buffer_start_job(cmd_buffer, inheritance_info->subpass,
V3DV_JOB_TYPE_GPU_CL_SECONDARY);
if (!job) {
v3dv_flag_oom(cmd_buffer, NULL);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
/* Secondary command buffers don't know about the render area, but our
* scissor setup accounts for it, so let's make sure we make it large
* enough that it doesn't actually constrain any rendering. This should
* be fine, since the Vulkan spec states:
*
* "The application must ensure (using scissor if necessary) that all
* rendering is contained within the render area."
*/
const struct v3dv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
cmd_buffer->state.render_area.offset.x = 0;
cmd_buffer->state.render_area.offset.y = 0;
cmd_buffer->state.render_area.extent.width =
framebuffer ? framebuffer->width : V3D_MAX_IMAGE_DIMENSION;
cmd_buffer->state.render_area.extent.height =
framebuffer ? framebuffer->height : V3D_MAX_IMAGE_DIMENSION;
/* We only really execute double-buffer mode in primary jobs, so allow this
* mode in render pass secondaries to keep track of the double-buffer mode
* score in them and update the primaries accordingly when they are executed
* into them.
*/
job->can_use_double_buffer = true;
return VK_SUCCESS;
}
const struct vk_command_buffer_ops v3dv_cmd_buffer_ops = {
.create = cmd_buffer_create,
.reset = cmd_buffer_reset,
.destroy = cmd_buffer_destroy,
};
VKAPI_ATTR VkResult VKAPI_CALL
v3dv_BeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
/* If this is the first vkBeginCommandBuffer, we must initialize the
* command buffer's state. Otherwise, we must reset its state. In both
* cases we reset it.
*/
cmd_buffer_reset(&cmd_buffer->vk, 0);
assert(cmd_buffer->status == V3DV_CMD_BUFFER_STATUS_INITIALIZED);
cmd_buffer->usage_flags = pBeginInfo->flags;
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
VkResult result =
cmd_buffer_begin_render_pass_secondary(cmd_buffer,
pBeginInfo->pInheritanceInfo);
if (result != VK_SUCCESS)
return result;
}
}
cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_RECORDING;
return VK_SUCCESS;
}
static void
cmd_buffer_update_tile_alignment(struct v3dv_cmd_buffer *cmd_buffer)
{
/* Render areas and scissor/viewport are only relevant inside render passes,
* otherwise we are dealing with transfer operations where these elements
* don't apply.
*/
assert(cmd_buffer->state.pass);
const VkRect2D *rect = &cmd_buffer->state.render_area;
/* We should only call this at the beginning of a subpass so we should
* always have framebuffer information available.
*/
assert(cmd_buffer->state.framebuffer);
cmd_buffer->state.tile_aligned_render_area =
v3dv_subpass_area_is_tile_aligned(cmd_buffer->device, rect,
cmd_buffer->state.framebuffer,
cmd_buffer->state.pass,
cmd_buffer->state.subpass_idx);
if (!cmd_buffer->state.tile_aligned_render_area) {
perf_debug("Render area for subpass %d of render pass %p doesn't "
"match render pass granularity.\n",
cmd_buffer->state.subpass_idx, cmd_buffer->state.pass);
}
}
static void
cmd_buffer_update_attachment_resolve_state(struct v3dv_cmd_buffer *cmd_buffer)
{
/* NOTE: This should be called after cmd_buffer_update_tile_alignment()
* since it relies on up-to-date information about subpass tile alignment.
*/
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
const struct v3dv_render_pass *pass = state->pass;
const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
for (uint32_t i = 0; i < subpass->color_count; i++) {
const uint32_t attachment_idx = subpass->color_attachments[i].attachment;
if (attachment_idx == VK_ATTACHMENT_UNUSED)
continue;
state->attachments[attachment_idx].has_resolve =
subpass->resolve_attachments &&
subpass->resolve_attachments[i].attachment != VK_ATTACHMENT_UNUSED;
state->attachments[attachment_idx].use_tlb_resolve =
state->attachments[attachment_idx].has_resolve &&
state->tile_aligned_render_area &&
pass->attachments[attachment_idx].try_tlb_resolve;
}
uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
if (ds_attachment_idx != VK_ATTACHMENT_UNUSED) {
uint32_t ds_resolve_attachment_idx =
subpass->ds_resolve_attachment.attachment;
state->attachments[ds_attachment_idx].has_resolve =
ds_resolve_attachment_idx != VK_ATTACHMENT_UNUSED;
assert(!state->attachments[ds_attachment_idx].has_resolve ||
(subpass->resolve_depth || subpass->resolve_stencil));
state->attachments[ds_attachment_idx].use_tlb_resolve =
state->attachments[ds_attachment_idx].has_resolve &&
state->tile_aligned_render_area &&
pass->attachments[ds_attachment_idx].try_tlb_resolve;
}
}
static void
cmd_buffer_state_set_attachment_clear_color(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t attachment_idx,
const VkClearColorValue *color)
{
assert(attachment_idx < cmd_buffer->state.pass->attachment_count);
const struct v3dv_render_pass_attachment *attachment =
&cmd_buffer->state.pass->attachments[attachment_idx];
uint32_t internal_type, internal_bpp;
const struct v3dv_format *format =
v3dv_X(cmd_buffer->device, get_format)(attachment->desc.format);
v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_output_format)
(format->rt_type, &internal_type, &internal_bpp);
uint32_t internal_size = 4 << internal_bpp;
struct v3dv_cmd_buffer_attachment_state *attachment_state =
&cmd_buffer->state.attachments[attachment_idx];
v3dv_X(cmd_buffer->device, get_hw_clear_color)
(color, internal_type, internal_size, &attachment_state->clear_value.color[0]);
attachment_state->vk_clear_value.color = *color;
}
static void
cmd_buffer_state_set_attachment_clear_depth_stencil(
struct v3dv_cmd_buffer *cmd_buffer,
uint32_t attachment_idx,
bool clear_depth, bool clear_stencil,
const VkClearDepthStencilValue *ds)
{
struct v3dv_cmd_buffer_attachment_state *attachment_state =
&cmd_buffer->state.attachments[attachment_idx];
if (clear_depth)
attachment_state->clear_value.z = ds->depth;
if (clear_stencil)
attachment_state->clear_value.s = ds->stencil;
attachment_state->vk_clear_value.depthStencil = *ds;
}
static void
cmd_buffer_state_set_clear_values(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t count, const VkClearValue *values)
{
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
const struct v3dv_render_pass *pass = state->pass;
/* There could be less clear values than attachments in the render pass, in
* which case we only want to process as many as we have, or there could be
* more, in which case we want to ignore those for which we don't have a
* corresponding attachment.
*/
count = MIN2(count, pass->attachment_count);
for (uint32_t i = 0; i < count; i++) {
const struct v3dv_render_pass_attachment *attachment =
&pass->attachments[i];
if (attachment->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
continue;
VkImageAspectFlags aspects = vk_format_aspects(attachment->desc.format);
if (aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
cmd_buffer_state_set_attachment_clear_color(cmd_buffer, i,
&values[i].color);
} else if (aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT)) {
cmd_buffer_state_set_attachment_clear_depth_stencil(
cmd_buffer, i,
aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
&values[i].depthStencil);
}
}
}
static void
cmd_buffer_state_set_attachments(struct v3dv_cmd_buffer *cmd_buffer,
const VkRenderPassBeginInfo *pRenderPassBegin)
{
V3DV_FROM_HANDLE(v3dv_render_pass, pass, pRenderPassBegin->renderPass);
V3DV_FROM_HANDLE(v3dv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
const VkRenderPassAttachmentBeginInfo *attach_begin =
vk_find_struct_const(pRenderPassBegin, RENDER_PASS_ATTACHMENT_BEGIN_INFO);
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
for (uint32_t i = 0; i < pass->attachment_count; i++) {
if (attach_begin && attach_begin->attachmentCount != 0) {
state->attachments[i].image_view =
v3dv_image_view_from_handle(attach_begin->pAttachments[i]);
} else if (framebuffer) {
state->attachments[i].image_view = framebuffer->attachments[i];
} else {
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
state->attachments[i].image_view = NULL;
}
}
}
static void
cmd_buffer_init_render_pass_attachment_state(struct v3dv_cmd_buffer *cmd_buffer,
const VkRenderPassBeginInfo *pRenderPassBegin)
{
cmd_buffer_state_set_clear_values(cmd_buffer,
pRenderPassBegin->clearValueCount,
pRenderPassBegin->pClearValues);
cmd_buffer_state_set_attachments(cmd_buffer, pRenderPassBegin);
}
static void
cmd_buffer_ensure_render_pass_attachment_state(struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
const struct v3dv_render_pass *pass = state->pass;
if (state->attachment_alloc_count < pass->attachment_count) {
if (state->attachments) {
assert(state->attachment_alloc_count > 0);
vk_free(&cmd_buffer->device->vk.alloc, state->attachments);
}
uint32_t size = sizeof(struct v3dv_cmd_buffer_attachment_state) *
pass->attachment_count;
state->attachments = vk_zalloc(&cmd_buffer->device->vk.alloc, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!state->attachments) {
v3dv_flag_oom(cmd_buffer, NULL);
return;
}
state->attachment_alloc_count = pass->attachment_count;
}
assert(state->attachment_alloc_count >= pass->attachment_count);
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdBeginRenderPass2(VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfo *pSubpassBeginInfo)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
V3DV_FROM_HANDLE(v3dv_render_pass, pass, pRenderPassBegin->renderPass);
V3DV_FROM_HANDLE(v3dv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
state->pass = pass;
state->framebuffer = framebuffer;
cmd_buffer_ensure_render_pass_attachment_state(cmd_buffer);
v3dv_return_if_oom(cmd_buffer, NULL);
cmd_buffer_init_render_pass_attachment_state(cmd_buffer, pRenderPassBegin);
state->render_area = pRenderPassBegin->renderArea;
/* If our render area is smaller than the current clip window we will have
* to emit a new clip window to constrain it to the render area.
*/
uint32_t min_render_x = state->render_area.offset.x;
uint32_t min_render_y = state->render_area.offset.y;
uint32_t max_render_x = min_render_x + state->render_area.extent.width - 1;
uint32_t max_render_y = min_render_y + state->render_area.extent.height - 1;
uint32_t min_clip_x = state->clip_window.offset.x;
uint32_t min_clip_y = state->clip_window.offset.y;
uint32_t max_clip_x = min_clip_x + state->clip_window.extent.width - 1;
uint32_t max_clip_y = min_clip_y + state->clip_window.extent.height - 1;
if (min_render_x > min_clip_x || min_render_y > min_clip_y ||
max_render_x < max_clip_x || max_render_y < max_clip_y) {
state->dirty |= V3DV_CMD_DIRTY_SCISSOR;
}
/* Setup for first subpass */
v3dv_cmd_buffer_subpass_start(cmd_buffer, 0);
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdNextSubpass2(VkCommandBuffer commandBuffer,
const VkSubpassBeginInfo *pSubpassBeginInfo,
const VkSubpassEndInfo *pSubpassEndInfo)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
assert(state->subpass_idx < state->pass->subpass_count - 1);
/* Finish the previous subpass */
v3dv_cmd_buffer_subpass_finish(cmd_buffer);
cmd_buffer_subpass_handle_pending_resolves(cmd_buffer);
/* Start the next subpass */
v3dv_cmd_buffer_subpass_start(cmd_buffer, state->subpass_idx + 1);
}
static void
cmd_buffer_emit_subpass_clears(struct v3dv_cmd_buffer *cmd_buffer)
{
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
assert(cmd_buffer->state.pass);
assert(cmd_buffer->state.subpass_idx < cmd_buffer->state.pass->subpass_count);
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
const struct v3dv_render_pass *pass = state->pass;
const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
/* We only need to emit subpass clears as draw calls when the render
* area is not aligned to tile boundaries or for GFXH-1461.
*/
if (cmd_buffer->state.tile_aligned_render_area &&
!subpass->do_depth_clear_with_draw &&
!subpass->do_stencil_clear_with_draw) {
return;
}
uint32_t att_count = 0;
VkClearAttachment atts[V3D_MAX_DRAW_BUFFERS + 1]; /* 4 color + D/S */
/* We only need to emit subpass clears as draw calls for color attachments
* if the render area is not aligned to tile boundaries.
*/
if (!cmd_buffer->state.tile_aligned_render_area) {
for (uint32_t i = 0; i < subpass->color_count; i++) {
const uint32_t att_idx = subpass->color_attachments[i].attachment;
if (att_idx == VK_ATTACHMENT_UNUSED)
continue;
struct v3dv_render_pass_attachment *att = &pass->attachments[att_idx];
if (att->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR)
continue;
if (state->subpass_idx != att->first_subpass)
continue;
atts[att_count].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
atts[att_count].colorAttachment = i;
atts[att_count].clearValue = state->attachments[att_idx].vk_clear_value;
att_count++;
}
}
/* For D/S we may also need to emit a subpass clear for GFXH-1461 */
const uint32_t ds_att_idx = subpass->ds_attachment.attachment;
if (ds_att_idx != VK_ATTACHMENT_UNUSED) {
struct v3dv_render_pass_attachment *att = &pass->attachments[ds_att_idx];
if (state->subpass_idx == att->first_subpass) {
VkImageAspectFlags aspects = vk_format_aspects(att->desc.format);
if (att->desc.loadOp != VK_ATTACHMENT_LOAD_OP_CLEAR ||
(cmd_buffer->state.tile_aligned_render_area &&
!subpass->do_depth_clear_with_draw)) {
aspects &= ~VK_IMAGE_ASPECT_DEPTH_BIT;
}
if (att->desc.stencilLoadOp != VK_ATTACHMENT_LOAD_OP_CLEAR ||
(cmd_buffer->state.tile_aligned_render_area &&
!subpass->do_stencil_clear_with_draw)) {
aspects &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
}
if (aspects) {
atts[att_count].aspectMask = aspects;
atts[att_count].colorAttachment = 0; /* Ignored */
atts[att_count].clearValue =
state->attachments[ds_att_idx].vk_clear_value;
att_count++;
}
}
}
if (att_count == 0)
return;
if (!cmd_buffer->state.tile_aligned_render_area) {
perf_debug("Render area doesn't match render pass granularity, falling "
"back to vkCmdClearAttachments for "
"VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
} else if (subpass->do_depth_clear_with_draw ||
subpass->do_stencil_clear_with_draw) {
perf_debug("Subpass clears DEPTH but loads STENCIL (or vice versa), "
"falling back to vkCmdClearAttachments for "
"VK_ATTACHMENT_LOAD_OP_CLEAR.\n");
}
/* From the Vulkan 1.0 spec:
*
* "VK_ATTACHMENT_LOAD_OP_CLEAR specifies that the contents within the
* render area will be cleared to a uniform value, which is specified
* when a render pass instance is begun."
*
* So the clear is only constrained by the render area and not by pipeline
* state such as scissor or viewport, these are the semantics of
* vkCmdClearAttachments as well.
*/
VkCommandBuffer _cmd_buffer = v3dv_cmd_buffer_to_handle(cmd_buffer);
VkClearRect rect = {
.rect = state->render_area,
.baseArrayLayer = 0,
.layerCount = 1,
};
v3dv_CmdClearAttachments(_cmd_buffer, att_count, atts, 1, &rect);
}
bool
v3dv_cmd_buffer_check_needs_load(const struct v3dv_cmd_buffer_state *state,
VkImageAspectFlags aspect,
uint32_t first_subpass_idx,
VkAttachmentLoadOp load_op,
uint32_t last_subpass_idx,
VkAttachmentStoreOp store_op)
{
/* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
* testing does not exist in the image.
*/
if (!aspect)
return false;
/* Attachment (or view) load operations apply on the first subpass that
* uses the attachment (or view), otherwise we always need to load.
*/
if (state->job->first_subpass > first_subpass_idx)
return true;
/* If the job is continuing a subpass started in another job, we always
* need to load.
*/
if (state->job->is_subpass_continue)
return true;
/* If the area is not aligned to tile boundaries and we are going to store,
* then we need to load to preserve contents outside the render area.
*/
if (!state->tile_aligned_render_area &&
v3dv_cmd_buffer_check_needs_store(state, aspect, last_subpass_idx,
store_op)) {
return true;
}
/* The attachment load operations must be LOAD */
return load_op == VK_ATTACHMENT_LOAD_OP_LOAD;
}
bool
v3dv_cmd_buffer_check_needs_store(const struct v3dv_cmd_buffer_state *state,
VkImageAspectFlags aspect,
uint32_t last_subpass_idx,
VkAttachmentStoreOp store_op)
{
/* We call this with image->vk.aspects & aspect, so 0 means the aspect we are
* testing does not exist in the image.
*/
if (!aspect)
return false;
/* Attachment (or view) store operations only apply on the last subpass
* where the attachment (or view) is used, in other subpasses we always
* need to store.
*/
if (state->subpass_idx < last_subpass_idx)
return true;
/* Attachment store operations only apply on the last job we emit on the
* last subpass where the attachment is used, otherwise we always need to
* store.
*/
if (!state->job->is_subpass_finish)
return true;
/* The attachment store operation must be STORE */
return store_op == VK_ATTACHMENT_STORE_OP_STORE;
}
static void
cmd_buffer_subpass_check_double_buffer_mode(struct v3dv_cmd_buffer *cmd_buffer,
bool msaa)
{
const struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
struct v3dv_job *job = cmd_buffer->state.job;
assert(job);
job->can_use_double_buffer = false;
/* Double-buffer can only be used if requested via V3D_DEBUG */
if (!V3D_DBG(DOUBLE_BUFFER))
return;
/* Double-buffer cannot be enabled for MSAA jobs */
if (msaa)
return;
const struct v3dv_render_pass *pass = state->pass;
const struct v3dv_subpass *subpass = &pass->subpasses[state->subpass_idx];
/* FIXME: For now we discard multiview jobs (which have an implicit geometry
* shader) for this optimization. If we want to enable this with multiview
* we would need to check if any view (layer) in any attachment used by the
* job has loads and/or stores as we do below for regular attachments. Also,
* we would want to have a heuristic that doesn't automatically disable
* double-buffer in the presence of geometry shaders.
*/
if (state->pass->multiview_enabled)
return;
/* Tile loads are serialized against stores, in which case we don't get
* any benefits from enabling double-buffer and would just pay the price
* of a smaller tile size instead. Similarly, we only benefit from
* double-buffer if we have tile stores, as the point of this mode is
* to execute rendering of a new tile while we store the previous one to
* hide latency on the tile store operation.
*/
bool has_stores = false;
for (uint32_t i = 0; i < subpass->color_count; i++) {
uint32_t attachment_idx = subpass->color_attachments[i].attachment;
if (attachment_idx == VK_ATTACHMENT_UNUSED)
continue;
const struct v3dv_render_pass_attachment *attachment =
&state->pass->attachments[attachment_idx];
/* FIXME: This will check 'tile_aligned_render_area' but that was
* computed with a tile size without double-buffer. That is okay
* because if the larger tile size is aligned then we know the smaller
* tile size for double-buffer will be as well. However, we might
* still benefit from doing this check with the smaller tile size
* because it can happen that the smaller size is aligned and the
* larger size is not.
*/
if (v3dv_cmd_buffer_check_needs_load(state,
VK_IMAGE_ASPECT_COLOR_BIT,
attachment->first_subpass,
attachment->desc.loadOp,
attachment->last_subpass,
attachment->desc.storeOp)) {
return;
}
if (v3dv_cmd_buffer_check_needs_store(state,
VK_IMAGE_ASPECT_COLOR_BIT,
attachment->last_subpass,
attachment->desc.storeOp)) {
has_stores = true;
}
}
if (subpass->ds_attachment.attachment != VK_ATTACHMENT_UNUSED) {
uint32_t ds_attachment_idx = subpass->ds_attachment.attachment;
const struct v3dv_render_pass_attachment *ds_attachment =
&state->pass->attachments[ds_attachment_idx];
const VkImageAspectFlags ds_aspects =
vk_format_aspects(ds_attachment->desc.format);
if (v3dv_cmd_buffer_check_needs_load(state,
ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
ds_attachment->first_subpass,
ds_attachment->desc.loadOp,
ds_attachment->last_subpass,
ds_attachment->desc.storeOp)) {
return;
}
if (v3dv_cmd_buffer_check_needs_load(state,
ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
ds_attachment->first_subpass,
ds_attachment->desc.stencilLoadOp,
ds_attachment->last_subpass,
ds_attachment->desc.stencilStoreOp)) {
return;
}
has_stores |= v3dv_cmd_buffer_check_needs_store(state,
ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT,
ds_attachment->last_subpass,
ds_attachment->desc.storeOp);
has_stores |= v3dv_cmd_buffer_check_needs_store(state,
ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT,
ds_attachment->last_subpass,
ds_attachment->desc.stencilStoreOp);
}
job->can_use_double_buffer = has_stores;
}
static struct v3dv_job *
cmd_buffer_subpass_create_job(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t subpass_idx,
enum v3dv_job_type type)
{
assert(type == V3DV_JOB_TYPE_GPU_CL ||
type == V3DV_JOB_TYPE_GPU_CL_SECONDARY);
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
assert(subpass_idx < state->pass->subpass_count);
/* Starting a new job can trigger a finish of the current one, so don't
* change the command buffer state for the new job until we are done creating
* the new job.
*/
struct v3dv_job *job =
v3dv_cmd_buffer_start_job(cmd_buffer, subpass_idx, type);
if (!job)
return NULL;
state->subpass_idx = subpass_idx;
/* If we are starting a new job we need to setup binning. We only do this
* for V3DV_JOB_TYPE_GPU_CL jobs because V3DV_JOB_TYPE_GPU_CL_SECONDARY
* jobs are not submitted to the GPU directly, and are instead meant to be
* branched to from other V3DV_JOB_TYPE_GPU_CL jobs.
*/
if (type == V3DV_JOB_TYPE_GPU_CL &&
job->first_subpass == state->subpass_idx) {
const struct v3dv_subpass *subpass =
&state->pass->subpasses[state->subpass_idx];
const struct v3dv_framebuffer *framebuffer = state->framebuffer;
uint8_t internal_bpp;
bool msaa;
v3dv_X(job->device, framebuffer_compute_internal_bpp_msaa)
(framebuffer, state->attachments, subpass, &internal_bpp, &msaa);
/* From the Vulkan spec:
*
* "If the render pass uses multiview, then layers must be one and
* each attachment requires a number of layers that is greater than
* the maximum bit index set in the view mask in the subpasses in
* which it is used."
*
* So when multiview is enabled, we take the number of layers from the
* last bit set in the view mask.
*/
uint32_t layers = framebuffer->layers;
if (subpass->view_mask != 0) {
assert(framebuffer->layers == 1);
layers = util_last_bit(subpass->view_mask);
}
v3dv_job_start_frame(job,
framebuffer->width,
framebuffer->height,
layers,
true, false,
subpass->color_count,
internal_bpp,
msaa);
}
return job;
}
struct v3dv_job *
v3dv_cmd_buffer_subpass_start(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t subpass_idx)
{
assert(cmd_buffer->state.pass);
assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
struct v3dv_job *job =
cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
V3DV_JOB_TYPE_GPU_CL);
if (!job)
return NULL;
/* Check if our render area is aligned to tile boundaries. We have to do
* this in each subpass because the subset of attachments used can change
* and with that the tile size selected by the hardware can change too.
*/
cmd_buffer_update_tile_alignment(cmd_buffer);
/* Decide if we can use double-buffer for this subpass job */
cmd_buffer_subpass_check_double_buffer_mode(cmd_buffer, job->frame_tiling.msaa);
cmd_buffer_update_attachment_resolve_state(cmd_buffer);
/* If we can't use TLB clears then we need to emit draw clears for any
* LOAD_OP_CLEAR attachments in this subpass now. We might also need to emit
* Depth/Stencil clears if we hit GFXH-1461.
*
* Secondary command buffers don't start subpasses (and may not even have
* framebuffer state), so we only care about this in primaries. The only
* exception could be a secondary running inside a subpass that needs to
* record a meta operation (with its own render pass) that relies on
* attachment load clears, but we don't have any instances of that right
* now.
*/
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
cmd_buffer_emit_subpass_clears(cmd_buffer);
return job;
}
struct v3dv_job *
v3dv_cmd_buffer_subpass_resume(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t subpass_idx)
{
assert(cmd_buffer->state.pass);
assert(subpass_idx < cmd_buffer->state.pass->subpass_count);
struct v3dv_job *job;
if (cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
V3DV_JOB_TYPE_GPU_CL);
} else {
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
job = cmd_buffer_subpass_create_job(cmd_buffer, subpass_idx,
V3DV_JOB_TYPE_GPU_CL_SECONDARY);
}
if (!job)
return NULL;
job->is_subpass_continue = true;
return job;
}
void
v3dv_cmd_buffer_subpass_finish(struct v3dv_cmd_buffer *cmd_buffer)
{
/* We can end up here without a job if the last command recorded into the
* subpass already finished the job (for example a pipeline barrier). In
* that case we don't set the is_subpass_finish flag, but that is not
* required for proper behavior.
*/
struct v3dv_job *job = cmd_buffer->state.job;
if (job)
job->is_subpass_finish = true;
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdEndRenderPass2(VkCommandBuffer commandBuffer,
const VkSubpassEndInfo *pSubpassEndInfo)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
/* Finalize last subpass */
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
assert(state->subpass_idx == state->pass->subpass_count - 1);
v3dv_cmd_buffer_subpass_finish(cmd_buffer);
v3dv_cmd_buffer_finish_job(cmd_buffer);
cmd_buffer_subpass_handle_pending_resolves(cmd_buffer);
/* We are no longer inside a render pass */
state->framebuffer = NULL;
state->pass = NULL;
state->subpass_idx = -1;
}
VKAPI_ATTR VkResult VKAPI_CALL
v3dv_EndCommandBuffer(VkCommandBuffer commandBuffer)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
if (cmd_buffer->state.oom)
return VK_ERROR_OUT_OF_HOST_MEMORY;
/* Primaries should have ended any recording jobs by the time they hit
* vkEndRenderPass (if we are inside a render pass). Commands outside
* a render pass instance (for both primaries and secondaries) spawn
* complete jobs too. So the only case where we can get here without
* finishing a recording job is when we are recording a secondary
* inside a render pass.
*/
if (cmd_buffer->state.job) {
assert(cmd_buffer->vk.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
cmd_buffer->state.pass);
v3dv_cmd_buffer_finish_job(cmd_buffer);
}
cmd_buffer->status = V3DV_CMD_BUFFER_STATUS_EXECUTABLE;
return VK_SUCCESS;
}
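/* Creates shallow copies of the BO structs in src and links them into dst
 * so a cloned job gets its own list links (see v3dv_job_clone_in_cmd_buffer).
 */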
static void
clone_bo_list(struct v3dv_cmd_buffer *cmd_buffer,
struct list_head *dst,
struct list_head *src)
{
assert(cmd_buffer);
list_inithead(dst);
list_for_each_entry(struct v3dv_bo, bo, src, list_link) {
struct v3dv_bo *clone_bo =
vk_alloc(&cmd_buffer->device->vk.alloc, sizeof(struct v3dv_bo), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!clone_bo) {
v3dv_flag_oom(cmd_buffer, NULL);
return;
}
*clone_bo = *bo;
list_addtail(&clone_bo->list_link, dst);
}
}
/* Clones a job for inclusion in the given command buffer. Note that this
* doesn't make a deep copy, so the cloned job doesn't own any resources.
* Useful when we need to have a job in more than one list, which happens
* for jobs recorded in secondary command buffers when we want to execute
* them in primaries.
*/
struct v3dv_job *
v3dv_job_clone_in_cmd_buffer(struct v3dv_job *job,
struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_job *clone_job = vk_alloc(&job->device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!clone_job) {
v3dv_flag_oom(cmd_buffer, NULL);
return NULL;
}
/* Cloned jobs don't duplicate resources! */
*clone_job = *job;
clone_job->is_clone = true;
clone_job->cmd_buffer = cmd_buffer;
list_addtail(&clone_job->list_link, &cmd_buffer->jobs);
/* We need to regen the BO lists so that they point to the BO list in the
* cloned job. Otherwise functions like list_length() will loop forever.
*/
if (job->type == V3DV_JOB_TYPE_GPU_CL) {
clone_bo_list(cmd_buffer, &clone_job->bcl.bo_list, &job->bcl.bo_list);
clone_bo_list(cmd_buffer, &clone_job->rcl.bo_list, &job->rcl.bo_list);
clone_bo_list(cmd_buffer, &clone_job->indirect.bo_list,
&job->indirect.bo_list);
}
return clone_job;
}
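/* Accumulates pending barrier state by OR-ing the source masks into the
* destination, so barrier state recorded in secondary command buffers can
* be carried over into the primary that executes them.
*/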
void
v3dv_cmd_buffer_merge_barrier_state(struct v3dv_barrier_state *dst,
struct v3dv_barrier_state *src)
{
dst->dst_mask |= src->dst_mask;
dst->src_mask_graphics |= src->src_mask_graphics;
dst->src_mask_compute |= src->src_mask_compute;
dst->src_mask_transfer |= src->src_mask_transfer;
dst->bcl_buffer_access |= src->bcl_buffer_access;
dst->bcl_image_access |= src->bcl_image_access;
}
static void
cmd_buffer_execute_outside_pass(struct v3dv_cmd_buffer *primary,
uint32_t cmd_buffer_count,
const VkCommandBuffer *cmd_buffers)
{
struct v3dv_barrier_state pending_barrier = { 0 };
for (uint32_t i = 0; i < cmd_buffer_count; i++) {
V3DV_FROM_HANDLE(v3dv_cmd_buffer, secondary, cmd_buffers[i]);
assert(!(secondary->usage_flags &
VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
/* Secondary command buffers that execute outside a render pass create
* complete jobs with an RCL and tile setup, so we simply want to merge
* their job list into the primary's. However, because they may be
* executed in multiple primaries at the same time and we only have a
* single list_link in each job, we can't just add them to the primary's
* job list; we have to clone them first.
*
* Alternatively, we could create an "execute secondary" CPU job that,
* when executed in a queue, would submit all the jobs in the referenced
* secondary command buffer. However, this would raise some challenges
* to make it work with the implementation of wait threads in the queue,
* which we use for event waits, for example.
*/
list_for_each_entry(struct v3dv_job, secondary_job,
&secondary->jobs, list_link) {
/* These can only happen inside a render pass */
assert(secondary_job->type != V3DV_JOB_TYPE_GPU_CL_SECONDARY);
struct v3dv_job *job = v3dv_job_clone_in_cmd_buffer(secondary_job, primary);
if (!job)
return;
if (pending_barrier.dst_mask) {
/* FIXME: do the same we do for primaries and only choose the
* relevant src masks.
*/
job->serialize = pending_barrier.src_mask_graphics |
pending_barrier.src_mask_transfer |
pending_barrier.src_mask_compute;
if (pending_barrier.bcl_buffer_access ||
pending_barrier.bcl_image_access) {
job->needs_bcl_sync = true;
}
memset(&pending_barrier, 0, sizeof(pending_barrier));
}
}
/* If this secondary had any pending barrier state, that barrier state
* needs to be consumed by whatever comes after it (the first job in the
* next secondary, or the primary if this was the last secondary).
*/
assert(secondary->state.barrier.dst_mask ||
(!secondary->state.barrier.bcl_buffer_access &&
!secondary->state.barrier.bcl_image_access));
pending_barrier = secondary->state.barrier;
}
if (pending_barrier.dst_mask) {
v3dv_cmd_buffer_merge_barrier_state(&primary->state.barrier,
&pending_barrier);
}
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdExecuteCommands(VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, primary, commandBuffer);
if (primary->state.pass != NULL) {
v3dv_X(primary->device, cmd_buffer_execute_inside_pass)
(primary, commandBufferCount, pCommandBuffers);
} else {
cmd_buffer_execute_outside_pass(primary,
commandBufferCount, pCommandBuffers);
}
}
/* This goes through the list of possible dynamic states in the pipeline and,
* for those that are not configured as dynamic, copies the relevant state
* into the command buffer.
*/
static void
cmd_buffer_bind_pipeline_static_state(struct v3dv_cmd_buffer *cmd_buffer,
const struct v3dv_dynamic_state *src)
{
struct v3dv_dynamic_state *dest = &cmd_buffer->state.dynamic;
uint32_t dynamic_mask = src->mask;
uint32_t dirty = 0;
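/* For each state group that the pipeline doesn't expose as dynamic, copy
* the pipeline's baked value into the command buffer, flagging the
* corresponding dirty bit only when the value actually changes.
*/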
if (!(dynamic_mask & V3DV_DYNAMIC_VIEWPORT)) {
dest->viewport.count = src->viewport.count;
if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
src->viewport.count * sizeof(VkViewport))) {
typed_memcpy(dest->viewport.viewports,
src->viewport.viewports,
src->viewport.count);
typed_memcpy(dest->viewport.scale, src->viewport.scale,
src->viewport.count);
typed_memcpy(dest->viewport.translate, src->viewport.translate,
src->viewport.count);
dirty |= V3DV_CMD_DIRTY_VIEWPORT;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_SCISSOR)) {
dest->scissor.count = src->scissor.count;
if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
src->scissor.count * sizeof(VkRect2D))) {
typed_memcpy(dest->scissor.scissors,
src->scissor.scissors, src->scissor.count);
dirty |= V3DV_CMD_DIRTY_SCISSOR;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_COMPARE_MASK)) {
if (memcmp(&dest->stencil_compare_mask, &src->stencil_compare_mask,
sizeof(src->stencil_compare_mask))) {
dest->stencil_compare_mask = src->stencil_compare_mask;
dirty |= V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_WRITE_MASK)) {
if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
sizeof(src->stencil_write_mask))) {
dest->stencil_write_mask = src->stencil_write_mask;
dirty |= V3DV_CMD_DIRTY_STENCIL_WRITE_MASK;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_STENCIL_REFERENCE)) {
if (memcmp(&dest->stencil_reference, &src->stencil_reference,
sizeof(src->stencil_reference))) {
dest->stencil_reference = src->stencil_reference;
dirty |= V3DV_CMD_DIRTY_STENCIL_REFERENCE;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_BLEND_CONSTANTS)) {
if (memcmp(dest->blend_constants, src->blend_constants,
sizeof(src->blend_constants))) {
memcpy(dest->blend_constants, src->blend_constants,
sizeof(src->blend_constants));
dirty |= V3DV_CMD_DIRTY_BLEND_CONSTANTS;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_DEPTH_BIAS)) {
if (memcmp(&dest->depth_bias, &src->depth_bias,
sizeof(src->depth_bias))) {
memcpy(&dest->depth_bias, &src->depth_bias, sizeof(src->depth_bias));
dirty |= V3DV_CMD_DIRTY_DEPTH_BIAS;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_LINE_WIDTH)) {
if (dest->line_width != src->line_width) {
dest->line_width = src->line_width;
dirty |= V3DV_CMD_DIRTY_LINE_WIDTH;
}
}
if (!(dynamic_mask & V3DV_DYNAMIC_COLOR_WRITE_ENABLE)) {
if (dest->color_write_enable != src->color_write_enable) {
dest->color_write_enable = src->color_write_enable;
dirty |= V3DV_CMD_DIRTY_COLOR_WRITE_ENABLE;
}
}
cmd_buffer->state.dynamic.mask = dynamic_mask;
cmd_buffer->state.dirty |= dirty;
}
static void
bind_graphics_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline)
{
assert(pipeline && !(pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT));
if (cmd_buffer->state.gfx.pipeline == pipeline)
return;
cmd_buffer->state.gfx.pipeline = pipeline;
cmd_buffer_bind_pipeline_static_state(cmd_buffer, &pipeline->dynamic_state);
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_PIPELINE;
}
static void
bind_compute_pipeline(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline)
{
assert(pipeline && pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
if (cmd_buffer->state.compute.pipeline == pipeline)
return;
cmd_buffer->state.compute.pipeline = pipeline;
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_COMPUTE_PIPELINE;
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdBindPipeline(VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
VkPipeline _pipeline)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
V3DV_FROM_HANDLE(v3dv_pipeline, pipeline, _pipeline);
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_COMPUTE:
bind_compute_pipeline(cmd_buffer, pipeline);
break;
case VK_PIPELINE_BIND_POINT_GRAPHICS:
bind_graphics_pipeline(cmd_buffer, pipeline);
break;
default:
assert(!"invalid bind point");
break;
}
}
/* FIXME: C&P from radv. tu has similar code. Perhaps common place? */
void
v3dv_viewport_compute_xform(const VkViewport *viewport,
float scale[3],
float translate[3])
{
float x = viewport->x;
float y = viewport->y;
float half_width = 0.5f * viewport->width;
float half_height = 0.5f * viewport->height;
double n = viewport->minDepth;
double f = viewport->maxDepth;
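/* Standard Vulkan viewport mapping expressed as fb = ndc * scale + translate:
* x in [-1, 1] maps to [x, x + width], y in [-1, 1] maps to [y, y + height],
* and z in [0, 1] maps to [minDepth, maxDepth].
*/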
scale[0] = half_width;
translate[0] = half_width + x;
scale[1] = half_height;
translate[1] = half_height + y;
scale[2] = (f - n);
translate[2] = n;
/* It seems that if the scale is small enough the hardware won't clip
* correctly, so we work around this by choosing the smallest scale that
* seems to work.
*
* This case is exercised by CTS:
* dEQP-VK.draw.inverted_depth_ranges.nodepthclamp_deltazero
*/
const float min_abs_scale = 0.000009f;
if (fabs(scale[2]) < min_abs_scale)
scale[2] = min_abs_scale * (scale[2] < 0 ? -1.0f : 1.0f);
}
/* Considers the pipeline's negative_one_to_one state and applies it to the
* current viewport transform if needed to produce the resulting Z translate
* and scale parameters.
*/
void
v3dv_cmd_buffer_state_get_viewport_z_xform(struct v3dv_cmd_buffer_state *state,
uint32_t vp_idx,
float *translate_z, float *scale_z)
{
const struct v3dv_viewport_state *vp_state = &state->dynamic.viewport;
float t = vp_state->translate[vp_idx][2];
float s = vp_state->scale[vp_idx][2];
assert(state->gfx.pipeline);
if (state->gfx.pipeline->negative_one_to_one) {
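/* With a [-1, 1] NDC depth range (e.g. via VK_EXT_depth_clip_control) the
* mapping to [minDepth, maxDepth] needs scale (maxDepth - minDepth) / 2 and
* translate (minDepth + maxDepth) / 2. Since the base transform uses
* t = minDepth and s = maxDepth - minDepth, that is (t + maxDepth) / 2 and
* s / 2 here.
*/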
t = (t + vp_state->viewports[vp_idx].maxDepth) * 0.5f;
s *= 0.5f;
}
if (translate_z)
*translate_z = t;
if (scale_z)
*scale_z = s;
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdSetViewport(VkCommandBuffer commandBuffer,
uint32_t firstViewport,
uint32_t viewportCount,
const VkViewport *pViewports)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
const uint32_t total_count = firstViewport + viewportCount;
assert(firstViewport < MAX_VIEWPORTS);
assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
if (state->dynamic.viewport.count < total_count)
state->dynamic.viewport.count = total_count;
if (!memcmp(state->dynamic.viewport.viewports + firstViewport,
pViewports, viewportCount * sizeof(*pViewports))) {
return;
}
memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
viewportCount * sizeof(*pViewports));
for (uint32_t i = firstViewport; i < total_count; i++) {
v3dv_viewport_compute_xform(&state->dynamic.viewport.viewports[i],
state->dynamic.viewport.scale[i],
state->dynamic.viewport.translate[i]);
}
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_VIEWPORT;
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdSetScissor(VkCommandBuffer commandBuffer,
uint32_t firstScissor,
uint32_t scissorCount,
const VkRect2D *pScissors)
{
V3DV_FROM_HANDLE(v3dv_cmd_buffer, cmd_buffer, commandBuffer);
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
assert(firstScissor < MAX_SCISSORS);
assert(firstScissor + scissorCount >= 1 &&
firstScissor + scissorCount <= MAX_SCISSORS);
if (state->dynamic.scissor.count < firstScissor + scissorCount)
state->dynamic.scissor.count = firstScissor + scissorCount;
if (!memcmp(state->dynamic.scissor.scissors + firstScissor,
pScissors, scissorCount * sizeof(*pScissors))) {
return;
}
memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
scissorCount * sizeof(*pScissors));
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_SCISSOR;
}
static void
emit_scissor(struct v3dv_cmd_buffer *cmd_buffer)
{
if (cmd_buffer->state.dynamic.viewport.count == 0)
return;
struct v3dv_dynamic_state *dynamic = &cmd_buffer->state.dynamic;
/* FIXME: right now we only support one viewport. viewports[0] would work
* now, but this would need to change if we allow multiple viewports.
*/
float *vptranslate = dynamic->viewport.translate[0];
float *vpscale = dynamic->viewport.scale[0];
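/* Recover the viewport rectangle from the transform: in framebuffer space
* the viewport spans [translate - |scale|, translate + |scale|] on each axis.
*/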
float vp_minx = -fabsf(vpscale[0]) + vptranslate[0];
float vp_maxx = fabsf(vpscale[0]) + vptranslate[0];
float vp_miny = -fabsf(vpscale[1]) + vptranslate[1];
float vp_maxy = fabsf(vpscale[1]) + vptranslate[1];
/* Quoting from v3dx_emit:
* "Clip to the scissor if it's enabled, but still clip to the
* drawable regardless since that controls where the binner
* tries to put things.
*
* Additionally, always clip the rendering to the viewport,
* since the hardware does guardband clipping, meaning
* primitives would rasterize outside of the view volume."
*/
uint32_t minx, miny, maxx, maxy;
/* From the Vulkan spec:
*
* "The application must ensure (using scissor if necessary) that all
* rendering is contained within the render area. The render area must be
* contained within the framebuffer dimensions."
*
* So it is the application's responsibility to ensure this. Still, we can
* help by automatically restricting the scissor rect to the render area.
*/
minx = MAX2(vp_minx, cmd_buffer->state.render_area.offset.x);
miny = MAX2(vp_miny, cmd_buffer->state.render_area.offset.y);
maxx = MIN2(vp_maxx, cmd_buffer->state.render_area.offset.x +
cmd_buffer->state.render_area.extent.width);
maxy = MIN2(vp_maxy, cmd_buffer->state.render_area.offset.y +
cmd_buffer->state.render_area.extent.height);
/* Clip against user provided scissor if needed.
*
* FIXME: right now we only allow one scissor. Below would need to be
* updated if we support more
*/
if (dynamic->scissor.count > 0) {
VkRect2D *scissor = &dynamic->scissor.scissors[0];
minx = MAX2(minx, scissor->offset.x);
miny = MAX2(miny, scissor->offset.y);
maxx = MIN2(maxx, scissor->offset.x + scissor->extent.width);
maxy = MIN2(maxy, scissor->offset.y + scissor->extent.height);
}
/* If the scissor is outside the viewport area we end up with
* min{x,y} > max{x,y}.
*/
if (minx > maxx)
maxx = minx;
if (miny > maxy)
maxy = miny;
cmd_buffer->state.clip_window.offset.x = minx;
cmd_buffer->state.clip_window.offset.y = miny;
cmd_buffer->state.clip_window.extent.width = maxx - minx;
cmd_buffer->state.clip_window.extent.height = maxy - miny;
v3dv_X(cmd_buffer->device, job_emit_clip_window)
(cmd_buffer->state.job, &cmd_buffer->state.clip_window);
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_SCISSOR;
}
static void
update_gfx_uniform_state(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t dirty_uniform_state)
{
/* We need to update uniform streams if any piece of state that is passed
* to the shader as a uniform may have changed.
*
* If only descriptor sets are dirty then we can safely ignore updates
* for shader stages that don't access descriptors.
*/
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
assert(pipeline);
const bool has_new_pipeline = dirty_uniform_state & V3DV_CMD_DIRTY_PIPELINE;
const bool has_new_viewport = dirty_uniform_state & V3DV_CMD_DIRTY_VIEWPORT;
const bool has_new_push_constants = dirty_uniform_state & V3DV_CMD_DIRTY_PUSH_CONSTANTS;
const bool has_new_descriptors = dirty_uniform_state & V3DV_CMD_DIRTY_DESCRIPTOR_SETS;
const bool has_new_view_index = dirty_uniform_state & V3DV_CMD_DIRTY_VIEW_INDEX;
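/* Note that viewport changes only affect the vertex and geometry uniform
* streams below; the fragment stream doesn't take has_new_viewport into
* account, so it is not rewritten for viewport-only changes.
*/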
/* VK_SHADER_STAGE_FRAGMENT_BIT */
const bool has_new_descriptors_fs =
has_new_descriptors &&
(cmd_buffer->state.dirty_descriptor_stages & VK_SHADER_STAGE_FRAGMENT_BIT);
const bool has_new_push_constants_fs =
has_new_push_constants &&
(cmd_buffer->state.dirty_push_constants_stages & VK_SHADER_STAGE_FRAGMENT_BIT);
const bool needs_fs_update = has_new_pipeline ||
has_new_view_index ||
has_new_push_constants_fs ||
has_new_descriptors_fs;
if (needs_fs_update) {
struct v3dv_shader_variant *fs_variant =
pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
cmd_buffer->state.uniforms.fs =
v3dv_write_uniforms(cmd_buffer, pipeline, fs_variant);
}
/* VK_SHADER_STAGE_GEOMETRY_BIT */
if (pipeline->has_gs) {
const bool has_new_descriptors_gs =
has_new_descriptors &&
(cmd_buffer->state.dirty_descriptor_stages &
VK_SHADER_STAGE_GEOMETRY_BIT);
const bool has_new_push_constants_gs =
has_new_push_constants &&
(cmd_buffer->state.dirty_push_constants_stages &
VK_SHADER_STAGE_GEOMETRY_BIT);
const bool needs_gs_update = has_new_viewport ||
has_new_view_index ||
has_new_pipeline ||
has_new_push_constants_gs ||
has_new_descriptors_gs;
if (needs_gs_update) {
struct v3dv_shader_variant *gs_variant =
pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY];
struct v3dv_shader_variant *gs_bin_variant =
pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
cmd_buffer->state.uniforms.gs =
v3dv_write_uniforms(cmd_buffer, pipeline, gs_variant);
cmd_buffer->state.uniforms.gs_bin =
v3dv_write_uniforms(cmd_buffer, pipeline, gs_bin_variant);
}
}
/* VK_SHADER_STAGE_VERTEX_BIT */
const bool has_new_descriptors_vs =
has_new_descriptors &&
(cmd_buffer->state.dirty_descriptor_stages & VK_SHADER_STAGE_VERTEX_BIT);
const bool has_new_push_constants_vs =
has_new_push_constants &&
(cmd_buffer->state.dirty_push_constants_stages & VK_SHADER_STAGE_VERTEX_BIT);
const bool needs_vs_update = has_new_viewport ||
has_new_view_index ||
has_new_pipeline ||
has_new_push_constants_vs ||
has_new_descriptors_vs;
if (needs_vs_update) {
struct v3dv_shader_variant *vs_variant =
pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
struct v3dv_shader_variant *vs_bin_variant =
pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
cmd_buffer->state.uniforms.vs =
v3dv_write_uniforms(cmd_buffer, pipeline, vs_variant);
cmd_buffer->state.uniforms.vs_bin =
v3dv_write_uniforms(cmd_buffer, pipeline, vs_bin_variant);
}
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_VIEW_INDEX;
}
/* This stores command buffer state that we might be about to stomp for
* a meta operation.
*/
void
v3dv_cmd_buffer_meta_state_push(struct v3dv_cmd_buffer *cmd_buffer,
bool push_descriptor_state)
{
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
/* Attachment state.
*
* We store this state even if we are not currently in a subpass
* (i.e. when subpass_idx is -1) because we may get here to implement
* subpass resolves via vkCmdResolveImage from
* cmd_buffer_subpass_handle_pending_resolves. In that scenario we pretend
* we are no longer in a subpass because Vulkan disallows image resolves
* via vkCmdResolveImage during subpasses, but we still need to preserve
* attachment state because we may have more subpasses to go through
* after processing resolves in the current subpass.
*/
const uint32_t attachment_state_item_size =
sizeof(struct v3dv_cmd_buffer_attachment_state);
const uint32_t attachment_state_total_size =
attachment_state_item_size * state->attachment_alloc_count;
if (state->meta.attachment_alloc_count < state->attachment_alloc_count) {
if (state->meta.attachment_alloc_count > 0)
vk_free(&cmd_buffer->device->vk.alloc, state->meta.attachments);
state->meta.attachments = vk_zalloc(&cmd_buffer->device->vk.alloc,
attachment_state_total_size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!state->meta.attachments) {
v3dv_flag_oom(cmd_buffer, NULL);
return;
}
state->meta.attachment_alloc_count = state->attachment_alloc_count;
}
state->meta.attachment_count = state->attachment_alloc_count;
memcpy(state->meta.attachments, state->attachments,
attachment_state_total_size);
if (state->subpass_idx != -1) {
state->meta.subpass_idx = state->subpass_idx;
state->meta.framebuffer = v3dv_framebuffer_to_handle(state->framebuffer);
state->meta.pass = v3dv_render_pass_to_handle(state->pass);
state->meta.tile_aligned_render_area = state->tile_aligned_render_area;
memcpy(&state->meta.render_area, &state->render_area, sizeof(VkRect2D));
}
/* We expect that meta operations are graphics-only, so we only take into
* account the graphics pipeline and graphics state.
*/
state->meta.gfx.pipeline = state->gfx.pipeline;
memcpy(&state->meta.dynamic, &state->dynamic, sizeof(state->dynamic));
struct v3dv_descriptor_state *gfx_descriptor_state =
&cmd_buffer->state.gfx.descriptor_state;
if (push_descriptor_state) {
if (gfx_descriptor_state->valid != 0) {
memcpy(&state->meta.gfx.descriptor_state, gfx_descriptor_state,
sizeof(state->gfx.descriptor_state));
}
state->meta.has_descriptor_state = true;
} else {
state->meta.has_descriptor_state = false;
}
if (cmd_buffer->state.push_constants_size > 0) {
state->meta.push_constants_size = cmd_buffer->state.push_constants_size;
memcpy(state->meta.push_constants, cmd_buffer->state.push_constants_data,
cmd_buffer->state.push_constants_size);
cmd_buffer->state.push_constants_size = 0;
}
}
/* This restores command buffer state after a meta operation
*/
void
v3dv_cmd_buffer_meta_state_pop(struct v3dv_cmd_buffer *cmd_buffer,
bool needs_subpass_resume)
{
struct v3dv_cmd_buffer_state *state = &cmd_buffer->state;
/* Attachment state */
assert(state->meta.attachment_count <= state->attachment_alloc_count);
const uint32_t attachment_state_item_size =
sizeof(struct v3dv_cmd_buffer_attachment_state);
const uint32_t attachment_state_total_size =
attachment_state_item_size * state->meta.attachment_count;
memcpy(state->attachments, state->meta.attachments,
attachment_state_total_size);
if (state->meta.subpass_idx != -1) {
state->pass = v3dv_render_pass_from_handle(state->meta.pass);
state->framebuffer = v3dv_framebuffer_from_handle(state->meta.framebuffer);
state->tile_aligned_render_area = state->meta.tile_aligned_render_area;
memcpy(&state->render_area, &state->meta.render_area, sizeof(VkRect2D));
/* If needs_subpass_resume is true it means that we emitted the meta
* operation in its own job (possibly with an RT config that is
* incompatible with the current subpass), so resuming subpass execution
* after it requires that we create a new job with the subpass RT setup.
*/
if (needs_subpass_resume)
v3dv_cmd_buffer_subpass_resume(cmd_buffer, state->meta.subpass_idx);
} else {
state->subpass_idx = -1;
}
if (state->meta.gfx.pipeline != NULL) {
struct v3dv_pipeline *pipeline = state->meta.gfx.pipeline;
VkPipelineBindPoint pipeline_binding =
v3dv_pipeline_get_binding_point(pipeline);
v3dv_CmdBindPipeline(v3dv_cmd_buffer_to_handle(cmd_buffer),
pipeline_binding,
v3dv_pipeline_to_handle(state->meta.gfx.pipeline));
} else {
state->gfx.pipeline = NULL;
}
/* Restore dynamic state */
memcpy(&state->dynamic, &state->meta.dynamic, sizeof(state->dynamic));
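/* Conservatively flag all state as dirty so the next draw re-emits it,
* since the meta operation may have emitted packets that overrode it.
*/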
state->dirty = ~0;
if (state->meta.has_descriptor_state) {
if (state->meta.gfx.descriptor_state.valid != 0) {
memcpy(&state->gfx.descriptor_state, &state->meta.gfx.descriptor_state,
sizeof(state->gfx.descriptor_state));
} else {
state->gfx.descriptor_state.valid = 0;
}
}
/* We only need to restore push constant data if we had any data in the
* original command buffer and the meta operation wrote new push constant
* data.
*/
if (state->meta.push_constants_size > 0 &&
cmd_buffer->state.push_constants_size > 0) {
memcpy(cmd_buffer->state.push_constants_data, state->meta.push_constants,
state->meta.push_constants_size);
}
cmd_buffer->state.push_constants_size = state->meta.push_constants_size;
state->meta.gfx.pipeline = NULL;
state->meta.framebuffer = VK_NULL_HANDLE;
state->meta.pass = VK_NULL_HANDLE;
state->meta.subpass_idx = -1;
state->meta.has_descriptor_state = false;
state->meta.push_constants_size = 0;
}
static struct v3dv_job *
cmd_buffer_pre_draw_split_job(struct v3dv_cmd_buffer *cmd_buffer)
{
struct v3dv_job *job = cmd_buffer->state.job;
assert(job);
/* If the job has been flagged with 'always_flush' and it has already
* recorded any draw calls then we need to start a new job for it.
*/
if (job->always_flush && job->draw_count > 0) {
assert(cmd_buffer->state.pass);
/* First, flag the current job as not being the last in the
* current subpass
*/
job->is_subpass_finish = false;
/* Now start a new job in the same subpass and flag it as continuing
* the current subpass.
*/
job = v3dv_cmd_buffer_subpass_resume(cmd_buffer,
cmd_buffer->state.subpass_idx);
assert(job->draw_count == 0);
/* Inherit the 'always flush' behavior */
job->always_flush = true;
}
assert(job->draw_count == 0 || !job->always_flush);
return job;
}
/**
* The Vulkan spec states:
*
* "It is legal for a subpass to use no color or depth/stencil
* attachments (...) This kind of subpass can use shader side effects such
* as image stores and atomics to produce an output. In this case, the
* subpass continues to use the width, height, and layers of the framebuffer
* to define the dimensions of the rendering area, and the
* rasterizationSamples from each pipeline’s
* VkPipelineMultisampleStateCreateInfo to define the number of samples used
* in rasterization."
*
* We need to enable MSAA in the TILE_BINNING_MODE_CFG packet, which we
* emit when we start a new frame at the beginning of a subpass. At that point,
* if the framebuffer doesn't have any attachments we won't enable MSAA and
* the job won't be valid in the scenario described by the spec.
*
* This function is intended to be called before a draw call and will test if
* we are in that scenario, in which case it will restart the current job
* with MSAA enabled.
*/
static void
cmd_buffer_restart_job_for_msaa_if_needed(struct v3dv_cmd_buffer *cmd_buffer)
{
assert(cmd_buffer->state.job);
/* We don't support variableMultisampleRate so we know that all pipelines
* bound in the same subpass must have matching number of samples, so we
* can do this check only on the first draw call.
*/
if (cmd_buffer->state.job->draw_count > 0)
return;
/* We only need to restart the frame if the pipeline requires MSAA but
* our frame tiling didn't enable it.
*/
if (!cmd_buffer->state.gfx.pipeline->msaa ||
cmd_buffer->state.job->frame_tiling.msaa) {
return;
}
/* FIXME: Secondary command buffers don't start frames. Instead, they are
* recorded into primary jobs that start them. For secondaries, we should
* still handle this scenario, but we should do that when we record them
* into primaries by testing if any of the secondaries has multisampled
* draw calls in them, and then using that info to decide if we need to
* restart the primary job into which they are being recorded.
*/
if (cmd_buffer->vk.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY)
return;
/* Drop the current job and restart it with MSAA enabled */
struct v3dv_job *old_job = cmd_buffer->state.job;
cmd_buffer->state.job = NULL;
struct v3dv_job *job = vk_zalloc(&cmd_buffer->device->vk.alloc,
sizeof(struct v3dv_job), 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!job) {
v3dv_flag_oom(cmd_buffer, NULL);
return;
}
v3dv_job_init(job, V3DV_JOB_TYPE_GPU_CL, cmd_buffer->device, cmd_buffer,
cmd_buffer->state.subpass_idx);
cmd_buffer->state.job = job;
v3dv_job_start_frame(job,
old_job->frame_tiling.width,
old_job->frame_tiling.height,
old_job->frame_tiling.layers,
true, false,
old_job->frame_tiling.render_target_count,
old_job->frame_tiling.internal_bpp,
true /* msaa */);
v3dv_job_destroy(old_job);
}
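/* Checks if a serialized draw also needs to wait at the binning stage, which
* is only the case if the pending BCL barrier access masks cover a resource
* type (index/indirect buffers, attributes, UBOs/SSBOs, texel buffers or
* images) that the binning vertex/geometry shaders for this pipeline
* actually use.
*/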
static bool
cmd_buffer_binning_sync_required(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_pipeline *pipeline,
bool indexed, bool indirect)
{
const struct v3dv_descriptor_maps *vs_bin_maps =
pipeline->shared_data->maps[BROADCOM_SHADER_VERTEX_BIN];
const struct v3dv_descriptor_maps *gs_bin_maps =
pipeline->shared_data->maps[BROADCOM_SHADER_GEOMETRY_BIN];
VkAccessFlags buffer_access =
cmd_buffer->state.barrier.bcl_buffer_access;
if (buffer_access) {
/* Index buffer read */
if (indexed && (buffer_access & (VK_ACCESS_2_INDEX_READ_BIT |
VK_ACCESS_2_MEMORY_READ_BIT))) {
return true;
}
/* Indirect buffer read */
if (indirect && (buffer_access & (VK_ACCESS_2_INDIRECT_COMMAND_READ_BIT |
VK_ACCESS_2_MEMORY_READ_BIT))) {
return true;
}
/* Attribute read */
if (buffer_access & (VK_ACCESS_2_VERTEX_ATTRIBUTE_READ_BIT |
VK_ACCESS_2_MEMORY_READ_BIT)) {
const struct v3d_vs_prog_data *prog_data =
pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN]->prog_data.vs;
for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
if (prog_data->vattr_sizes[i] > 0)
return true;
}
}
/* UBO / SSBO read */
if (buffer_access & (VK_ACCESS_2_UNIFORM_READ_BIT |
VK_ACCESS_2_SHADER_READ_BIT |
VK_ACCESS_2_MEMORY_READ_BIT |
VK_ACCESS_2_SHADER_STORAGE_READ_BIT)) {
if (vs_bin_maps->ubo_map.num_desc > 0 ||
vs_bin_maps->ssbo_map.num_desc > 0) {
return true;
}
if (gs_bin_maps && (gs_bin_maps->ubo_map.num_desc > 0 ||
gs_bin_maps->ssbo_map.num_desc > 0)) {
return true;
}
}
/* SSBO write */
if (buffer_access & (VK_ACCESS_2_SHADER_WRITE_BIT |
VK_ACCESS_2_MEMORY_WRITE_BIT |
VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT)) {
if (vs_bin_maps->ssbo_map.num_desc > 0)
return true;
if (gs_bin_maps && gs_bin_maps->ssbo_map.num_desc > 0)
return true;
}
/* Texel Buffer read */
if (buffer_access & (VK_ACCESS_2_SHADER_SAMPLED_READ_BIT |
VK_ACCESS_2_MEMORY_READ_BIT)) {
if (vs_bin_maps->texture_map.num_desc > 0)
return true;
if (gs_bin_maps && gs_bin_maps->texture_map.num_desc > 0)
return true;
}
}
VkAccessFlags image_access =
cmd_buffer->state.barrier.bcl_image_access;
if (image_access) {
/* Image load / store */
if (image_access & (VK_ACCESS_2_SHADER_READ_BIT |
VK_ACCESS_2_SHADER_WRITE_BIT |
VK_ACCESS_2_SHADER_SAMPLED_READ_BIT |
VK_ACCESS_2_SHADER_STORAGE_READ_BIT |
VK_ACCESS_2_SHADER_STORAGE_WRITE_BIT |
VK_ACCESS_2_MEMORY_READ_BIT |
VK_ACCESS_2_MEMORY_WRITE_BIT)) {
if (vs_bin_maps->texture_map.num_desc > 0 ||
vs_bin_maps->sampler_map.num_desc > 0) {
return true;
}
if (gs_bin_maps && (gs_bin_maps->texture_map.num_desc > 0 ||
gs_bin_maps->sampler_map.num_desc > 0)) {
return true;
}
}
}
return false;
}
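/* Makes the job wait for binning sync and clears the pending BCL access
* masks, since once a job syncs at the binning stage the barrier has been
* consumed for subsequent work.
*/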
void
v3dv_cmd_buffer_consume_bcl_sync(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_job *job)
{
job->needs_bcl_sync = true;
cmd_buffer->state.barrier.bcl_buffer_access = 0;
cmd_buffer->state.barrier.bcl_image_access = 0;
}
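/* Rough cost estimate for a shader variant: its QPU instruction count plus
* a 4x weight for each TMU operation (lookups, spills and fills), which are
* comparatively expensive.
*/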
static inline uint32_t
compute_prog_score(struct v3dv_shader_variant *vs)
{
const uint32_t inst_count = vs->qpu_insts_size / sizeof(uint64_t);
const uint32_t tmu_count = vs->prog_data.base->tmu_count +
vs->prog_data.base->tmu_spills +
vs->prog_data.base->tmu_fills;
return inst_count + 4 * tmu_count;
}
static void
job_update_double_buffer_score(struct v3dv_job *job,
struct v3dv_pipeline *pipeline,
uint32_t vertex_count,
VkExtent2D *render_area)
{
/* FIXME: assume anything with GS workloads is too expensive */
struct v3dv_shader_variant *gs_bin =
pipeline->shared_data->variants[BROADCOM_SHADER_GEOMETRY_BIN];
if (gs_bin) {
job->can_use_double_buffer = false;
return;
}
/* Keep track of vertex processing: too much geometry processing would not
* be good for double-buffer.
*/
struct v3dv_shader_variant *vs_bin =
pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX_BIN];
assert(vs_bin);
uint32_t geom_score = vertex_count * compute_prog_score(vs_bin);
struct v3dv_shader_variant *vs =
pipeline->shared_data->variants[BROADCOM_SHADER_VERTEX];
assert(vs);
uint32_t vs_score = vertex_count * compute_prog_score(vs);
geom_score += vs_score;
job->double_buffer_score.geom += geom_score;
/* Compute pixel rendering cost.
*
* We estimate that on average a draw would render 0.2% of the pixels in
* the render area. That would be a 64x64 region in a 1920x1080 area.
*/
struct v3dv_shader_variant *fs =
pipeline->shared_data->variants[BROADCOM_SHADER_FRAGMENT];
assert(fs);
uint32_t pixel_count = 0.002f * render_area->width * render_area->height;
uint32_t render_score = vs_score + pixel_count * compute_prog_score(fs);
job->double_buffer_score.render += render_score;
}
void
v3dv_cmd_buffer_emit_pre_draw(struct v3dv_cmd_buffer *cmd_buffer,
bool indexed, bool indirect,
uint32_t vertex_count)
{
assert(cmd_buffer->state.gfx.pipeline);
assert(!(cmd_buffer->state.gfx.pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT));
/* If we emitted a pipeline barrier right before this draw we won't have
* an active job. In that case, create a new job continuing the current
* subpass.
*/
if (!cmd_buffer->state.job) {
v3dv_cmd_buffer_subpass_resume(cmd_buffer,
cmd_buffer->state.subpass_idx);
}
/* Restart single sample job for MSAA pipeline if needed */
cmd_buffer_restart_job_for_msaa_if_needed(cmd_buffer);
/* If the job is configured to flush on every draw call we need to create
* a new job now.
*/
struct v3dv_job *job = cmd_buffer_pre_draw_split_job(cmd_buffer);
job->draw_count++;
/* Track VK_KHR_buffer_device_address usage in the job */
struct v3dv_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
job->uses_buffer_device_address |= pipeline->uses_buffer_device_address;
/* If this job is serialized (has consumed a barrier) then check if we need
* to sync at the binning stage by testing if the binning shaders involved
* with the draw call require access to external resources.
*/
if (job->serialize && (cmd_buffer->state.barrier.bcl_buffer_access ||
cmd_buffer->state.barrier.bcl_image_access)) {
assert(!job->needs_bcl_sync);
if (cmd_buffer_binning_sync_required(cmd_buffer, pipeline,
indexed, indirect)) {
v3dv_cmd_buffer_consume_bcl_sync(cmd_buffer, job);
}
}
/* GL shader state binds shaders, uniform and vertex attribute state. The
* compiler injects uniforms to handle some descriptor types (such as
* textures), so we need to regen that when descriptor state changes.
*
* We also need to emit new shader state if we have a dirty viewport since
* that will require new uniform state for QUNIFORM_VIEWPORT_*.
*/
uint32_t *dirty = &cmd_buffer->state.dirty;
const uint32_t dirty_uniform_state =
*dirty & (V3DV_CMD_DIRTY_PIPELINE |
V3DV_CMD_DIRTY_PUSH_CONSTANTS |
V3DV_CMD_DIRTY_DESCRIPTOR_SETS |
V3DV_CMD_DIRTY_VIEWPORT |
V3DV_CMD_DIRTY_VIEW_INDEX);
if (dirty_uniform_state)
update_gfx_uniform_state(cmd_buffer, dirty_uniform_state);
struct v3dv_device *device = cmd_buffer->device;
if (dirty_uniform_state || (*dirty & V3DV_CMD_DIRTY_VERTEX_BUFFER))
v3dv_X(device, cmd_buffer_emit_gl_shader_state)(cmd_buffer);
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE)) {
v3dv_X(device, cmd_buffer_emit_configuration_bits)(cmd_buffer);
v3dv_X(device, cmd_buffer_emit_varyings_state)(cmd_buffer);
}
if (*dirty & (V3DV_CMD_DIRTY_VIEWPORT | V3DV_CMD_DIRTY_SCISSOR)) {
emit_scissor(cmd_buffer);
}
if (*dirty & V3DV_CMD_DIRTY_VIEWPORT) {
v3dv_X(device, cmd_buffer_emit_viewport)(cmd_buffer);
}
if (*dirty & V3DV_CMD_DIRTY_INDEX_BUFFER)
v3dv_X(device, cmd_buffer_emit_index_buffer)(cmd_buffer);
const uint32_t dynamic_stencil_dirty_flags =
V3DV_CMD_DIRTY_STENCIL_COMPARE_MASK |
V3DV_CMD_DIRTY_STENCIL_WRITE_MASK |
V3DV_CMD_DIRTY_STENCIL_REFERENCE;
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | dynamic_stencil_dirty_flags))
v3dv_X(device, cmd_buffer_emit_stencil)(cmd_buffer);
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_DEPTH_BIAS))
v3dv_X(device, cmd_buffer_emit_depth_bias)(cmd_buffer);
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_BLEND_CONSTANTS))
v3dv_X(device, cmd_buffer_emit_blend)(cmd_buffer);
if (*dirty & V3DV_CMD_DIRTY_OCCLUSION_QUERY)
v3dv_X(device, cmd_buffer_emit_occlusion_query)(cmd_buffer);
if (*dirty & V3DV_CMD_DIRTY_LINE_WIDTH)
v3dv_X(device, cmd_buffer_emit_line_width)(cmd_buffer);
if (*dirty & V3DV_CMD_DIRTY_PIPELINE)
v3dv_X(device, cmd_buffer_emit_sample_state)(cmd_buffer);
if (*dirty & (V3DV_CMD_DIRTY_PIPELINE | V3DV_CMD_DIRTY_COLOR_WRITE_ENABLE))
v3dv_X(device, cmd_buffer_emit_color_write_mask)(cmd_buffer);
/* We disable double-buffer mode if indirect draws are used because in that
* case we don't know the vertex count.
*/
if (indirect) {
job->can_use_double_buffer = false;
} else if (job->can_use_double_buffer) {
job_update_double_buffer_score(job, pipeline, vertex_count,
&cmd_buffer->state.render_area.extent);
}
cmd_buffer->state.dirty &= ~V3DV_CMD_DIRTY_PIPELINE;
}
static inline void
cmd_buffer_set_view_index(struct v3dv_cmd_buffer *cmd_buffer,
uint32_t view_index)
{
cmd_buffer->state.view_index = view_index;
cmd_buffer->state.dirty |= V3DV_CMD_DIRTY_VIEW_INDEX;
}
static void
cmd_buffer_draw(struct v3dv_cmd_buffer *cmd_buffer,
struct v3dv_draw_info *info)
{
uint32_t vertex_count =
info->vertex_count * info->instance_count;
struct v3dv_render_pass *pass = cmd_buffer->state.pass;
if (likely(!pass->multiview_enabled)) {
v3dv_cmd_buffer_emit_pre_draw(cmd_buffer, false, false, vertex_count);
v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw)(cmd_buffer, info);
return;
}
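/* For multiview we replay the draw once per view enabled in the subpass'
* view mask, updating the view index (and its uniforms) before each replay.
*/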
uint32_t view_mask = pass->subpasses[cmd_buffer->state.subpass_idx].view_mask;
while (view_mask) {
cmd_buffer_set_view_index(cmd_buffer, u_bit_scan(&view_mask));
v3dv_cmd_buffer_emit_pre_draw(cmd_buffer, false, false, vertex_count);
v3dv_X(cmd_buffer->device, cmd_buffer_emit_draw)(cmd_buffer, info);
}
}
VKAPI_ATTR void VKAPI_CALL
v3dv_CmdDraw(VkCommandBuffer commandBuffer,
uint32_t vertexCount,
uint32_t instanceCount,
uint32_t firstVertex,
uint32_t firstInstance)
{
if (vertexCount == 0 || instanceCount == 0)
return;
V3DV_FROM_HANDLE(v3dv_cmd_buffer,