blob: 5c32bb78b8430fb22269ffa69b8a8776d06c24f1 [file] [log] [blame]
/*
* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (c) 2015-2021 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <olvaffe@gmail.com>
* Author: Chris Forbes <chrisf@ijw.co.nz>
* Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Mike Stroyan <mike@LunarG.com>
* Author: Tobin Ehlis <tobine@google.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: Cody Northrop <cnorthrop@google.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Jeremy Kniager <jeremyk@lunarg.com>
* Author: Shannon McPherson <shannon@lunarg.com>
* Author: John Zulauf <jzulauf@lunarg.com>
*/
#include "../layer_validation_tests.h"
#include "vk_extension_helper.h"
#include <algorithm>
#include <array>
#include <chrono>
#include <memory>
#include <mutex>
#include <thread>
#include "cast_utils.h"
//
// POSITIVE VALIDATION TESTS
//
// These tests do not expect to encounter ANY validation errors, and pass only if none are found
TEST_F(VkPositiveLayerTest, MultiplaneGetImageSubresourceLayout) {
    TEST_DESCRIPTION("Positive test, query layout of a single plane of a multiplane image. (repro Github #2530)");
    // Enable KHR multiplane req'd extensions
    // FIX: the spec-version argument must belong to the extension being queried; the original
    // code passed VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION for the
    // GET_PHYSICAL_DEVICE_PROPERTIES_2 query.
    bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                                    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
    if (mp_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    // All four device extensions below are required for YCbCr / multiplane support.
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    if (mp_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    } else {
        printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    // 3-plane 4:2:0 format; LINEAR tiling so the subresource layout query is legal.
    VkImageCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = 0;
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR;
    ci.extent = {128, 128, 1};
    ci.mipLevels = 1;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.tiling = VK_IMAGE_TILING_LINEAR;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    // Verify format
    bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT);
    if (!supported) {
        printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix);
        return;  // Assume there's low ROI on searching for different mp formats
    }
    VkImage image;
    VkResult err = vk::CreateImage(device(), &ci, NULL, &image);
    ASSERT_VK_SUCCESS(err);
    // Query layout of 3rd plane (PLANE_2); this must not trigger validation errors.
    VkImageSubresource subres = {};
    subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
    subres.mipLevel = 0;
    subres.arrayLayer = 0;
    VkSubresourceLayout layout = {};
    m_errorMonitor->ExpectSuccess();
    vk::GetImageSubresourceLayout(device(), image, &subres, &layout);
    m_errorMonitor->VerifyNotFound();
    vk::DestroyImage(device(), image, NULL);
}
// NOTE(review): "Tranfers" typo in the test name is kept — renaming would change the gtest identifier.
TEST_F(VkPositiveLayerTest, OwnershipTranfersImage) {
TEST_DESCRIPTION("Valid image ownership transfers that shouldn't create errors");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
// A queue family WITHOUT graphics capability is required so ownership can move between families.
uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
if (no_gfx == UINT32_MAX) {
printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
return;
}
// Queue / pool / command buffer for the non-graphics family.
VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();
VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue);
// Create an "exclusive" image owned by the graphics queue.
VkImageObj image(m_device);
VkFlags image_use = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, image_use, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
// Barrier with zero access masks and identical old/new layout: a pure queue-family
// ownership transfer from graphics to the non-graphics family.
auto image_subres = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1);
auto image_barrier = image.image_memory_barrier(0, 0, image.Layout(), image.Layout(), image_subres);
image_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
image_barrier.dstQueueFamilyIndex = no_gfx;
// Helper records the matching release (on m_commandBuffer) and acquire (on no_gfx_cb) barriers.
ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, nullptr, &image_barrier);
// Change layouts while changing ownership
image_barrier.srcQueueFamilyIndex = no_gfx;
image_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
image_barrier.oldLayout = image.Layout();
// Make sure the new layout is different from the old
if (image_barrier.oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
} else {
image_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
// Transfer ownership back to graphics, combined with a layout transition.
ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, nullptr, &image_barrier);
}
// NOTE(review): "Tranfers" typo in the test name is kept — renaming would change the gtest identifier.
TEST_F(VkPositiveLayerTest, OwnershipTranfersBuffer) {
TEST_DESCRIPTION("Valid buffer ownership transfers that shouldn't create errors");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
// A queue family WITHOUT graphics capability is required so ownership can move between families.
uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
if (no_gfx == UINT32_MAX) {
printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
return;
}
VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();
VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue);
// Create a buffer
const VkDeviceSize buffer_size = 256;
// Note: only the first byte is 0xFF; the remaining 255 bytes are zero-initialized.
uint8_t data[buffer_size] = {0xFF};
VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);
ASSERT_TRUE(buffer.initialized());
// Zero access masks: the barriers below express only ownership transfer, no memory dependency.
auto buffer_barrier = buffer.buffer_memory_barrier(0, 0, 0, VK_WHOLE_SIZE);
// Let gfx own it.
// src == dst family, so this is a plain barrier (no actual ownership transfer) — must not error.
buffer_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
ValidOwnershipTransferOp(m_errorMonitor, m_commandBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
&buffer_barrier, nullptr);
// Transfer it to non-gfx
buffer_barrier.dstQueueFamilyIndex = no_gfx;
ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr);
// Transfer it to gfx
buffer_barrier.srcQueueFamilyIndex = no_gfx;
buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, &buffer_barrier, nullptr);
}
TEST_F(VkPositiveLayerTest, UncompressedToCompressedImageCopy) {
TEST_DESCRIPTION("Image copies between compressed and uncompressed images");
ASSERT_NO_FATAL_FAILURE(Init());
// Verify format support
// Size-compatible (64-bit) formats. Uncompressed is 64 bits per texel, compressed is 64 bits per 4x4 block (or 4bpt).
if (!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_TILING_OPTIMAL,
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR) ||
!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_TILING_OPTIMAL,
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR)) {
printf("%s Required formats/features not supported - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
return;
}
// Sizes in bits: 10x10 texels at 64 bpt, and 40x40 texels at 4 bpt, are both 6400 bits.
VkImageObj uncomp_10x10t_image(m_device); // Size = 10 * 10 * 64 = 6400
VkImageObj comp_10x10b_40x40t_image(m_device); // Size = 40 * 40 * 4 = 6400
uncomp_10x10t_image.Init(10, 10, 1, VK_FORMAT_R16G16B16A16_UINT,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
comp_10x10b_40x40t_image.Init(40, 40, 1, VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
if (!uncomp_10x10t_image.initialized() || !comp_10x10b_40x40t_image.initialized()) {
printf("%s Unable to initialize surfaces - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
return;
}
// Both copies represent the same number of bits: 4 bits per texel for BC1, 64 bits per texel for uncompressed.
// (Original comment said "bc6" and "Bytes Per Texel = 1/16", which did not match the formats used above.)
// Copy compressed to uncompressed
VkImageCopy copy_region = {};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
// Copy from uncompressed to compressed
copy_region.extent = {10, 10, 1}; // Dimensions in (uncompressed) texels
vk::CmdCopyImage(m_commandBuffer->handle(), uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
// The next copy swaps source and dest s.t. we need an execution barrier on for the prior source and an access barrier for
// prior dest
auto image_barrier = LvlInitStruct<VkImageMemoryBarrier>();
image_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
image_barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
image_barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
image_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
image_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
image_barrier.image = comp_10x10b_40x40t_image.handle();
vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr,
0, nullptr, 1, &image_barrier);
// And from compressed to uncompressed
copy_region.extent = {40, 40, 1}; // Dimensions in (compressed) texels
vk::CmdCopyImage(m_commandBuffer->handle(), comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyNotFound();
m_commandBuffer->end();
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, TestAliasedMemoryTracking) {
    TEST_DESCRIPTION(
        "Create a buffer, allocate memory, bind memory, destroy the buffer, create an image, and bind the same memory to it");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    auto buffer = std::unique_ptr<VkBufferObj>(new VkBufferObj());
    VkDeviceSize buff_size = 256;
    buffer->init_no_mem(*DeviceObj(), VkBufferObj::create_info(buff_size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;  // mandatory format
    image_create_info.extent.width = 64;                  // at least 4096x4096 is supported
    image_create_info.extent.height = 64;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    VkImageObj image(DeviceObj());
    image.init_no_mem(*DeviceObj(), image_create_info);

    // One allocation big enough for either resource, from a type usable by both.
    const auto buffer_memory_requirements = buffer->memory_requirements();
    const auto image_memory_requirements = image.memory_requirements();
    vk_testing::DeviceMemory mem;
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = (std::max)(buffer_memory_requirements.size, image_memory_requirements.size);
    bool has_memtype =
        m_device->phy().set_memory_type(buffer_memory_requirements.memoryTypeBits & image_memory_requirements.memoryTypeBits,
                                        &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!has_memtype) {
        printf("%s Failed to find a host visible memory type for both a buffer and an image. Test skipped.\n", kSkipPrefix);
        return;
    }
    mem.init(*DeviceObj(), alloc_info);

    // FIX: memset only uses the low byte of its value argument, so the original
    // 0xCADECADE literal was silently truncated to 0xDE; write the effective byte directly.
    auto pData = mem.map();
    std::memset(pData, 0xDE, static_cast<size_t>(buff_size));
    mem.unmap();

    buffer->bind_memory(mem, 0);
    // NOW, destroy the buffer. Obviously, the resource no longer occupies this
    // memory. In fact, it was never used by the GPU.
    // Just be sure, wait for idle.
    buffer.reset();
    vk::DeviceWaitIdle(m_device->device());

    // Binding the image to the same memory must NOT be flagged: the previous occupant
    // was destroyed, so there is no live alias. (The original "// VALIDATION FAILURE:"
    // comment here was stale — this is the positive path under test.)
    image.bind_memory(mem, 0);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, SamplerMirrorClampToEdgeWithoutFeature) {
    TEST_DESCRIPTION("Use VK_KHR_sampler_mirror_clamp_to_edge in 1.1 before samplerMirrorClampToEdge feature was added");
    m_errorMonitor->ExpectSuccess();
    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    // Guard clause: the extension itself must be available.
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    if (DeviceValidationVersion() != VK_API_VERSION_1_1) {
        printf("%s Tests requires Vulkan 1.1 exactly, skipping test\n", kSkipPrefix);
        return;
    }
    // Creating a sampler with MIRROR_CLAMP_TO_EDGE is valid via the extension alone on 1.1.
    VkSamplerCreateInfo create_info = SafeSaneSamplerCreateInfo();
    create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    VkSampler test_sampler = VK_NULL_HANDLE;
    vk::CreateSampler(m_device->device(), &create_info, nullptr, &test_sampler);
    vk::DestroySampler(m_device->device(), test_sampler, nullptr);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, SamplerMirrorClampToEdgeWithoutFeature12) {
    TEST_DESCRIPTION("Use VK_KHR_sampler_mirror_clamp_to_edge in 1.2 using the extension");
    m_errorMonitor->ExpectSuccess();
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    // Guard clauses: need a 1.2-capable device and the extension itself.
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Tests requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    // With the extension enabled, MIRROR_CLAMP_TO_EDGE is valid without the 1.2 feature bit.
    VkSamplerCreateInfo create_info = SafeSaneSamplerCreateInfo();
    create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    VkSampler test_sampler = VK_NULL_HANDLE;
    vk::CreateSampler(m_device->device(), &create_info, nullptr, &test_sampler);
    vk::DestroySampler(m_device->device(), test_sampler, nullptr);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, SamplerMirrorClampToEdgeWithFeature) {
    TEST_DESCRIPTION("Use VK_KHR_sampler_mirror_clamp_to_edge in 1.2 with feature bit enabled");
    m_errorMonitor->ExpectSuccess();
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Tests requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }
    // Query the Vulkan 1.2 feature struct; the feature must be supported to proceed.
    auto features12 = LvlInitStruct<VkPhysicalDeviceVulkan12Features>();
    features12.samplerMirrorClampToEdge = VK_TRUE;
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&features12);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
    if (features12.samplerMirrorClampToEdge != VK_TRUE) {
        // FIX: use the kSkipPrefix convention so test harnesses recognize this as a skip
        // (the original message omitted it, unlike every other skip path in this file).
        printf("%s samplerMirrorClampToEdge not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    // With the feature enabled, MIRROR_CLAMP_TO_EDGE needs no extension in 1.2.
    VkSampler sampler = VK_NULL_HANDLE;
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    vk::CreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    vk::DestroySampler(m_device->device(), sampler, nullptr);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, NonCoherentMemoryMapping) {
    TEST_DESCRIPTION(
        "Ensure that validations handling of non-coherent memory mapping while using VK_WHOLE_SIZE does not cause access "
        "violations");
    VkResult err;
    uint8_t *pData;
    ASSERT_NO_FATAL_FAILURE(Init());

    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;
    mem_reqs.memoryTypeBits = 0xFFFFFFFF;  // accept any memory type; only the property filters below matter
    const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize;
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    // All offsets/sizes below are expressed in multiples of nonCoherentAtomSize.
    static const VkDeviceSize allocation_size = 32 * atom_size;
    alloc_info.allocationSize = allocation_size;

    // Find a memory configurations WITHOUT a COHERENT bit, otherwise exit
    // (flush/invalidate rules only apply to non-coherent memory). Each retry widens
    // the required property set while still forbidding HOST_COHERENT.
    bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
    if (!pass) {
        pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info,
                                               VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                               VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
        if (!pass) {
            pass = m_device->phy().set_memory_type(
                mem_reqs.memoryTypeBits, &alloc_info,
                VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
            if (!pass) {
                // FIX: corrected "wihtout" typo in the skip message.
                printf("%s Couldn't find a memory type without a COHERENT bit.\n", kSkipPrefix);
                return;
            }
        }
    }
    err = vk::AllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    // Map/Flush/Invalidate using WHOLE_SIZE and zero offsets and entire mapped range
    m_errorMonitor->ExpectSuccess();
    err = vk::MapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    VkMappedMemoryRange mmr = {};
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = 0;
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    err = vk::InvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();
    vk::UnmapMemory(m_device->device(), mem);

    // Map/Flush/Invalidate using WHOLE_SIZE and an offset and entire mapped range
    m_errorMonitor->ExpectSuccess();
    err = vk::MapMemory(m_device->device(), mem, 5 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = 6 * atom_size;  // inside the mapped region, atom-aligned
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    err = vk::InvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();
    vk::UnmapMemory(m_device->device(), mem);

    // Map with offset and size
    // Flush/Invalidate subrange of mapped area with offset and size
    m_errorMonitor->ExpectSuccess();
    err = vk::MapMemory(m_device->device(), mem, 3 * atom_size, 9 * atom_size, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = 4 * atom_size;
    mmr.size = 2 * atom_size;
    err = vk::FlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    err = vk::InvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();
    vk::UnmapMemory(m_device->device(), mem);

    // Map without offset and flush WHOLE_SIZE with two separate offsets
    m_errorMonitor->ExpectSuccess();
    err = vk::MapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = allocation_size - (4 * atom_size);
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    mmr.offset = allocation_size - (6 * atom_size);
    mmr.size = VK_WHOLE_SIZE;
    err = vk::FlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();
    vk::UnmapMemory(m_device->device(), mem);

    vk::FreeMemory(m_device->device(), mem, NULL);
}
TEST_F(VkPositiveLayerTest, CreateImageViewFollowsParameterCompatibilityRequirements) {
    TEST_DESCRIPTION("Verify that creating an ImageView with valid usage does not generate validation errors.");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    // Build the create-info field by field (equivalent to the aggregate initializer form).
    VkImageCreateInfo image_ci = {};
    image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_ci.pNext = nullptr;
    image_ci.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
    image_ci.imageType = VK_IMAGE_TYPE_2D;
    image_ci.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_ci.extent = {128, 128, 1};
    image_ci.mipLevels = 1;
    image_ci.arrayLayers = 1;
    image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
    image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    image_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    image_ci.queueFamilyIndexCount = 0;
    image_ci.pQueueFamilyIndices = nullptr;
    image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkImageObj image(m_device);
    image.init(&image_ci);
    ASSERT_TRUE(image.initialized());
    // Creating a view with the image's own format must not be flagged.
    image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ValidUsage) {
    TEST_DESCRIPTION("Verify that creating an image view from an image with valid usage doesn't generate validation errors");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess();
    // Verify that we can create a view with usage INPUT_ATTACHMENT
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    // Zero-init leaves baseArrayLayer = 0; only the fields that differ are set.
    VkImageViewCreateInfo view_ci = {};
    view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    view_ci.image = image.handle();
    view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    view_ci.format = VK_FORMAT_R8G8B8A8_UNORM;
    view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    view_ci.subresourceRange.baseMipLevel = 0;
    view_ci.subresourceRange.levelCount = 1;
    view_ci.subresourceRange.layerCount = 1;
    VkImageView view = VK_NULL_HANDLE;
    vk::CreateImageView(m_device->device(), &view_ci, NULL, &view);
    m_errorMonitor->VerifyNotFound();
    vk::DestroyImageView(m_device->device(), view, NULL);
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, BindSparse) {
TEST_DESCRIPTION("Bind 2 memory ranges to one image using vkQueueBindSparse, destroy the image and then free the memory");
ASSERT_NO_FATAL_FAILURE(Init());
// Sparse binding requires both the queue capability and the device feature.
auto index = m_device->graphics_queue_node_index_;
if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) {
printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix);
return;
}
if (!m_device->phy().features().sparseBinding) {
printf("%s Device does not support sparse bindings.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);
VkImage image;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 64;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
VkResult err = vk::CreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory memory_one, memory_two;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vk::GetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
// Find an image big enough to allow sparse mapping of 2 memory regions
// Increase the image size until it is at least twice the
// size of the required alignment, to ensure we can bind both
// allocated memory blocks to the image on aligned offsets.
while (memory_reqs.size < (memory_reqs.alignment * 2)) {
vk::DestroyImage(m_device->device(), image, nullptr);
image_create_info.extent.width *= 2;
image_create_info.extent.height *= 2;
err = vk::CreateImage(m_device->device(), &image_create_info, nullptr, &image);
ASSERT_VK_SUCCESS(err);
vk::GetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
}
// Allocate 2 memory regions of minimum alignment size, bind one at 0, the other
// at the end of the first
memory_info.allocationSize = memory_reqs.alignment;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vk::AllocateMemory(m_device->device(), &memory_info, NULL, &memory_one);
ASSERT_VK_SUCCESS(err);
err = vk::AllocateMemory(m_device->device(), &memory_info, NULL, &memory_two);
ASSERT_VK_SUCCESS(err);
// Two opaque binds covering consecutive, alignment-sized resource ranges.
VkSparseMemoryBind binds[2];
binds[0].flags = 0;
binds[0].memory = memory_one;
binds[0].memoryOffset = 0;
binds[0].resourceOffset = 0;
binds[0].size = memory_info.allocationSize;
binds[1].flags = 0;
binds[1].memory = memory_two;
binds[1].memoryOffset = 0;
binds[1].resourceOffset = memory_info.allocationSize;
binds[1].size = memory_info.allocationSize;
VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo;
opaqueBindInfo.image = image;
opaqueBindInfo.bindCount = 2;
opaqueBindInfo.pBinds = binds;
// No fence needed; QueueWaitIdle below serializes the bind operation.
VkFence fence = VK_NULL_HANDLE;
VkBindSparseInfo bindSparseInfo = {};
bindSparseInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
bindSparseInfo.imageOpaqueBindCount = 1;
bindSparseInfo.pImageOpaqueBinds = &opaqueBindInfo;
vk::QueueBindSparse(m_device->m_queue, 1, &bindSparseInfo, fence);
vk::QueueWaitIdle(m_device->m_queue);
// Destroying the image before freeing its sparse-bound memory must not be flagged.
vk::DestroyImage(m_device->device(), image, NULL);
vk::FreeMemory(m_device->device(), memory_one, NULL);
vk::FreeMemory(m_device->device(), memory_two, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, BindSparseFreeMemory) {
TEST_DESCRIPTION("Test using a sparse image after freeing memory that was bound to it.");
ASSERT_NO_FATAL_FAILURE(Init());
// Sparse residency requires both the queue capability and the device feature.
auto index = m_device->graphics_queue_node_index_;
if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) {
printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix);
return;
}
if (!m_device->phy().features().sparseResidencyImage2D) {
printf("%s Device does not support sparseResidencyImage2D.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);
VkImage image;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 512;
image_create_info.extent.height = 512;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT;
VkResult err = vk::CreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory memory;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vk::GetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vk::AllocateMemory(m_device->device(), &memory_info, NULL, &memory);
ASSERT_VK_SUCCESS(err);
// One opaque bind covering the whole image.
VkSparseMemoryBind bind;
bind.flags = 0;
bind.memory = memory;
bind.memoryOffset = 0;
bind.resourceOffset = 0;
bind.size = memory_info.allocationSize;
VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo;
opaqueBindInfo.image = image;
opaqueBindInfo.bindCount = 1;
opaqueBindInfo.pBinds = &bind;
VkFence fence = VK_NULL_HANDLE;
VkBindSparseInfo bindSparseInfo = {};
bindSparseInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
bindSparseInfo.imageOpaqueBindCount = 1;
bindSparseInfo.pImageOpaqueBinds = &opaqueBindInfo;
// Bind to the memory
vk::QueueBindSparse(m_device->m_queue, 1, &bindSparseInfo, fence);
// Bind back to NULL
// (rebinding the range to VK_NULL_HANDLE unbinds it, so the image no longer references the allocation)
bind.memory = VK_NULL_HANDLE;
vk::QueueBindSparse(m_device->m_queue, 1, &bindSparseInfo, fence);
vk::QueueWaitIdle(m_device->m_queue);
// Free the memory, then use the image in a new command buffer
// Using the now-unbound residency image must not trigger a "freed memory" error.
vk::FreeMemory(m_device->device(), memory, NULL);
m_commandBuffer->begin();
auto img_barrier = LvlInitStruct<VkImageMemoryBarrier>();
img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.image = image;
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}};
VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vk::CmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &range);
m_commandBuffer->end();
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyImage(m_device->device(), image, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, BindSparseMetadata) {
    TEST_DESCRIPTION("Bind memory for the metadata aspect of a sparse image");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Sparse binding must be supported both by the queue and as a device feature.
    auto index = m_device->graphics_queue_node_index_;
    if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) {
        printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix);
        return;
    }
    if (!m_device->phy().features().sparseResidencyImage2D) {
        printf("%s Device does not support sparse residency for images.\n", kSkipPrefix);
        return;
    }

    m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);

    // Create a sparse (residency) image; metadata, if any, lives in its opaque mip tail.
    VkImage image;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_create_info.extent.width = 64;
    image_create_info.extent.height = 64;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT;
    VkResult err = vk::CreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    // Query image memory requirements (memoryTypeBits is reused for the metadata allocation)
    VkMemoryRequirements memory_reqs;
    vk::GetImageMemoryRequirements(m_device->device(), image, &memory_reqs);

    // Query sparse memory requirements (two-call idiom: count, then data)
    uint32_t sparse_reqs_count = 0;
    vk::GetImageSparseMemoryRequirements(m_device->device(), image, &sparse_reqs_count, nullptr);
    std::vector<VkSparseImageMemoryRequirements> sparse_reqs(sparse_reqs_count);
    vk::GetImageSparseMemoryRequirements(m_device->device(), image, &sparse_reqs_count, sparse_reqs.data());

    // Find requirements for the metadata aspect, if the implementation reports one
    const VkSparseImageMemoryRequirements *metadata_reqs = nullptr;
    for (auto const &aspect_sparse_reqs : sparse_reqs) {
        if (aspect_sparse_reqs.formatProperties.aspectMask == VK_IMAGE_ASPECT_METADATA_BIT) {
            metadata_reqs = &aspect_sparse_reqs;
        }
    }

    if (!metadata_reqs) {
        printf("%s Sparse image does not require memory for metadata.\n", kSkipPrefix);
        // Balance the ExpectSuccess() above so the monitor is not left armed for the next test.
        m_errorMonitor->VerifyNotFound();
    } else {
        // Allocate exactly the metadata mip-tail size, from a type compatible with the image
        VkDeviceMemory metadata_memory = VK_NULL_HANDLE;
        VkMemoryAllocateInfo metadata_memory_info = {};
        metadata_memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        metadata_memory_info.allocationSize = metadata_reqs->imageMipTailSize;
        m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &metadata_memory_info, 0);
        err = vk::AllocateMemory(m_device->device(), &metadata_memory_info, NULL, &metadata_memory);
        ASSERT_VK_SUCCESS(err);

        // Bind metadata via an opaque bind flagged with VK_SPARSE_MEMORY_BIND_METADATA_BIT
        VkSparseMemoryBind sparse_bind = {};
        sparse_bind.resourceOffset = metadata_reqs->imageMipTailOffset;
        sparse_bind.size = metadata_reqs->imageMipTailSize;
        sparse_bind.memory = metadata_memory;
        sparse_bind.memoryOffset = 0;
        sparse_bind.flags = VK_SPARSE_MEMORY_BIND_METADATA_BIT;

        VkSparseImageOpaqueMemoryBindInfo opaque_bind_info = {};
        opaque_bind_info.image = image;
        opaque_bind_info.bindCount = 1;
        opaque_bind_info.pBinds = &sparse_bind;

        VkBindSparseInfo bind_info = {};
        bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
        bind_info.imageOpaqueBindCount = 1;
        bind_info.pImageOpaqueBinds = &opaque_bind_info;

        vk::QueueBindSparse(m_device->m_queue, 1, &bind_info, VK_NULL_HANDLE);
        m_errorMonitor->VerifyNotFound();

        // Cleanup: wait for the sparse bind to complete before freeing its memory
        vk::QueueWaitIdle(m_device->m_queue);
        vk::FreeMemory(m_device->device(), metadata_memory, NULL);
    }

    vk::DestroyImage(m_device->device(), image, NULL);
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, BarrierLayoutToImageUsage) {
    TEST_DESCRIPTION("Ensure barriers' new and old VkImageLayout are compatible with their images' VkImageUsageFlags");

    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Barrier template; image, aspect mask, and layouts are overwritten per case in the loop below.
    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.pNext = NULL;
    img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;

    {
        // One image per usage bit under test. Each image is created with exactly one usage flag,
        // which is what makes the `usage ==` aspect-mask selection in the loop below valid.
        VkImageObj img_color(m_device);
        img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
        ASSERT_TRUE(img_color.initialized());

        VkImageObj img_ds1(m_device);
        img_ds1.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
        ASSERT_TRUE(img_ds1.initialized());

        VkImageObj img_ds2(m_device);
        img_ds2.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
        ASSERT_TRUE(img_ds2.initialized());

        VkImageObj img_xfer_src(m_device);
        img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL);
        ASSERT_TRUE(img_xfer_src.initialized());

        VkImageObj img_xfer_dst(m_device);
        img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
        ASSERT_TRUE(img_xfer_dst.initialized());

        VkImageObj img_sampled(m_device);
        img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL);
        ASSERT_TRUE(img_sampled.initialized());

        VkImageObj img_input(m_device);
        img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
        ASSERT_TRUE(img_input.initialized());

        // Each entry pairs an image with a layout that is legal for its usage flag.
        const struct {
            VkImageObj &image_obj;
            VkImageLayout old_layout;
            VkImageLayout new_layout;
        } buffer_layouts[] = {
            // clang-format off
            {img_color,    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,        VK_IMAGE_LAYOUT_GENERAL},
            {img_ds1,      VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
            {img_ds2,      VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
            {img_sampled,  VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,        VK_IMAGE_LAYOUT_GENERAL},
            {img_input,    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,        VK_IMAGE_LAYOUT_GENERAL},
            {img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,            VK_IMAGE_LAYOUT_GENERAL},
            {img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,            VK_IMAGE_LAYOUT_GENERAL},
            // clang-format on
        };
        const uint32_t layout_count = sizeof(buffer_layouts) / sizeof(buffer_layouts[0]);

        m_commandBuffer->begin();
        for (uint32_t i = 0; i < layout_count; ++i) {
            img_barrier.image = buffer_layouts[i].image_obj.handle();
            // Depth/stencil images need the depth+stencil aspects; everything else is color.
            const VkImageUsageFlags usage = buffer_layouts[i].image_obj.usage();
            img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
                                                          ? (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)
                                                          : VK_IMAGE_ASPECT_COLOR_BIT;
            // Transition usage-specific layout -> GENERAL, then back again.
            img_barrier.oldLayout = buffer_layouts[i].old_layout;
            img_barrier.newLayout = buffer_layouts[i].new_layout;
            vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0,
                                   0, nullptr, 0, nullptr, 1, &img_barrier);
            img_barrier.oldLayout = buffer_layouts[i].new_layout;
            img_barrier.newLayout = buffer_layouts[i].old_layout;
            vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0,
                                   0, nullptr, 0, nullptr, 1, &img_barrier);
        }
        m_commandBuffer->end();
        // (Removed two dead stores that reset img_barrier layouts after end(); the values were never read.)
    }
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ClearColorImageWithValidRange) {
    TEST_DESCRIPTION("Record clear color with a valid VkImageSubresourceRange");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Single-mip, single-layer color image usable as a transfer destination.
    VkImageObj dst_image(m_device);
    dst_image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(dst_image.create_info().arrayLayers == 1);
    ASSERT_TRUE(dst_image.initialized());
    dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    const VkClearColorValue opaque_black = {{0.0f, 0.0f, 0.0f, 1.0f}};

    m_commandBuffer->begin();
    const auto cmd = m_commandBuffer->handle();

    // Case 1: explicit single mip / single layer range.
    {
        m_errorMonitor->ExpectSuccess();
        VkImageSubresourceRange explicit_range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
        vk::CmdClearColorImage(cmd, dst_image.handle(), dst_image.Layout(), &opaque_black, 1, &explicit_range);
        m_errorMonitor->VerifyNotFound();
    }

    // Write-after-write barrier between the two clears.
    dst_image.ImageMemoryBarrier(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
                                 VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    // Case 2: same clear expressed with the VK_REMAINING_* counts.
    {
        m_errorMonitor->ExpectSuccess();
        VkImageSubresourceRange remaining_range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS, 0,
                                                   VK_REMAINING_ARRAY_LAYERS};
        vk::CmdClearColorImage(cmd, dst_image.handle(), dst_image.Layout(), &opaque_black, 1, &remaining_range);
        m_errorMonitor->VerifyNotFound();
    }
}
TEST_F(VkPositiveLayerTest, ClearDepthStencilWithValidRange) {
    TEST_DESCRIPTION("Record clear depth with a valid VkImageSubresourceRange");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
        return;
    }

    // Single-mip, single-layer depth/stencil image usable as a transfer destination.
    VkImageObj ds_image(m_device);
    ds_image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(ds_image.create_info().arrayLayers == 1);
    ASSERT_TRUE(ds_image.initialized());

    const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    ds_image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    const VkClearDepthStencilValue zero_clear = {};

    m_commandBuffer->begin();
    const auto cmd = m_commandBuffer->handle();

    // Case 1: explicit single mip / single layer range.
    {
        m_errorMonitor->ExpectSuccess();
        VkImageSubresourceRange explicit_range = {ds_aspect, 0, 1, 0, 1};
        vk::CmdClearDepthStencilImage(cmd, ds_image.handle(), ds_image.Layout(), &zero_clear, 1, &explicit_range);
        m_errorMonitor->VerifyNotFound();
    }

    // Write-after-write barrier between the two clears.
    ds_image.ImageMemoryBarrier(m_commandBuffer, ds_aspect, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_WRITE_BIT,
                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    // Case 2: same clear expressed with the VK_REMAINING_* counts.
    {
        m_errorMonitor->ExpectSuccess();
        VkImageSubresourceRange remaining_range = {ds_aspect, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
        vk::CmdClearDepthStencilImage(cmd, ds_image.handle(), ds_image.Layout(), &zero_clear, 1, &remaining_range);
        m_errorMonitor->VerifyNotFound();
    }
}
TEST_F(VkPositiveLayerTest, ExternalMemory) {
    TEST_DESCRIPTION("Perform a copy through a pair of buffers linked by external memory");

#ifdef _WIN32
    // Windows shares device memory through opaque NT handles.
    const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME;
    const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
    // Other platforms share device memory through opaque POSIX file descriptors.
    const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
    const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif

    // Check for external memory instance extensions; both are prerequisites for the capability query below.
    std::vector<const char *> reqd_instance_extensions = {
        {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}};
    for (auto extension_name : reqd_instance_extensions) {
        if (InstanceExtensionSupported(extension_name)) {
            m_instance_extension_names.push_back(extension_name);
        } else {
            printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name);
            return;
        }
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Check for import/export capability for transfer-src|dst buffers with the chosen handle type.
    VkPhysicalDeviceExternalBufferInfoKHR ebi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR, nullptr, 0,
                                                 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, handle_type};
    VkExternalBufferPropertiesKHR ebp = {VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR, nullptr, {0, 0, 0}};
    auto vkGetPhysicalDeviceExternalBufferPropertiesKHR =
        (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)vk::GetInstanceProcAddr(
            instance(), "vkGetPhysicalDeviceExternalBufferPropertiesKHR");
    ASSERT_TRUE(vkGetPhysicalDeviceExternalBufferPropertiesKHR != nullptr);
    vkGetPhysicalDeviceExternalBufferPropertiesKHR(gpu(), &ebi, &ebp);
    if (!(ebp.externalMemoryProperties.compatibleHandleTypes & handle_type) ||
        !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) ||
        !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR)) {
        printf("%s External buffer does not support importing and exporting, skipping test\n", kSkipPrefix);
        return;
    }

    // Check if dedicated allocation is required; if so, the dedicated-allocation extensions must also be enabled.
    bool dedicated_allocation =
        ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR;
    if (dedicated_allocation) {
        if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
            m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
            m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        } else {
            printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix);
            return;
        }
    }

    // Check for external memory device extensions (platform-specific one plus the base extension).
    if (DeviceExtensionSupported(gpu(), nullptr, ext_mem_extension_name)) {
        m_device_extension_names.push_back(ext_mem_extension_name);
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
    } else {
        printf("%s External memory extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);

    VkMemoryPropertyFlags mem_flags = 0;
    const VkDeviceSize buffer_size = 1024;

    // Create export and import buffers with the same create-info (both flagged for external memory).
    const VkExternalMemoryBufferCreateInfoKHR external_buffer_info = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR,
                                                                     nullptr, handle_type};
    auto buffer_info = VkBufferObj::create_info(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    buffer_info.pNext = &external_buffer_info;
    VkBufferObj buffer_export;
    buffer_export.init_no_mem(*m_device, buffer_info);
    VkBufferObj buffer_import;
    buffer_import.init_no_mem(*m_device, buffer_info);

    // Allocation info
    auto alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_export.memory_requirements(), mem_flags);

    // Add export allocation info to pNext chain
    VkExportMemoryAllocateInfoKHR export_info = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, nullptr, handle_type};
    alloc_info.pNext = &export_info;

    // Add dedicated allocation info to pNext chain if required
    VkMemoryDedicatedAllocateInfoKHR dedicated_info = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, nullptr,
                                                       VK_NULL_HANDLE, buffer_export.handle()};
    if (dedicated_allocation) {
        export_info.pNext = &dedicated_info;
    }

    // Allocate memory to be exported
    vk_testing::DeviceMemory memory_export;
    memory_export.init(*m_device, alloc_info);

    // Bind exported memory
    buffer_export.bind_memory(memory_export, 0);

#ifdef _WIN32
    // Export memory to handle
    auto vkGetMemoryWin32HandleKHR =
        (PFN_vkGetMemoryWin32HandleKHR)vk::GetInstanceProcAddr(instance(), "vkGetMemoryWin32HandleKHR");
    ASSERT_TRUE(vkGetMemoryWin32HandleKHR != nullptr);
    VkMemoryGetWin32HandleInfoKHR mghi = {VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, nullptr, memory_export.handle(),
                                          handle_type};
    HANDLE handle;
    ASSERT_VK_SUCCESS(vkGetMemoryWin32HandleKHR(m_device->device(), &mghi, &handle));

    VkImportMemoryWin32HandleInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, nullptr, handle_type,
                                                    handle};
#else
    // Export memory to fd
    auto vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vk::GetInstanceProcAddr(instance(), "vkGetMemoryFdKHR");
    ASSERT_TRUE(vkGetMemoryFdKHR != nullptr);
    VkMemoryGetFdInfoKHR mgfi = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, nullptr, memory_export.handle(), handle_type};
    int fd;
    ASSERT_VK_SUCCESS(vkGetMemoryFdKHR(m_device->device(), &mgfi, &fd));

    // NOTE(review): per the extension, a successful import transfers ownership of the fd/handle to the
    // implementation, so it is intentionally not closed here.
    VkImportMemoryFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, nullptr, handle_type, fd};
#endif

    // Import memory: reuse alloc_info sized for the import buffer, chained with the platform import struct.
    alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_import.memory_requirements(), mem_flags);
    alloc_info.pNext = &import_info;
    vk_testing::DeviceMemory memory_import;
    memory_import.init(*m_device, alloc_info);

    // Bind imported memory
    buffer_import.bind_memory(memory_import, 0);

    // Create test buffers and fill input buffer with a recognizable byte pattern
    VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    VkBufferObj buffer_input;
    buffer_input.init_as_src_and_dst(*m_device, buffer_size, mem_prop);
    auto input_mem = (uint8_t *)buffer_input.memory().map();
    for (uint32_t i = 0; i < buffer_size; i++) {
        input_mem[i] = (i & 0xFF);
    }
    buffer_input.memory().unmap();
    VkBufferObj buffer_output;
    buffer_output.init_as_src_and_dst(*m_device, buffer_size, mem_prop);

    // Copy from input buffer to output buffer through the exported/imported memory:
    // input -> export buffer (aliased with import buffer) -> output.
    m_commandBuffer->begin();
    VkBufferCopy copy_info = {0, 0, buffer_size};
    vk::CmdCopyBuffer(m_commandBuffer->handle(), buffer_input.handle(), buffer_export.handle(), 1, &copy_info);

    // Insert memory barrier to guarantee copy order (write to export visible before read through import)
    VkMemoryBarrier mem_barrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_TRANSFER_WRITE_BIT,
                                   VK_ACCESS_TRANSFER_READ_BIT};
    vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1,
                           &mem_barrier, 0, nullptr, 0, nullptr);
    vk::CmdCopyBuffer(m_commandBuffer->handle(), buffer_import.handle(), buffer_output.handle(), 1, &copy_info);
    m_commandBuffer->end();
    m_commandBuffer->QueueCommandBuffer();

    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, GetMemoryRequirements2) {
    TEST_DESCRIPTION(
        "Get memory requirements with VK_KHR_get_memory_requirements2 instead of core entry points and verify layers do not emit "
        "errors when objects are bound and used");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // VK_KHR_get_memory_requirements2 is the whole point of the test; skip without it.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    } else {
        printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);

    // Buffer whose memory requirements are queried via the extension entry point.
    VkBufferObj test_buffer;
    test_buffer.init_no_mem(*m_device,
                            VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT));
    auto fpGetBufferMemoryRequirements2 = reinterpret_cast<PFN_vkGetBufferMemoryRequirements2KHR>(
        vk::GetDeviceProcAddr(m_device->device(), "vkGetBufferMemoryRequirements2KHR"));
    ASSERT_TRUE(fpGetBufferMemoryRequirements2 != nullptr);
    VkBufferMemoryRequirementsInfo2KHR buffer_reqs_info = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr,
                                                           test_buffer.handle()};
    VkMemoryRequirements2KHR buffer_reqs2 = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR};
    fpGetBufferMemoryRequirements2(m_device->device(), &buffer_reqs_info, &buffer_reqs2);

    // Allocate and bind buffer memory sized from the extension query.
    vk_testing::DeviceMemory test_buffer_memory;
    test_buffer_memory.init(*m_device,
                            vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_reqs2.memoryRequirements, 0));
    vk::BindBufferMemory(m_device->device(), test_buffer.handle(), test_buffer_memory.handle(), 0);

    // Image counterpart of the same flow.
    auto image_create_info = vk_testing::Image::create_info();
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    vk_testing::Image test_image;
    test_image.init_no_mem(*m_device, image_create_info);
    auto fpGetImageMemoryRequirements2 = reinterpret_cast<PFN_vkGetImageMemoryRequirements2KHR>(
        vk::GetDeviceProcAddr(m_device->device(), "vkGetImageMemoryRequirements2KHR"));
    ASSERT_TRUE(fpGetImageMemoryRequirements2 != nullptr);
    VkImageMemoryRequirementsInfo2KHR image_reqs_info = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr,
                                                         test_image.handle()};
    VkMemoryRequirements2KHR image_reqs2 = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR};
    fpGetImageMemoryRequirements2(m_device->device(), &image_reqs_info, &image_reqs2);

    // Allocate and bind image memory sized from the extension query.
    vk_testing::DeviceMemory test_image_memory;
    test_image_memory.init(*m_device,
                           vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image_reqs2.memoryRequirements, 0));
    vk::BindImageMemory(m_device->device(), test_image.handle(), test_image_memory.handle(), 0);

    // Record arbitrary work against both resources so the bindings actually get validated.
    m_commandBuffer->begin();
    // Fill buffer with 0
    vk::CmdFillBuffer(m_commandBuffer->handle(), test_buffer.handle(), 0, VK_WHOLE_SIZE, 0);
    // Transition the image to GENERAL and clear it
    const auto subresource_range = test_image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT);
    const auto barrier = test_image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                                         VK_IMAGE_LAYOUT_GENERAL, subresource_range);
    vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0,
                           0, nullptr, 0, nullptr, 1, &barrier);
    const VkClearColorValue clear_color = {};
    vk::CmdClearColorImage(m_commandBuffer->handle(), test_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1,
                           &subresource_range);

    // Submit and verify no validation errors
    m_commandBuffer->end();
    m_commandBuffer->QueueCommandBuffer();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, BindMemory2) {
    TEST_DESCRIPTION(
        "Bind memory with VK_KHR_bind_memory2 instead of core entry points and verify layers do not emit errors when objects are "
        "used");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // VK_KHR_bind_memory2 is the whole point of the test; skip without it.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
    } else {
        printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);

    // Buffer bound through vkBindBufferMemory2KHR rather than the core entry point.
    VkBufferObj test_buffer;
    test_buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
    vk_testing::DeviceMemory test_buffer_memory;
    test_buffer_memory.init(*m_device,
                            vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, test_buffer.memory_requirements(), 0));
    auto fpBindBufferMemory2 =
        reinterpret_cast<PFN_vkBindBufferMemory2KHR>(vk::GetDeviceProcAddr(m_device->device(), "vkBindBufferMemory2KHR"));
    ASSERT_TRUE(fpBindBufferMemory2 != nullptr);
    VkBindBufferMemoryInfoKHR buffer_bind_info = {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, nullptr, test_buffer.handle(),
                                                  test_buffer_memory.handle(), 0};
    fpBindBufferMemory2(m_device->device(), 1, &buffer_bind_info);

    // Image bound through vkBindImageMemory2KHR.
    auto image_create_info = vk_testing::Image::create_info();
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    vk_testing::Image test_image;
    test_image.init_no_mem(*m_device, image_create_info);
    vk_testing::DeviceMemory test_image_memory;
    test_image_memory.init(*m_device,
                           vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, test_image.memory_requirements(), 0));
    auto fpBindImageMemory2 =
        reinterpret_cast<PFN_vkBindImageMemory2KHR>(vk::GetDeviceProcAddr(m_device->device(), "vkBindImageMemory2KHR"));
    ASSERT_TRUE(fpBindImageMemory2 != nullptr);
    VkBindImageMemoryInfoKHR image_bind_info = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, nullptr, test_image.handle(),
                                                test_image_memory.handle(), 0};
    fpBindImageMemory2(m_device->device(), 1, &image_bind_info);

    // Record arbitrary work against both resources so the extension-made bindings get validated.
    m_commandBuffer->begin();
    // Fill buffer with 0
    vk::CmdFillBuffer(m_commandBuffer->handle(), test_buffer.handle(), 0, VK_WHOLE_SIZE, 0);
    // Transition the image to GENERAL and clear it
    const auto subresource_range = test_image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT);
    const auto barrier = test_image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                                         VK_IMAGE_LAYOUT_GENERAL, subresource_range);
    vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0,
                           0, nullptr, 0, nullptr, 1, &barrier);
    const VkClearColorValue clear_color = {};
    vk::CmdClearColorImage(m_commandBuffer->handle(), test_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1,
                           &subresource_range);

    // Submit and verify no validation errors
    m_commandBuffer->end();
    m_commandBuffer->QueueCommandBuffer();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, MultiplaneImageCopyBufferToImage) {
    TEST_DESCRIPTION("Positive test of multiplane copy buffer to image");

    // Instance-side prerequisite for the KHR multiplane (YCbCr) extension stack.
    bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                                    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
    if (mp_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // All four device extensions are required for multiplane image support.
    const std::array<const char *, 4> mp_device_extensions = {
        {VK_KHR_MAINTENANCE_1_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
         VK_KHR_BIND_MEMORY_2_EXTENSION_NAME, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME}};
    for (const char *ext : mp_device_extensions) {
        mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, ext);
    }
    if (mp_extensions) {
        for (const char *ext : mp_device_extensions) {
            m_device_extension_names.push_back(ext);
        }
    } else {
        printf("%s test requires KHR multiplane extensions, not available.  Skipping.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // 3-plane 4:4:4 image so every plane has the same 16x16 extent.
    VkImageCreateInfo image_ci = {};
    image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_ci.pNext = NULL;
    image_ci.flags = 0;
    image_ci.imageType = VK_IMAGE_TYPE_2D;
    image_ci.format = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR;  // All planes of equal extent
    image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    image_ci.extent = {16, 16, 1};
    image_ci.mipLevels = 1;
    image_ci.arrayLayers = 1;
    image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
    image_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    const VkFormatFeatureFlags transfer_features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
    if (!ImageFormatAndFeaturesSupported(instance(), gpu(), image_ci, transfer_features)) {
        printf("%s Multiplane image format not supported.  Skipping test.\n", kSkipPrefix);
        return;  // Assume there's low ROI on searching for different mp formats
    }

    VkImageObj mp_image(m_device);
    mp_image.init(&image_ci);

    m_commandBuffer->reset();
    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->begin();
    mp_image.ImageMemoryBarrier(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_ACCESS_TRANSFER_WRITE_BIT,
                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    // One staging buffer per plane; each copy targets a single plane aspect.
    const std::array<VkImageAspectFlagBits, 3> plane_aspects = {
        {VK_IMAGE_ASPECT_PLANE_0_BIT, VK_IMAGE_ASPECT_PLANE_1_BIT, VK_IMAGE_ASPECT_PLANE_2_BIT}};
    std::array<VkBufferObj, 3> staging_buffers;
    VkMemoryPropertyFlags no_mem_reqs = 0;

    VkBufferImageCopy region = {};
    region.imageSubresource.layerCount = 1;
    region.imageExtent = {16, 16, 1};  // full plane: width 16, height 16, depth 1

    for (size_t plane = 0; plane < plane_aspects.size(); ++plane) {
        staging_buffers[plane].init_as_src(*m_device, (VkDeviceSize)16 * 16 * 1, no_mem_reqs);
        region.imageSubresource.aspectMask = plane_aspects[plane];
        vk::CmdCopyBufferToImage(m_commandBuffer->handle(), staging_buffers[plane].handle(), mp_image.handle(),
                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    }
    m_commandBuffer->end();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, MultiplaneImageTests) {
TEST_DESCRIPTION("Positive test of multiplane image operations");
// Enable KHR multiplane req'd extensions
bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
if (mp_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
SetTargetApiVersion(VK_API_VERSION_1_1);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
if (mp_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
} else {
printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create aliased function pointers for 1.0 and 1.1 contexts
PFN_vkBindImageMemory2KHR vkBindImageMemory2Function = nullptr;
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2Function = nullptr;
PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2Function = nullptr;
if (DeviceValidationVersion() >= VK_API_VERSION_1_1) {
vkBindImageMemory2Function = vk::BindImageMemory2;
vkGetImageMemoryRequirements2Function = vk::GetImageMemoryRequirements2;
vkGetPhysicalDeviceMemoryProperties2Function = vk::GetPhysicalDeviceMemoryProperties2;
} else {
vkBindImageMemory2Function = (PFN_vkBindImageMemory2KHR)vk::GetDeviceProcAddr(m_device->handle(), "vkBindImageMemory2KHR");
vkGetImageMemoryRequirements2Function =
(PFN_vkGetImageMemoryRequirements2KHR)vk::GetDeviceProcAddr(m_device->handle(), "vkGetImageMemoryRequirements2KHR");
vkGetPhysicalDeviceMemoryProperties2Function = (PFN_vkGetPhysicalDeviceMemoryProperties2KHR)vk::GetDeviceProcAddr(
m_device->handle(), "vkGetPhysicalDeviceMemoryProperties2KHR");
}
if (!vkBindImageMemory2Function || !vkGetImageMemoryRequirements2Function || !vkGetPhysicalDeviceMemoryProperties2Function) {
printf("%s Did not find required device extension support; test skipped.\n", kSkipPrefix);
return;
}
VkImageCreateInfo ci = {};
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR; // All planes of equal extent
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
ci.extent = {128, 128, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Verify format
VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
if (!supported) {
printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix);
return; // Assume there's low ROI on searching for different mp formats
}
VkImage image;
ASSERT_VK_SUCCESS(vk::CreateImage(device(), &ci, NULL, &image));
// Allocate & bind memory
VkPhysicalDeviceMemoryProperties phys_mem_props;
vk::GetPhysicalDeviceMemoryProperties(gpu(), &phys_mem_props);
VkMemoryRequirements mem_reqs;
vk::GetImageMemoryRequirements(device(), image, &mem_reqs);
VkDeviceMemory mem_obj = VK_NULL_HANDLE;
VkMemoryPropertyFlagBits mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
for (uint32_t type = 0; type < phys_mem_props.memoryTypeCount; type++) {
if ((mem_reqs.memoryTypeBits & (1 << type)) &&
((phys_mem_props.memoryTypes[type].propertyFlags & mem_props) == mem_props)) {
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = mem_reqs.size;
alloc_info.memoryTypeIndex = type;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &alloc_info, NULL, &mem_obj));
break;
}
}
if (VK_NULL_HANDLE == mem_obj) {
printf("%s Unable to allocate image memory. Skipping test.\n", kSkipPrefix);
vk::DestroyImage(device(), image, NULL);
return;
}
ASSERT_VK_SUCCESS(vk::BindImageMemory(device(), image, mem_obj, 0));
// Copy plane 0 to plane 2
VkImageCopy copyRegion = {};
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset = {0, 0, 0};
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
copyRegion.dstSubresource.layerCount = 1;
copyRegion.dstOffset = {0, 0, 0};
copyRegion.extent.width = 128;
copyRegion.extent.height = 128;
copyRegion.extent.depth = 1;
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
m_commandBuffer->CopyImage(image, VK_IMAGE_LAYOUT_GENERAL, image, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyNotFound();
vk::FreeMemory(device(), mem_obj, NULL);
vk::DestroyImage(device(), image, NULL);
// Repeat bind test on a DISJOINT multi-planar image, with per-plane memory objects, using API2 variants
//
features |= VK_FORMAT_FEATURE_DISJOINT_BIT;
ci.flags = VK_IMAGE_CREATE_DISJOINT_BIT;
if (ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features)) {
ASSERT_VK_SUCCESS(vk::CreateImage(device(), &ci, NULL, &image));
// Allocate & bind memory
VkPhysicalDeviceMemoryProperties2 phys_mem_props2 = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2};
vkGetPhysicalDeviceMemoryProperties2Function(gpu(), &phys_mem_props2);
VkImagePlaneMemoryRequirementsInfo image_plane_req = {VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO};
VkImageMemoryRequirementsInfo2 mem_req_info2 = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2};
mem_req_info2.pNext = &image_plane_req;
mem_req_info2.image = image;
VkMemoryRequirements2 mem_reqs2 = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
VkDeviceMemory p0_mem, p1_mem, p2_mem;
mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
VkMemoryAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
// Plane 0
image_plane_req.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
vkGetImageMemoryRequirements2Function(device(), &mem_req_info2, &mem_reqs2);
uint32_t mem_type = 0;
for (mem_type = 0; mem_type < phys_mem_props2.memoryProperties.memoryTypeCount; mem_type++) {
if ((mem_reqs2.memoryRequirements.memoryTypeBits & (1 << mem_type)) &&
((phys_mem_props2.memoryProperties.memoryTypes[mem_type].propertyFlags & mem_props) == mem_props)) {
alloc_info.memoryTypeIndex = mem_type;
break;
}
}
alloc_info.allocationSize = mem_reqs2.memoryRequirements.size;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &alloc_info, NULL, &p0_mem));
// Plane 1 & 2 use same memory type
image_plane_req.planeAspect = VK_IMAGE_ASPECT_PLANE_1_BIT;
vkGetImageMemoryRequirements2Function(device(), &mem_req_info2, &mem_reqs2);
alloc_info.allocationSize = mem_reqs2.memoryRequirements.size;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &alloc_info, NULL, &p1_mem));
image_plane_req.planeAspect = VK_IMAGE_ASPECT_PLANE_2_BIT;
vkGetImageMemoryRequirements2Function(device(), &mem_req_info2, &mem_reqs2);
alloc_info.allocationSize = mem_reqs2.memoryRequirements.size;
ASSERT_VK_SUCCESS(vk::AllocateMemory(device(), &alloc_info, NULL, &p2_mem));
// Set up 3-plane binding
VkBindImageMemoryInfo bind_info[3];
for (int plane = 0; plane < 3; plane++) {
bind_info[plane].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bind_info[plane].pNext = nullptr;
bind_info[plane].image = image;
bind_info[plane].memoryOffset = 0;
}
bind_info[0].memory = p0_mem;
bind_info[1].memory = p1_mem;
bind_info[2].memory = p2_mem;
m_errorMonitor->ExpectSuccess();
vkBindImageMemory2Function(device(), 3, bind_info);
m_errorMonitor->VerifyNotFound();
vk::FreeMemory(device(), p0_mem, NULL);
vk::FreeMemory(device(), p1_mem, NULL);
vk::FreeMemory(device(), p2_mem, NULL);
vk::DestroyImage(device(), image, NULL);
}
// Test that changing the layout of ASPECT_COLOR also changes the layout of the individual planes
VkBufferObj buffer;
VkMemoryPropertyFlags reqs = 0;
buffer.init_as_src(*m_device, (VkDeviceSize)128 * 128 * 3, reqs);
VkImageObj mpimage(m_device);
mpimage.Init(256, 256, 1, VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
VkBufferImageCopy copy_region = {};
copy_region.bufferRowLength = 128;
copy_region.bufferImageHeight = 128;
copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR;
copy_region.imageSubresource.layerCount = 1;
copy_region.imageExtent.height = 64;
copy_region.imageExtent.width = 64;
copy_region.imageExtent.depth = 1;
vk::ResetCommandBuffer(m_commandBuffer->handle(), 0);
m_commandBuffer->begin();
mpimage.ImageMemoryBarrier(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
vk::CmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), mpimage.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&copy_region);
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyNotFound();
// Test to verify that views of multiplanar images have layouts tracked correctly
// by changing the image's layout then using a view of that image
VkImageView view;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = mpimage.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
OneOffDescriptorSet descriptor_set(m_device,
{
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err;
err = vk::CreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_});
descriptor_set.WriteDescriptorImageInfo(0, view, sampler);
descriptor_set.UpdateDescriptorSets();
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragSamplerShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
img_barrier.image = mpimage.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_set.set_, 0, nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vk::QueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
vk::QueueWaitIdle(m_device->m_queue);
vk::DestroyImageView(m_device->device(), view, NULL);
vk::DestroySampler(m_device->device(), sampler, nullptr);
}
TEST_F(VkPositiveLayerTest, TestFormatCompatibility) {
    TEST_DESCRIPTION("Test format compatibility");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    // VK_KHR_image_format_list is needed for VkImageFormatListCreateInfo in the pNext chain.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // View format list contains a format that is compatible (same texel block size)
    // with the image's create format; creation must not trigger validation errors.
    VkFormat format = VK_FORMAT_R12X4G12X4_UNORM_2PACK16;
    VkImageFormatListCreateInfo format_list = LvlInitStruct<VkImageFormatListCreateInfo>();
    format_list.viewFormatCount = 1;
    format_list.pViewFormats = &format;

    VkImageCreateInfo image_create_info = LvlInitStruct<VkImageCreateInfo>(&format_list);
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    image_create_info.flags = 0;

    m_errorMonitor->ExpectSuccess();
    // Initialize the handle so cleanup is safe even if creation fails.
    VkImage image = VK_NULL_HANDLE;
    vk::CreateImage(m_device->device(), &image_create_info, nullptr, &image);
    m_errorMonitor->VerifyNotFound();

    // Fix: the image was previously leaked; destroying VK_NULL_HANDLE is a valid no-op.
    vk::DestroyImage(m_device->device(), image, nullptr);
}
TEST_F(VkPositiveLayerTest, TestCreatingFramebufferFrom3DImage) {
    TEST_DESCRIPTION("Validate creating a framebuffer from a 3D image.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    // VK_KHR_maintenance1 provides VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, which allows
    // 2D-array views of a 3D image.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
    } else {
        printf("%s Extension %s not supported, skipping tests\n", kSkipPrefix, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_errorMonitor->ExpectSuccess();
    // 3D color-attachment image whose depth (4) supplies the view's array layers.
    VkImageCreateInfo image_ci = LvlInitStruct<VkImageCreateInfo>();
    image_ci.flags = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
    image_ci.imageType = VK_IMAGE_TYPE_3D;
    image_ci.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_ci.extent.width = 32;
    image_ci.extent.height = 32;
    image_ci.extent.depth = 4;
    image_ci.mipLevels = 1;
    image_ci.arrayLayers = 1;
    image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
    image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    VkImageObj image(m_device);
    image.init(&image_ci);

    // 2D-array view spanning all 4 "layers" (depth slices) of the 3D image.
    VkImageViewCreateInfo dsvci = LvlInitStruct<VkImageViewCreateInfo>();
    dsvci.image = image.handle();
    dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;
    dsvci.format = VK_FORMAT_B8G8R8A8_UNORM;
    dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    dsvci.subresourceRange.baseMipLevel = 0;
    dsvci.subresourceRange.layerCount = 4;
    dsvci.subresourceRange.baseArrayLayer = 0;
    dsvci.subresourceRange.levelCount = 1;
    // Initialize handles so cleanup is safe even if creation fails.
    VkImageView view = VK_NULL_HANDLE;
    vk::CreateImageView(m_device->device(), &dsvci, nullptr, &view);

    VkFramebufferCreateInfo fci = LvlInitStruct<VkFramebufferCreateInfo>();
    fci.renderPass = m_renderPass;
    fci.attachmentCount = 1;
    fci.pAttachments = &view;
    fci.width = 32;
    fci.height = 32;
    fci.layers = 4;
    VkFramebuffer framebuffer = VK_NULL_HANDLE;
    vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &framebuffer);
    m_errorMonitor->VerifyNotFound();

    // Fix: the framebuffer and image view were previously leaked;
    // destroying VK_NULL_HANDLE is a valid no-op.
    vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
    vk::DestroyImageView(m_device->device(), view, nullptr);
}
TEST_F(VkPositiveLayerTest, TestMappingMemoryWithMultiInstanceHeapFlag) {
    TEST_DESCRIPTION("Test mapping memory that uses memory heap with VK_MEMORY_HEAP_MULTI_INSTANCE_BIT");

    AddRequiredExtensions(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Extension %s is not supported, skipping test.\n", kSkipPrefix, VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Find a memory type that is host-visible AND lives in a multi-instance heap.
    VkPhysicalDeviceMemoryProperties memory_info;
    vk::GetPhysicalDeviceMemoryProperties(gpu(), &memory_info);
    uint32_t memory_index = std::numeric_limits<uint32_t>::max();
    for (uint32_t i = 0; i < memory_info.memoryTypeCount; ++i) {
        if ((memory_info.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) {
            if (memory_info.memoryHeaps[memory_info.memoryTypes[i].heapIndex].flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) {
                memory_index = i;
                break;
            }
        }
    }
    if (memory_index == std::numeric_limits<uint32_t>::max()) {
        printf("%s Did not host visible memory from memory heap with VK_MEMORY_HEAP_MULTI_INSTANCE_BIT bit ; skipped.\n",
               kSkipPrefix);
        return;
    }

    VkMemoryAllocateInfo mem_alloc = LvlInitStruct<VkMemoryAllocateInfo>();
    mem_alloc.allocationSize = 64;
    mem_alloc.memoryTypeIndex = memory_index;
    // Fix: check the allocation result instead of mapping a possibly-invalid handle.
    VkDeviceMemory memory = VK_NULL_HANDLE;
    ASSERT_VK_SUCCESS(vk::AllocateMemory(m_device->device(), &mem_alloc, nullptr, &memory));

    // Mapping multi-instance-heap memory must not produce validation errors.
    uint32_t *pData;
    m_errorMonitor->ExpectSuccess();
    vk::MapMemory(device(), memory, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    m_errorMonitor->VerifyNotFound();

    // Fix: the allocation was previously leaked; unmap and free it.
    vk::UnmapMemory(device(), memory);
    vk::FreeMemory(device(), memory, nullptr);
}
// Positive test: recording vkCmdCopyImage with a swapchain image as the destination
// must not trigger validation errors. The swapchain image is created manually via
// VkImageSwapchainCreateInfoKHR and bound with VkBindImageMemoryInfo::memory = VK_NULL_HANDLE.
TEST_F(VkPositiveLayerTest, CmdCopySwapchainImage) {
    TEST_DESCRIPTION("Run vkCmdCopyImage with a swapchain image");

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    // Per valid usage the bind's memory must be NULL for swapchain binds, but Android
    // drivers are known to crash on that, so skip the whole test there.
    printf(
        "%s According to valid usage, VkBindImageMemoryInfo-memory should be NULL. But Android will crash if memory is NULL, "
        "skipping CmdCopySwapchainImage test\n",
        kSkipPrefix);
    return;
#endif

    SetTargetApiVersion(VK_API_VERSION_1_2);
    if (!AddSurfaceInstanceExtension()) {
        printf("%s surface extensions not supported, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!AddSwapchainDeviceExtension()) {
        printf("%s swapchain extensions not supported, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
        return;
    }
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s This test requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }
    if (IsDriver(VK_DRIVER_ID_MESA_RADV)) {
        // Seeing the same crash as the Android comment above
        printf("%s This test should not be run on the RADV driver\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    if (!InitSwapchain(VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        printf("%s Cannot create surface or swapchain, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
        return;
    }

    // Ordinary transfer-source image matching the swapchain's format and minimum extent.
    auto image_create_info = LvlInitStruct<VkImageCreateInfo>();
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = m_surface_formats[0].format;
    image_create_info.extent.width = m_surface_capabilities.minImageExtent.width;
    image_create_info.extent.height = m_surface_capabilities.minImageExtent.height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkImageObj srcImage(m_device);
    srcImage.init(&image_create_info);

    // Destination: an image tied to the swapchain via VkImageSwapchainCreateInfoKHR,
    // then bound to swapchain image 0 (memory must be VK_NULL_HANDLE for this path).
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    auto image_swapchain_create_info = LvlInitStruct<VkImageSwapchainCreateInfoKHR>();
    image_swapchain_create_info.swapchain = m_swapchain;
    image_create_info.pNext = &image_swapchain_create_info;
    VkImage image_from_swapchain;
    vk::CreateImage(device(), &image_create_info, NULL, &image_from_swapchain);

    auto bind_swapchain_info = LvlInitStruct<VkBindImageMemorySwapchainInfoKHR>();
    bind_swapchain_info.swapchain = m_swapchain;
    bind_swapchain_info.imageIndex = 0;
    auto bind_info = LvlInitStruct<VkBindImageMemoryInfo>(&bind_swapchain_info);
    bind_info.image = image_from_swapchain;
    bind_info.memory = VK_NULL_HANDLE;
    bind_info.memoryOffset = 0;
    vk::BindImageMemory2(m_device->device(), 1, &bind_info);

    // Full-subresource copy; extent is clamped so it fits the smallest allowed swapchain size.
    VkImageCopy copy_region = {};
    copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.srcSubresource.mipLevel = 0;
    copy_region.dstSubresource.mipLevel = 0;
    copy_region.srcSubresource.baseArrayLayer = 0;
    copy_region.dstSubresource.baseArrayLayer = 0;
    copy_region.srcSubresource.layerCount = 1;
    copy_region.dstSubresource.layerCount = 1;
    copy_region.srcOffset = {0, 0, 0};
    copy_region.dstOffset = {0, 0, 0};
    copy_region.extent = {std::min(10u, m_surface_capabilities.minImageExtent.width),
                          std::min(10u, m_surface_capabilities.minImageExtent.height), 1};

    // NOTE(review): the command buffer is begun and the copy recorded, but end()/submit are
    // never called — only recording-time validation is exercised here; confirm intended.
    m_commandBuffer->begin();
    m_errorMonitor->ExpectSuccess();
    vk::CmdCopyImage(m_commandBuffer->handle(), srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, image_from_swapchain,
                     VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyNotFound();

    vk::DestroyImage(m_device->device(), image_from_swapchain, NULL);
    DestroySwapchain();
}
// Positive test: copy an image into a swapchain image through a device-group "peer image"
// (an image created against the swapchain and bound with VkBindImageMemoryDeviceGroupInfo).
TEST_F(VkPositiveLayerTest, TransferImageToSwapchainDeviceGroup) {
    TEST_DESCRIPTION("Transfer an image to a swapchain's image between device group");

#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    // Per valid usage the bind's memory must be NULL for swapchain binds, but Android
    // drivers are known to crash on that, so skip the whole test there.
    printf(
        "%s According to valid usage, VkBindImageMemoryInfo-memory should be NULL. But Android will crash if memory is NULL, "
        "skipping test\n",
        kSkipPrefix);
    return;
#endif

    SetTargetApiVersion(VK_API_VERSION_1_2);
    if (!AddSurfaceInstanceExtension()) {
        printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!AddSwapchainDeviceExtension()) {
        printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
        return;
    }
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s This test requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }
    if (IsDriver(VK_DRIVER_ID_MESA_RADV)) {
        // Seeing the same crash as the Android comment above
        printf("%s This test should not be run on the RADV driver\n", kSkipPrefix);
        return;
    }

    // Create the logical device from the first physical device group so the
    // device-group bind path below is legal.
    uint32_t physical_device_group_count = 0;
    vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, nullptr);
    if (physical_device_group_count == 0) {
        printf("%s physical_device_group_count is 0, skipping test\n", kSkipPrefix);
        return;
    }
    std::vector<VkPhysicalDeviceGroupProperties> physical_device_group(physical_device_group_count,
                                                                      {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES});
    vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, physical_device_group.data());
    VkDeviceGroupDeviceCreateInfo create_device_pnext = {};
    create_device_pnext.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO;
    create_device_pnext.physicalDeviceCount = physical_device_group[0].physicalDeviceCount;
    create_device_pnext.pPhysicalDevices = physical_device_group[0].physicalDevices;
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &create_device_pnext));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    if (!InitSwapchain(VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
        printf("%s Cannot create surface or swapchain, skipping test\n", kSkipPrefix);
        return;
    }

    // Transfer-source image matching the swapchain's format and minimum extent.
    auto image_create_info = LvlInitStruct<VkImageCreateInfo>();
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = m_surface_formats[0].format;
    image_create_info.extent.width = m_surface_capabilities.minImageExtent.width;
    image_create_info.extent.height = m_surface_capabilities.minImageExtent.height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkImageObj src_Image(m_device);
    src_Image.init(&image_create_info);

    // "Peer" image: created against the swapchain, then bound to swapchain image 0 with a
    // device-group bind (memory must be VK_NULL_HANDLE on this path).
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    auto image_swapchain_create_info = LvlInitStruct<VkImageSwapchainCreateInfoKHR>();
    image_swapchain_create_info.swapchain = m_swapchain;
    image_create_info.pNext = &image_swapchain_create_info;
    VkImage peer_image;
    vk::CreateImage(device(), &image_create_info, NULL, &peer_image);

    auto bind_devicegroup_info = LvlInitStruct<VkBindImageMemoryDeviceGroupInfo>();
    // NOTE(review): deviceIndexCount is hard-coded to 2, but the spec requires it to equal
    // the device group's physicalDeviceCount — confirm behavior on single-GPU groups.
    bind_devicegroup_info.deviceIndexCount = 2;
    std::array<uint32_t, 2> deviceIndices = {{0, 0}};
    bind_devicegroup_info.pDeviceIndices = deviceIndices.data();
    bind_devicegroup_info.splitInstanceBindRegionCount = 0;
    bind_devicegroup_info.pSplitInstanceBindRegions = nullptr;
    auto bind_swapchain_info = LvlInitStruct<VkBindImageMemorySwapchainInfoKHR>(&bind_devicegroup_info);
    bind_swapchain_info.swapchain = m_swapchain;
    bind_swapchain_info.imageIndex = 0;
    auto bind_info = LvlInitStruct<VkBindImageMemoryInfo>(&bind_swapchain_info);
    bind_info.image = peer_image;
    bind_info.memory = VK_NULL_HANDLE;
    bind_info.memoryOffset = 0;
    vk::BindImageMemory2(m_device->device(), 1, &bind_info);

    uint32_t swapchain_images_count = 0;
    vk::GetSwapchainImagesKHR(device(), m_swapchain, &swapchain_images_count, nullptr);
    std::vector<VkImage> swapchain_images;
    swapchain_images.resize(swapchain_images_count);
    vk::GetSwapchainImagesKHR(device(), m_swapchain, &swapchain_images_count, swapchain_images.data());

    m_commandBuffer->begin();
    // Transition the underlying swapchain image 0 to TRANSFER_DST before copying into it
    // through the peer image alias.
    auto img_barrier = LvlInitStruct<VkImageMemoryBarrier>();
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    img_barrier.image = swapchain_images[0];
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;
    vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
                           nullptr, 0, nullptr, 1, &img_barrier);

    VkImageCopy copy_region = {};
    copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.srcSubresource.mipLevel = 0;
    copy_region.dstSubresource.mipLevel = 0;
    copy_region.srcSubresource.baseArrayLayer = 0;
    copy_region.dstSubresource.baseArrayLayer = 0;
    copy_region.srcSubresource.layerCount = 1;
    copy_region.dstSubresource.layerCount = 1;
    copy_region.srcOffset = {0, 0, 0};
    copy_region.dstOffset = {0, 0, 0};
    copy_region.extent = {10, 10, 1};
    vk::CmdCopyImage(m_commandBuffer->handle(), src_Image.handle(), VK_IMAGE_LAYOUT_GENERAL, peer_image,
                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
    m_commandBuffer->end();

    // Submission (not just recording) must pass validation, since layout tracking across
    // the swapchain alias is resolved at queue-submit time.
    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->QueueCommandBuffer();
    m_errorMonitor->VerifyNotFound();

    vk::DestroyImage(m_device->device(), peer_image, NULL);
    DestroySwapchain();
}
// Positive test: a swapchain image may be transitioned UNDEFINED -> COLOR_ATTACHMENT by one
// render pass, reused at COLOR_ATTACHMENT by a second, then barriered to PRESENT_SRC and
// submitted, all without validation errors.
TEST_F(VkPositiveLayerTest, SwapchainImageLayout) {
    if (!AddSurfaceInstanceExtension()) {
        printf("%s surface extensions not supported, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!AddSwapchainDeviceExtension()) {
        printf("%s swapchain extensions not supported, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    if (!InitSwapchain()) {
        printf("%s Cannot create surface or swapchain, skipping CmdCopySwapchainImage test\n", kSkipPrefix);
        return;
    }

    uint32_t image_index, image_count;
    PFN_vkGetSwapchainImagesKHR fpGetSwapchainImagesKHR =
        (PFN_vkGetSwapchainImagesKHR)vk::GetDeviceProcAddr(m_device->handle(), "vkGetSwapchainImagesKHR");
    fpGetSwapchainImagesKHR(m_device->handle(), m_swapchain, &image_count, NULL);
    // Fix: use std::vector instead of malloc/free — the raw buffer leaked whenever an
    // ASSERT below aborted the test early.
    std::vector<VkImage> swapchainImages(image_count);
    fpGetSwapchainImagesKHR(m_device->handle(), m_swapchain, &image_count, swapchainImages.data());

    VkFenceCreateInfo fenceci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
    VkFence fence;
    VkResult ret = vk::CreateFence(m_device->device(), &fenceci, nullptr, &fence);
    ASSERT_VK_SUCCESS(ret);
    ret = vk::AcquireNextImageKHR(m_device->handle(), m_swapchain, UINT64_MAX, VK_NULL_HANDLE, fence, &image_index);
    ASSERT_VK_SUCCESS(ret);

    // rp1: transitions the attachment UNDEFINED -> COLOR_ATTACHMENT_OPTIMAL.
    // rp2: identical but expects the image already in COLOR_ATTACHMENT_OPTIMAL.
    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, &subpass, 0, nullptr};
    VkRenderPass rp1, rp2;
    ret = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp1);
    ASSERT_VK_SUCCESS(ret);
    attach[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    ret = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp2);
    // Fix: this result was previously unchecked.
    ASSERT_VK_SUCCESS(ret);

    // View of the acquired swapchain image, used by both framebuffers.
    VkImageViewCreateInfo ivci = {
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        nullptr,
        0,
        swapchainImages[image_index],
        VK_IMAGE_VIEW_TYPE_2D,
        VK_FORMAT_B8G8R8A8_UNORM,
        {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
         VK_COMPONENT_SWIZZLE_IDENTITY},
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    VkImageView view;
    ret = vk::CreateImageView(m_device->device(), &ivci, nullptr, &view);
    ASSERT_VK_SUCCESS(ret);
    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp1, 1, &view, 32, 32, 1};
    VkFramebuffer fb1, fb2;
    ret = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb1);
    // Fix: this result was previously unchecked.
    ASSERT_VK_SUCCESS(ret);
    fci.renderPass = rp2;
    ret = vk::CreateFramebuffer(m_device->device(), &fci, nullptr, &fb2);
    ASSERT_VK_SUCCESS(ret);

    // Record: rp1 (UNDEFINED -> COLOR_ATTACHMENT), then rp2 (stays COLOR_ATTACHMENT),
    // then a barrier to PRESENT_SRC.
    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp1, fb1, {{0, 0}, {32, 32}}, 0, nullptr};
    m_commandBuffer->begin();
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    rpbi.framebuffer = fb2;
    rpbi.renderPass = rp2;
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
    vk::CmdEndRenderPass(m_commandBuffer->handle());

    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.srcAccessMask = 0;
    img_barrier.dstAccessMask = 0;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    img_barrier.image = swapchainImages[image_index];
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;
    vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
                           nullptr, 0, nullptr, 1, &img_barrier);
    m_commandBuffer->end();

    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;
    // Wait for the acquire to complete, then reuse the fence for the submit.
    vk::WaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
    vk::ResetFences(m_device->device(), 1, &fence);
    m_errorMonitor->ExpectSuccess();
    vk::QueueSubmit(m_device->m_queue, 1, &submit_info, fence);
    m_errorMonitor->VerifyNotFound();
    vk::WaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);

    vk::DestroyFramebuffer(m_device->device(), fb1, NULL);
    vk::DestroyRenderPass(m_device->device(), rp1, NULL);
    vk::DestroyFramebuffer(m_device->device(), fb2, NULL);
    vk::DestroyRenderPass(m_device->device(), rp2, NULL);
    vk::DestroyFence(m_device->device(), fence, NULL);
    vk::DestroyImageView(m_device->device(), view, NULL);
    DestroySwapchain();
}
// Positive test: per-mip layout transitions on a multi-mip, multi-layer image are tracked
// correctly — first the whole image, then individual mip levels bounced between layouts.
TEST_F(VkPositiveLayerTest, SubresourceLayout) {
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);

    // Image with several mip levels and layers so subresource-granular tracking is exercised.
    auto ci = vk_testing::Image::create_info();
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.extent.width = 64;
    ci.extent.height = 64;
    ci.mipLevels = 7;
    ci.arrayLayers = 6;
    ci.format = VK_FORMAT_R8_UINT;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    vk_testing::Image img;
    img.init(*m_device, ci);

    m_commandBuffer->begin();

    const auto full_range = img.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT);
    auto barrier = img.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, full_range);
    // Record a layout transition using the (possibly modified) barrier struct.
    const auto transition = [this, &barrier](VkImageLayout from, VkImageLayout to) {
        barrier.oldLayout = from;
        barrier.newLayout = to;
        vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
                               0, 0, nullptr, 0, nullptr, 1, &barrier);
    };

    // Whole image: UNDEFINED -> TRANSFER_SRC.
    transition(VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    // Mip level 1 only: bounce SRC -> DST -> SRC.
    barrier.subresourceRange.baseMipLevel = 1;
    barrier.subresourceRange.levelCount = 1;
    transition(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    transition(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    // Mip level 2: SRC -> DST.
    barrier.subresourceRange.baseMipLevel = 2;
    transition(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    m_commandBuffer->end();
    m_commandBuffer->QueueCommandBuffer();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ImagelessLayoutTracking) {
    TEST_DESCRIPTION("Test layout tracking on imageless framebuffers");
    m_errorMonitor->ExpectSuccess(kErrorBit | kWarningBit);
    if (!AddSurfaceInstanceExtension()) {
        printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
        return;
    }
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required device extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s This test requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }
    if (IsDriver(VK_DRIVER_ID_MESA_RADV)) {
        // According to valid usage, VkBindImageMemoryInfo::memory should be NULL. But RADV will crash if memory is NULL.
        printf("%s This test should not be run on the RADV driver\n", kSkipPrefix);
        return;
    }
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME);
    } else {
        printf("%s test requires VK_KHR_imageless_framebuffer, not available.  Skipping.\n", kSkipPrefix);
        return;
    }
    if (!AddSwapchainDeviceExtension()) {
        printf("%s swapchain extensions not supported, skipping test\n", kSkipPrefix);
        return;
    }

    // Enable the imageless framebuffer feature and create the device inside the
    // first physical device group so swapchain-image binding with a device group works.
    VkPhysicalDeviceImagelessFramebufferFeaturesKHR physicalDeviceImagelessFramebufferFeatures = {};
    physicalDeviceImagelessFramebufferFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR;
    physicalDeviceImagelessFramebufferFeatures.imagelessFramebuffer = VK_TRUE;
    VkPhysicalDeviceFeatures2 physicalDeviceFeatures2 = {};
    physicalDeviceFeatures2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    physicalDeviceFeatures2.pNext = &physicalDeviceImagelessFramebufferFeatures;

    uint32_t physical_device_group_count = 0;
    vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, nullptr);
    if (physical_device_group_count == 0) {
        printf("%s physical_device_group_count is 0, skipping test\n", kSkipPrefix);
        return;
    }
    std::vector<VkPhysicalDeviceGroupProperties> physical_device_group(physical_device_group_count,
                                                                      {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES});
    vk::EnumeratePhysicalDeviceGroups(instance(), &physical_device_group_count, physical_device_group.data());
    VkDeviceGroupDeviceCreateInfo create_device_pnext = {};
    create_device_pnext.sType = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO;
    create_device_pnext.physicalDeviceCount = physical_device_group[0].physicalDeviceCount;
    create_device_pnext.pPhysicalDevices = physical_device_group[0].physicalDevices;
    create_device_pnext.pNext = &physicalDeviceFeatures2;
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &create_device_pnext, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    if (!InitSwapchain(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
        printf("%s Cannot create surface or swapchain, skipping test\n", kSkipPrefix);
        return;
    }
    uint32_t attachmentWidth = m_surface_capabilities.minImageExtent.width;
    uint32_t attachmentHeight = m_surface_capabilities.minImageExtent.height;
    VkFormat attachmentFormat = m_surface_formats[0].format;

    // Single-attachment render pass whose final layout is PRESENT_SRC_KHR, so executing it
    // must transition the attachment image for presentation.
    VkAttachmentDescription attachmentDescription[] = {{0, attachmentFormat, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                        VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                                        VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
                                                        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR}};
    VkAttachmentReference attachmentReference = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &attachmentReference, nullptr, nullptr, 0, nullptr},
    };
    VkRenderPassCreateInfo renderPassCreateInfo = {
        VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attachmentDescription, 1, subpasses, 0, nullptr};
    VkRenderPass renderPass;
    vk::CreateRenderPass(m_device->device(), &renderPassCreateInfo, NULL, &renderPass);

    // Create an image to use in an imageless framebuffer.  Bind swapchain memory to it.
    auto image_swapchain_create_info = LvlInitStruct<VkImageSwapchainCreateInfoKHR>();
    image_swapchain_create_info.swapchain = m_swapchain;
    VkImageCreateInfo imageCreateInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                         &image_swapchain_create_info,
                                         0,
                                         VK_IMAGE_TYPE_2D,
                                         attachmentFormat,
                                         {attachmentWidth, attachmentHeight, 1},
                                         1,
                                         1,
                                         VK_SAMPLE_COUNT_1_BIT,
                                         VK_IMAGE_TILING_OPTIMAL,
                                         VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                         VK_SHARING_MODE_EXCLUSIVE,
                                         0,
                                         nullptr,
                                         VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image(m_device);
    image.init_no_mem(*m_device, imageCreateInfo);

    // VUID-VkBindImageMemoryDeviceGroupInfo-deviceIndexCount-01633: deviceIndexCount must be
    // 0 or equal to the number of physical devices in the logical device.  A hard-coded count
    // of 2 is invalid on the common single-GPU device group, so size it from the group.
    auto bind_devicegroup_info = LvlInitStruct<VkBindImageMemoryDeviceGroupInfo>();
    std::vector<uint32_t> deviceIndices(physical_device_group[0].physicalDeviceCount, 0);
    bind_devicegroup_info.deviceIndexCount = physical_device_group[0].physicalDeviceCount;
    bind_devicegroup_info.pDeviceIndices = deviceIndices.data();
    bind_devicegroup_info.splitInstanceBindRegionCount = 0;
    bind_devicegroup_info.pSplitInstanceBindRegions = nullptr;
    auto bind_swapchain_info = LvlInitStruct<VkBindImageMemorySwapchainInfoKHR>(&bind_devicegroup_info);
    bind_swapchain_info.swapchain = m_swapchain;
    bind_swapchain_info.imageIndex = 0;
    auto bind_info = LvlInitStruct<VkBindImageMemoryInfo>(&bind_swapchain_info);
    bind_info.image = image.image();
    bind_info.memory = VK_NULL_HANDLE;  // memory must be NULL when binding to a swapchain
    bind_info.memoryOffset = 0;
    vk::BindImageMemory2(m_device->device(), 1, &bind_info);

    uint32_t swapchain_images_count = 0;
    vk::GetSwapchainImagesKHR(device(), m_swapchain, &swapchain_images_count, nullptr);
    std::vector<VkImage> swapchain_images;
    swapchain_images.resize(swapchain_images_count);
    vk::GetSwapchainImagesKHR(device(), m_swapchain, &swapchain_images_count, swapchain_images.data());
    uint32_t current_buffer;
    VkSemaphore image_acquired;
    VkSemaphoreCreateInfo semaphore_create_info = {};
    semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    vk::CreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &image_acquired);
    vk::AcquireNextImageKHR(device(), m_swapchain, UINT64_MAX, image_acquired, VK_NULL_HANDLE, &current_buffer);

    VkImageView imageView = image.targetView(attachmentFormat);
    VkFramebufferAttachmentImageInfoKHR framebufferAttachmentImageInfo = {VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR,
                                                                          nullptr,
                                                                          0,
                                                                          VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                                                          attachmentWidth,
                                                                          attachmentHeight,
                                                                          1,
                                                                          1,
                                                                          &attachmentFormat};
    VkFramebufferAttachmentsCreateInfoKHR framebufferAttachmentsCreateInfo = {};
    framebufferAttachmentsCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR;
    framebufferAttachmentsCreateInfo.attachmentImageInfoCount = 1;
    framebufferAttachmentsCreateInfo.pAttachmentImageInfos = &framebufferAttachmentImageInfo;
    // With VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR, pAttachments is ignored -- the bogus
    // non-null pointer below is deliberate and must not be dereferenced by the layers.
    VkFramebufferCreateInfo framebufferCreateInfo = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
                                                     &framebufferAttachmentsCreateInfo,
                                                     VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR,
                                                     renderPass,
                                                     1,
                                                     reinterpret_cast<const VkImageView *>(1),
                                                     attachmentWidth,
                                                     attachmentHeight,
                                                     1};
    VkFramebuffer framebuffer;
    vk::CreateFramebuffer(m_device->device(), &framebufferCreateInfo, nullptr, &framebuffer);
    VkRenderPassAttachmentBeginInfoKHR renderPassAttachmentBeginInfo = {VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR,
                                                                        nullptr, 1, &imageView};
    VkRenderPassBeginInfo renderPassBeginInfo = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
                                                 &renderPassAttachmentBeginInfo,
                                                 renderPass,
                                                 framebuffer,
                                                 {{0, 0}, {attachmentWidth, attachmentHeight}},
                                                 0,
                                                 nullptr};

    // RenderPass should change the image layout of both the swapchain image and the aliased image to PRESENT_SRC_KHR
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(renderPassBeginInfo);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    VkFenceObj fence;
    fence.init(*m_device, VkFenceObj::create_info());
    m_commandBuffer->QueueCommandBuffer(fence);

    VkPresentInfoKHR present = {};
    present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present.pSwapchains = &m_swapchain;
    present.pImageIndices = &current_buffer;
    present.swapchainCount = 1;
    vk::QueuePresentKHR(m_device->m_queue, &present);
    m_errorMonitor->VerifyNotFound();
    DestroySwapchain();
    vk::DestroyRenderPass(m_device->device(), renderPass, nullptr);
    vk::DestroySemaphore(m_device->device(), image_acquired, nullptr);
    vk::DestroyFramebuffer(m_device->device(), framebuffer, nullptr);
}