blob: dd2b80078acad829e6ef866d5bc3f5add70bc901 [file] [log] [blame]
/*
* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (c) 2015-2021 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <olvaffe@gmail.com>
* Author: Chris Forbes <chrisf@ijw.co.nz>
* Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Mike Stroyan <mike@LunarG.com>
* Author: Tobin Ehlis <tobine@google.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: Cody Northrop <cnorthrop@google.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Jeremy Kniager <jeremyk@lunarg.com>
* Author: Shannon McPherson <shannon@lunarg.com>
* Author: John Zulauf <jzulauf@lunarg.com>
*/
#include "../layer_validation_tests.h"
#include "vk_extension_helper.h"
#include <algorithm>
#include <array>
#include <chrono>
#include <memory>
#include <mutex>
#include <thread>
#include "cast_utils.h"
//
// POSITIVE VALIDATION TESTS
//
// These tests do not expect to encounter ANY validation errors; they pass only if that is true
TEST_F(VkPositiveLayerTest, StatelessValidationDisable) {
TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter with stateless validation disabled");
// Turn off stateless (API-parameter) validation via the VK_EXT_validation_features pNext struct at instance creation.
VkValidationFeatureDisableEXT disables[] = {VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT};
VkValidationFeaturesEXT features = {};
features.sType = VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT;
features.disabledValidationFeatureCount = 1;
features.pDisabledValidationFeatures = disables;
VkCommandPoolCreateFlags pool_flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, pool_flags, &features));
m_errorMonitor->ExpectSuccess();
// Specify a non-zero value for a reserved VkFlags parameter. Normally this is expected to trigger a stateless
// validation error, but that validation was disabled via the features extension, so no errors should be forthcoming.
VkEvent event_handle = VK_NULL_HANDLE;
VkEventCreateInfo event_info = {};
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
event_info.flags = 1;  // deliberately non-zero: VkEventCreateInfo::flags is reserved and must be 0 per spec
vk::CreateEvent(device(), &event_info, NULL, &event_handle);
vk::DestroyEvent(device(), event_handle, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, TestDestroyFreeNullHandles) {
VkResult err;
TEST_DESCRIPTION("Call all applicable destroy and free routines with NULL handles, expecting no validation errors");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
// Per the Vulkan spec, destroying a VK_NULL_HANDLE object is a no-op and must not generate validation errors.
vk::DestroyBuffer(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyBufferView(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyCommandPool(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyDescriptorPool(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyDescriptorSetLayout(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyDevice(VK_NULL_HANDLE, NULL);
vk::DestroyEvent(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyFence(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyFramebuffer(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyImage(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyImageView(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyInstance(VK_NULL_HANDLE, NULL);
vk::DestroyPipeline(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyPipelineCache(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyPipelineLayout(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyQueryPool(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyRenderPass(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroySampler(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroySemaphore(m_device->device(), VK_NULL_HANDLE, NULL);
vk::DestroyShaderModule(m_device->device(), VK_NULL_HANDLE, NULL);
// Free an array of command buffers where only the middle element is valid; the VK_NULL_HANDLE
// entries at indices 0 and 2 must be silently ignored by vkFreeCommandBuffers.
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vk::CreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffers[3] = {};
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vk::AllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffers[1]);
vk::FreeCommandBuffers(m_device->device(), command_pool, 3, command_buffers);
vk::DestroyCommandPool(m_device->device(), command_pool, NULL);
// Same pattern for descriptor sets: only descriptor_sets[1] is allocated; the NULL entries must be ignored.
// The pool needs FREE_DESCRIPTOR_SET_BIT so that vkFreeDescriptorSets is legal at all.
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vk::CreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 2;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptor_sets[3] = {};
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vk::AllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_sets[1]);
ASSERT_VK_SUCCESS(err);
vk::FreeDescriptorSets(m_device->device(), ds_pool, 3, descriptor_sets);
vk::DestroyDescriptorPool(m_device->device(), ds_pool, NULL);
vk::FreeMemory(m_device->device(), VK_NULL_HANDLE, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, Maintenance1Tests) {
    TEST_DESCRIPTION("Validate various special cases for the Maintenance1_KHR extension");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME)) {
        printf("%s Maintenance1 Extension not supported, skipping tests\n", kSkipPrefix);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());

    m_errorMonitor->ExpectSuccess();
    VkCommandBufferObj command_buffer(m_device, m_commandPool);
    command_buffer.begin();
    // A negative viewport height is only legal when Maintenance1 is enabled; since it is, no error is expected.
    const VkViewport neg_height_viewport = {0, 0, 16, -16, 0, 1};
    vk::CmdSetViewport(command_buffer.handle(), 0, 1, &neg_height_viewport);
    command_buffer.end();
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ValidStructPNext) {
    TEST_DESCRIPTION("Verify that a valid pNext value is handled correctly");

    // Positive test to check parameter_validation and unique_objects support for NV_dedicated_allocation
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
        printf("%s VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME Extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());

    m_errorMonitor->ExpectSuccess();

    // Chain a dedicated-allocation request onto the buffer create info via pNext.
    VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_ci = {};
    dedicated_buffer_ci.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV;
    dedicated_buffer_ci.pNext = nullptr;
    dedicated_buffer_ci.dedicatedAllocation = VK_TRUE;

    uint32_t queue_family = 0;
    VkBufferCreateInfo buffer_ci = {};
    buffer_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_ci.pNext = &dedicated_buffer_ci;
    buffer_ci.size = 1024;
    buffer_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buffer_ci.queueFamilyIndexCount = 1;
    buffer_ci.pQueueFamilyIndices = &queue_family;

    VkBuffer buffer;
    VkResult result = vk::CreateBuffer(m_device->device(), &buffer_ci, NULL, &buffer);
    ASSERT_VK_SUCCESS(result);

    VkMemoryRequirements mem_reqs;
    vk::GetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);

    // The matching dedicated-allocation struct is chained onto the memory allocate info.
    VkDedicatedAllocationMemoryAllocateInfoNV dedicated_mem_info = {};
    dedicated_mem_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV;
    dedicated_mem_info.pNext = nullptr;
    dedicated_mem_info.buffer = buffer;
    dedicated_mem_info.image = VK_NULL_HANDLE;

    VkMemoryAllocateInfo mem_ai = {};
    mem_ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_ai.pNext = &dedicated_mem_info;
    mem_ai.allocationSize = mem_reqs.size;
    ASSERT_TRUE(m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_ai, 0));

    VkDeviceMemory buffer_memory;
    result = vk::AllocateMemory(m_device->device(), &mem_ai, NULL, &buffer_memory);
    ASSERT_VK_SUCCESS(result);
    result = vk::BindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
    ASSERT_VK_SUCCESS(result);

    vk::DestroyBuffer(m_device->device(), buffer, NULL);
    vk::FreeMemory(m_device->device(), buffer_memory, NULL);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ParameterLayerFeatures2Capture) {
    TEST_DESCRIPTION("Ensure parameter_validation_layer correctly captures physical device features");
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    VkResult err;
    m_errorMonitor->ExpectSuccess();

    // Query the implementation's full feature set; the device created below enables exactly these features
    // through the pNext chain, so the layers should capture them from there.
    VkPhysicalDeviceFeatures2KHR features2;
    features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
    features2.pNext = nullptr;
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    // We're not creating a valid m_device, but the phy wrapper is useful
    vk_testing::PhysicalDevice physical_device(gpu());
    vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties());
    // Only request creation with queuefamilies that have at least one queue
    std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
    auto qci = queue_info.data();
    for (uint32_t i = 0; i < queue_info.size(); ++i) {
        if (qci[i].queueCount) {
            create_queue_infos.push_back(qci[i]);
        }
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = &features2;  // enable features via the pNext chain; pEnabledFeatures must then be null
    dev_info.flags = 0;
    // Explicit cast avoids an implicit size_t -> uint32_t narrowing conversion warning.
    dev_info.queueCreateInfoCount = static_cast<uint32_t>(create_queue_infos.size());
    dev_info.pQueueCreateInfos = create_queue_infos.data();
    dev_info.enabledLayerCount = 0;
    dev_info.ppEnabledLayerNames = nullptr;
    dev_info.enabledExtensionCount = 0;
    dev_info.ppEnabledExtensionNames = nullptr;
    dev_info.pEnabledFeatures = nullptr;
    VkDevice device;
    err = vk::CreateDevice(gpu(), &dev_info, nullptr, &device);
    ASSERT_VK_SUCCESS(err);

    if (features2.features.samplerAnisotropy) {
        // Test that the parameter layer is caching the features correctly using CreateSampler
        VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
        // If the features were not captured correctly, this should cause an error
        sampler_ci.anisotropyEnable = VK_TRUE;
        sampler_ci.maxAnisotropy = physical_device.properties().limits.maxSamplerAnisotropy;
        VkSampler sampler = VK_NULL_HANDLE;
        err = vk::CreateSampler(device, &sampler_ci, nullptr, &sampler);
        ASSERT_VK_SUCCESS(err);
        vk::DestroySampler(device, sampler, nullptr);
    } else {
        printf("%s Feature samplerAnisotropy not enabled; parameter_layer check skipped.\n", kSkipPrefix);
    }

    // Verify the core validation layer has captured the physical device features by creating a query pool.
    if (features2.features.pipelineStatisticsQuery) {
        VkQueryPool query_pool;
        VkQueryPoolCreateInfo qpci{};
        qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
        qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
        qpci.queryCount = 1;
        err = vk::CreateQueryPool(device, &qpci, nullptr, &query_pool);
        ASSERT_VK_SUCCESS(err);
        vk::DestroyQueryPool(device, query_pool, nullptr);
    } else {
        printf("%s Feature pipelineStatisticsQuery not enabled; core_validation_layer check skipped.\n", kSkipPrefix);
    }

    vk::DestroyDevice(device, nullptr);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ApiVersionZero) {
TEST_DESCRIPTION("Check that apiVersion = 0 is valid.");
m_errorMonitor->ExpectSuccess();
// Per the Vulkan spec, apiVersion == 0 must be treated as VK_API_VERSION_1_0, so instance creation succeeds.
app_info_.apiVersion = 0U;
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, RayTracingPipelineNV) {
    TEST_DESCRIPTION("Test VK_NV_ray_tracing.");

    if (!CreateNVRayTracingPipelineHelper::InitInstanceExtensions(*this, m_instance_extension_names)) {
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Sanity-check that the physical-device-properties2 entry point resolved; the helper relies on it.
    auto pfn_get_features2 =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(pfn_get_features2 != nullptr);

    if (!CreateNVRayTracingPipelineHelper::InitDeviceExtensions(*this, m_device_extension_names)) {
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // No pipeline customization: the helper's defaults are expected to pass without validation errors.
    auto no_op_update = [](CreateNVRayTracingPipelineHelper &helper) {};
    CreateNVRayTracingPipelineHelper::OneshotPositiveTest(*this, no_op_update);
}
TEST_F(VkPositiveLayerTest, TestPhysicalDeviceSurfaceSupport) {
    TEST_DESCRIPTION("Test if physical device supports surface.");
    SetTargetApiVersion(VK_API_VERSION_1_1);

    if (!AddSurfaceInstanceExtension()) {
        printf("%s surface extensions not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s Test requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    if (!InitSurface()) {
        printf("%s Cannot create surface, skipping test\n", kSkipPrefix);
        return;
    }

    m_errorMonitor->ExpectSuccess();
    VkBool32 supported;
    vk::GetPhysicalDeviceSurfaceSupportKHR(gpu(), 0, m_surface, &supported);
    if (supported) {
        // Querying surface formats for a queue family that supports the surface must not trigger errors.
        uint32_t count;
        vk::GetPhysicalDeviceSurfaceFormatsKHR(gpu(), m_surface, &count, nullptr);
    }
    // Bug fix: the original test called ExpectSuccess() but never VerifyNotFound(), so any validation
    // error produced above was never actually checked and the test could not fail.
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ModifyPnext) {
    TEST_DESCRIPTION("Make sure invalid values in pNext structures are ignored at query time");

    m_errorMonitor->ExpectSuccess();
    SetTargetApiVersion(VK_API_VERSION_1_2);
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s test requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }
    if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME);
    } else {
        printf("%s test requires %s\n", kSkipPrefix, VK_NV_FRAGMENT_SHADING_RATE_ENUMS_EXTENSION_NAME);
        // Bug fix: the original printed this skip message but fell through and queried a properties
        // struct from an extension the device does not support. Skip properly instead.
        return;
    }
    // Prime the queried struct with an invalid value; vkGetPhysicalDeviceProperties2 must overwrite
    // (not validate) the contents of an output pNext struct, so no error is expected.
    auto shading = LvlInitStruct<VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV>();
    shading.maxFragmentShadingRateInvocationCount = static_cast<VkSampleCountFlagBits>(0);
    auto props = LvlInitStruct<VkPhysicalDeviceProperties2>(&shading);
    vk::GetPhysicalDeviceProperties2(gpu(), &props);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, HostQueryResetSuccess) {
    // This is a positive test. No failures are expected.
    TEST_DESCRIPTION("Use vkResetQueryPoolEXT normally");

    if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME)) {
        printf("%s Extension %s not supported by device; skipped.\n", kSkipPrefix, VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);
        return;
    }
    m_device_extension_names.push_back(VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME);

    // The hostQueryReset feature must be explicitly enabled for vkResetQueryPoolEXT to be legal.
    VkPhysicalDeviceHostQueryResetFeaturesEXT host_query_reset_features{};
    host_query_reset_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT;
    host_query_reset_features.hostQueryReset = VK_TRUE;
    VkPhysicalDeviceFeatures2 pd_features2{};
    pd_features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    pd_features2.pNext = &host_query_reset_features;
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2));

    auto fpvkResetQueryPoolEXT = (PFN_vkResetQueryPoolEXT)vk::GetDeviceProcAddr(m_device->device(), "vkResetQueryPoolEXT");
    // Bug fix: the function pointer was called without a null check; assert it resolved, consistent
    // with the other GetProcAddr uses in this file.
    ASSERT_TRUE(fpvkResetQueryPoolEXT != nullptr);

    m_errorMonitor->ExpectSuccess();
    VkQueryPool query_pool;
    VkQueryPoolCreateInfo query_pool_create_info{};
    query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
    query_pool_create_info.queryCount = 1;
    vk::CreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
    // Resetting from the host (no command buffer) should produce no validation errors.
    fpvkResetQueryPoolEXT(m_device->device(), query_pool, 0, 1);
    vk::DestroyQueryPool(m_device->device(), query_pool, nullptr);
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, UseFirstQueueUnqueried) {
    TEST_DESCRIPTION("Use first queue family and one queue without first querying with vkGetPhysicalDeviceQueueFamilyProperties");
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Request one queue from family 0 without ever having called vkGetPhysicalDeviceQueueFamilyProperties;
    // device creation must still succeed without validation errors.
    const float priority = 1.0f;
    VkDeviceQueueCreateInfo queue_info = {};
    queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queue_info.queueFamilyIndex = 0;
    queue_info.queueCount = 1;
    queue_info.pQueuePriorities = &priority;

    VkDeviceCreateInfo create_info = {};
    create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    create_info.queueCreateInfoCount = 1;
    create_info.pQueueCreateInfos = &queue_info;

    m_errorMonitor->ExpectSuccess();
    VkDevice test_device;
    vk::CreateDevice(gpu(), &create_info, nullptr, &test_device);
    m_errorMonitor->VerifyNotFound();

    vk::DestroyDevice(test_device, nullptr);
}
// Android loader returns an error in this case
#if !defined(ANDROID)
TEST_F(VkPositiveLayerTest, GetDevProcAddrNullPtr) {
    TEST_DESCRIPTION("Call GetDeviceProcAddr on an enabled instance extension expecting nullptr");
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (InstanceExtensionSupported(VK_KHR_SURFACE_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
    } else {
        printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_SURFACE_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->ExpectSuccess();
    // vkDestroySurfaceKHR is an instance-level command, so vkGetDeviceProcAddr must return null for it.
    // Bug fix: cast to the matching PFN_vkDestroySurfaceKHR type; the original cast to
    // PFN_vkCreateValidationCacheEXT, an unrelated function-pointer type (copy/paste error).
    auto fpDestroySurface = (PFN_vkDestroySurfaceKHR)vk::GetDeviceProcAddr(m_device->device(), "vkDestroySurfaceKHR");
    if (fpDestroySurface) {
        m_errorMonitor->SetError("Null was expected!");
    }
    m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, GetDevProcAddrExtensions) {
    TEST_DESCRIPTION("Call GetDeviceProcAddr with and without extension enabled");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s GetDevProcAddrExtensions requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->ExpectSuccess();

    // On the default device (no KHR extension enabled): the core 1.1 name must resolve,
    // the extension alias must not.
    auto vkTrimCommandPool = vk::GetDeviceProcAddr(m_device->device(), "vkTrimCommandPool");
    auto vkTrimCommandPoolKHR = vk::GetDeviceProcAddr(m_device->device(), "vkTrimCommandPoolKHR");
    if (nullptr == vkTrimCommandPool) m_errorMonitor->SetError("Unexpected null pointer");
    if (nullptr != vkTrimCommandPoolKHR) m_errorMonitor->SetError("Didn't receive expected null pointer");

    // Create a second device with VK_KHR_maintenance1 enabled; now the KHR alias must resolve.
    const char *const extension = {VK_KHR_MAINTENANCE_1_EXTENSION_NAME};
    const float q_priority[] = {1.0f};
    VkDeviceQueueCreateInfo queue_ci = {};
    queue_ci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queue_ci.queueFamilyIndex = 0;
    queue_ci.queueCount = 1;
    queue_ci.pQueuePriorities = q_priority;
    VkDeviceCreateInfo device_ci = {};
    device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_ci.enabledExtensionCount = 1;
    device_ci.ppEnabledExtensionNames = &extension;
    device_ci.queueCreateInfoCount = 1;
    device_ci.pQueueCreateInfos = &queue_ci;
    VkDevice device;
    // Bug fix: the create result was unchecked; a failure would pass an invalid handle to the calls below.
    VkResult err = vk::CreateDevice(gpu(), &device_ci, NULL, &device);
    ASSERT_VK_SUCCESS(err);
    vkTrimCommandPoolKHR = vk::GetDeviceProcAddr(device, "vkTrimCommandPoolKHR");
    if (nullptr == vkTrimCommandPoolKHR) m_errorMonitor->SetError("Unexpected null pointer");
    m_errorMonitor->VerifyNotFound();
    vk::DestroyDevice(device, nullptr);
}
#endif
TEST_F(VkPositiveLayerTest, Vulkan12Features) {
    TEST_DESCRIPTION("Enable feature via Vulkan12features struct");
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Vulkan12Struct requires Vulkan 1.2+, skipping test\n", kSkipPrefix);
        return;
    }

    VkPhysicalDeviceFeatures2 features2 = {};
    auto bda_features = LvlInitStruct<VkPhysicalDeviceBufferDeviceAddressFeatures>();
    PFN_vkGetPhysicalDeviceFeatures2 vkGetPhysicalDeviceFeatures2 =
        (PFN_vkGetPhysicalDeviceFeatures2)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2 != nullptr);
    features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&bda_features);
    vkGetPhysicalDeviceFeatures2(gpu(), &features2);
    if (!bda_features.bufferDeviceAddress) {
        printf("Buffer Device Address feature not supported, skipping test\n");
        return;
    }

    m_errorMonitor->ExpectSuccess();
    // Enable bufferDeviceAddress through the core VkPhysicalDeviceVulkan12Features struct rather than
    // the extension feature struct used for the query above.
    VkPhysicalDeviceVulkan12Features features12 = {};
    features12.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;
    features12.bufferDeviceAddress = VK_TRUE;  // fix: VkBool32 member; use VK_TRUE, not C++ `true`
    features2.pNext = &features12;
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    uint32_t qfi = 0;
    VkBufferCreateInfo bci = {};
    bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bci.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
    bci.size = 8;
    bci.queueFamilyIndexCount = 1;
    bci.pQueueFamilyIndices = &qfi;
    VkBuffer buffer;
    // Bug fix: check the create result instead of using a potentially invalid handle below.
    VkResult err = vk::CreateBuffer(m_device->device(), &bci, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);
    VkMemoryRequirements buffer_mem_reqs = {};
    vk::GetBufferMemoryRequirements(m_device->device(), buffer, &buffer_mem_reqs);
    VkMemoryAllocateInfo buffer_alloc_info = {};
    buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    buffer_alloc_info.allocationSize = buffer_mem_reqs.size;
    m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0);
    // DEVICE_ADDRESS usage requires the DEVICE_ADDRESS allocate flag.
    VkMemoryAllocateFlagsInfo alloc_flags = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO};
    alloc_flags.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
    buffer_alloc_info.pNext = &alloc_flags;
    VkDeviceMemory buffer_mem;
    err = vk::AllocateMemory(m_device->device(), &buffer_alloc_info, NULL, &buffer_mem);
    ASSERT_VK_SUCCESS(err);
    err = vk::BindBufferMemory(m_device->device(), buffer, buffer_mem, 0);
    ASSERT_VK_SUCCESS(err);  // was unchecked

    // The core entry point must work with the feature enabled via Vulkan12Features.
    VkBufferDeviceAddressInfo bda_info = {};
    bda_info.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO;
    bda_info.buffer = buffer;
    auto vkGetBufferDeviceAddress =
        (PFN_vkGetBufferDeviceAddress)vk::GetDeviceProcAddr(m_device->device(), "vkGetBufferDeviceAddress");
    ASSERT_TRUE(vkGetBufferDeviceAddress != nullptr);
    vkGetBufferDeviceAddress(m_device->device(), &bda_info);
    m_errorMonitor->VerifyNotFound();

    // Also verify that we don't get the KHR extension address without enabling the KHR extension
    auto vkGetBufferDeviceAddressKHR =
        (PFN_vkGetBufferDeviceAddressKHR)vk::GetDeviceProcAddr(m_device->device(), "vkGetBufferDeviceAddressKHR");
    if (nullptr != vkGetBufferDeviceAddressKHR) m_errorMonitor->SetError("Didn't receive expected null pointer");
    m_errorMonitor->VerifyNotFound();

    vk::DestroyBuffer(m_device->device(), buffer, NULL);
    vk::FreeMemory(m_device->device(), buffer_mem, NULL);
}
TEST_F(VkPositiveLayerTest, QueueThreading) {
TEST_DESCRIPTION("Test concurrent Queue access from vkGet and vkSubmit");
using namespace std::chrono;
using std::thread;
ASSERT_NO_FATAL_FAILURE(InitFramework());
ASSERT_NO_FATAL_FAILURE(InitState());
const auto queue_family = DeviceObj()->GetDefaultQueue()->get_family_index();
constexpr uint32_t queue_index = 0;
VkCommandPoolObj command_pool(DeviceObj(), queue_family);
const VkDevice device_h = device();
VkQueue queue_h;
vk::GetDeviceQueue(device(), queue_family, queue_index, &queue_h);
VkQueueObj queue_o(queue_h, queue_family);
// A reusable command buffer; SIMULTANEOUS_USE allows it to be pending on the queue more than once.
const VkCommandBufferAllocateInfo cbai = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, nullptr, command_pool.handle(),
VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1};
vk_testing::CommandBuffer mock_cmdbuff(*DeviceObj(), cbai);
const VkCommandBufferBeginInfo cbbi{VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, nullptr};
mock_cmdbuff.begin(&cbbi);
mock_cmdbuff.end();
// The mutex serializes QueueSubmit vs QueueWaitIdle (which require external sync on the queue);
// the GetDeviceQueue thread deliberately runs unlocked to stress the layers' internal thread safety.
std::mutex queue_mutex;
constexpr auto test_duration = seconds{2};
const auto timer_begin = steady_clock::now();
// Thread 1: hammer vkGetDeviceQueue for the duration of the test, with no locking.
const auto &testing_thread1 = [&]() {
for (auto timer_now = steady_clock::now(); timer_now - timer_begin < test_duration; timer_now = steady_clock::now()) {
VkQueue dummy_q;
vk::GetDeviceQueue(device_h, queue_family, queue_index, &dummy_q);
}
};
// Thread 2: repeatedly submit the simultaneous-use command buffer, under the queue mutex.
const auto &testing_thread2 = [&]() {
for (auto timer_now = steady_clock::now(); timer_now - timer_begin < test_duration; timer_now = steady_clock::now()) {
VkSubmitInfo si = {};
si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
si.commandBufferCount = 1;
si.pCommandBuffers = &mock_cmdbuff.handle();
queue_mutex.lock();
ASSERT_VK_SUCCESS(vk::QueueSubmit(queue_h, 1, &si, VK_NULL_HANDLE));
queue_mutex.unlock();
}
};
// Thread 3: repeatedly wait for the queue to idle, under the same mutex.
const auto &testing_thread3 = [&]() {
for (auto timer_now = steady_clock::now(); timer_now - timer_begin < test_duration; timer_now = steady_clock::now()) {
queue_mutex.lock();
ASSERT_VK_SUCCESS(vk::QueueWaitIdle(queue_h));
queue_mutex.unlock();
}
};
// No validation errors (in particular, no THREADING errors) are expected from the concurrent access.
Monitor().ExpectSuccess();
std::array<thread, 3> threads = {thread(testing_thread1), thread(testing_thread2), thread(testing_thread3)};
for (auto &t : threads) t.join();
Monitor().VerifyNotFound();
// Drain the queue before the fixture tears down the device.
vk::QueueWaitIdle(queue_h);
}