blob: 6f2ebc35d87045fa1e823fbf531b6ffa711f73bf [file] [log] [blame]
/*
* Copyright (c) 2015-2017 The Khronos Group Inc.
* Copyright (c) 2015-2017 Valve Corporation
* Copyright (c) 2015-2017 LunarG, Inc.
* Copyright (c) 2015-2017 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <olvaffe@gmail.com>
* Author: Chris Forbes <chrisf@ijw.co.nz>
* Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Mike Stroyan <mike@LunarG.com>
* Author: Tobin Ehlis <tobine@google.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: Cody Northrop <cnorthrop@google.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Jeremy Kniager <jeremyk@lunarg.com>
*/
#ifdef ANDROID
#include "vulkan_wrapper.h"
#else
#define NOMINMAX
#include <vulkan/vulkan.h>
#endif
#include "layers/vk_device_profile_api_layer.h"
#if defined(ANDROID) && defined(VALIDATION_APK)
#include <android/log.h>
#include <android_native_app_glue.h>
#endif
#include "icd-spv.h"
#include "test_common.h"
#include "vk_format_utils.h"
#include "vk_layer_config.h"
#include "vk_validation_error_messages.h"
#include "vkrenderframework.h"
#include "vk_typemap_helper.h"
#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
#include <memory>
#include <unordered_set>
//--------------------------------------------------------------------------------------
// Mesh and VertexFormat Data
//--------------------------------------------------------------------------------------
// Selects which piece of pipeline/draw state the triangle-test helpers
// deliberately leave invalid or unbound, so that a specific validation
// error is provoked. BsoFailNone draws a correctly configured triangle.
enum BsoFailSelect {
    BsoFailNone,
    // Dynamic-state cases: the state is declared dynamic but never set on the command buffer.
    BsoFailLineWidth,
    BsoFailDepthBias,
    BsoFailViewport,
    BsoFailScissor,
    BsoFailBlend,
    BsoFailDepthBounds,
    BsoFailStencilReadMask,
    BsoFailStencilWriteMask,
    BsoFailStencilReference,
    // Records a vkCmdClearAttachments with an out-of-range color attachment index.
    BsoFailCmdClearAttachments,
    // Index-buffer cases: draw without a bound index buffer, or draw past its end.
    BsoFailIndexBuffer,
    BsoFailIndexBufferBadSize,
    BsoFailIndexBufferBadOffset,
    BsoFailIndexBufferBadMapSize,
    BsoFailIndexBufferBadMapOffset
};
// Minimal vertex shader used by the bind-state tests: synthesizes a triangle
// from gl_VertexIndex so no vertex buffers need to be bound.
static const char bindStateVertShaderText[] =
    "#version 450\n"
    "vec2 vertices[3];\n"
    "void main() {\n"
    "   vertices[0] = vec2(-1.0, -1.0);\n"
    "   vertices[1] = vec2( 1.0, -1.0);\n"
    "   vertices[2] = vec2( 0.0,  1.0);\n"
    "   gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n"
    "}\n";
// Minimal fragment shader used by the bind-state tests: writes constant green.
static const char bindStateFragShaderText[] =
    "#version 450\n"
    "\n"
    "layout(location = 0) out vec4 uFragColor;\n"
    "void main(){\n"
    "   uFragColor = vec4(0,1,0,1);\n"
    "}\n";
// Static arrays helper
// Returns the number of elements in a C array. Taking the array by reference
// (rather than a pointer) makes passing a decayed pointer a compile error.
// constexpr/noexcept so the count is also usable in constant expressions.
template <class ElementT, size_t array_size>
constexpr size_t size(ElementT (&)[array_size]) noexcept {
    return array_size;
}
// Format search helper
// Returns the first combined depth/stencil format that the physical device
// supports as an optimal-tiling depth/stencil attachment, or
// VK_FORMAT_UNDEFINED if none of the candidates is supported.
VkFormat FindSupportedDepthStencilFormat(VkPhysicalDevice phy) {
    const VkFormat ds_formats[] = {VK_FORMAT_D16_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT};
    // BUGFIX: the loop previously ran to sizeof(ds_formats) (the byte size, 12),
    // reading past the end of the 3-element array. Iterate over the element count.
    for (uint32_t i = 0; i < sizeof(ds_formats) / sizeof(ds_formats[0]); i++) {
        VkFormatProperties format_props;
        vkGetPhysicalDeviceFormatProperties(phy, ds_formats[i], &format_props);

        if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
            return ds_formats[i];
        }
    }
    return VK_FORMAT_UNDEFINED;
}
// Returns true if *any* requested features are available.
// Assumption is that the framework can successfully create an image as
// long as at least one of the feature bits is present (excepting VTX_BUF).
// Returns true if *any* of the requested format features are available for
// the given tiling. The default feature mask accepts everything except the
// vertex-buffer bit, matching the framework's image-creation assumption.
bool ImageFormatIsSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL,
                            VkFormatFeatureFlags features = ~VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) {
    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties(phy, format, &props);
    // Pick the feature set that corresponds to the requested tiling.
    const VkFormatFeatureFlags available =
        (tiling == VK_IMAGE_TILING_OPTIMAL) ? props.optimalTilingFeatures : props.linearTilingFeatures;
    return (available & features) != 0;
}
// Returns true if format and *all* requested features are available.
// Returns true only if *all* requested format features are available for the
// given tiling on this physical device.
bool ImageFormatAndFeaturesSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling, VkFormatFeatureFlags features) {
    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties(phy, format, &props);
    const VkFormatFeatureFlags available =
        (tiling == VK_IMAGE_TILING_OPTIMAL) ? props.optimalTilingFeatures : props.linearTilingFeatures;
    // Every requested bit must be present.
    return (available & features) == features;
}
// Returns true if format and *all* requested features are available.
// Returns true if the image described by `info` can be created: the format
// must support all requested features for its tiling, and
// vkGetPhysicalDeviceImageFormatProperties must accept the full combination
// of type/tiling/usage/flags. `inst` is only consumed by the disabled
// version-2 query below; it is retained for when that chunk is re-enabled.
bool ImageFormatAndFeaturesSupported(const VkInstance inst, const VkPhysicalDevice phy, const VkImageCreateInfo info,
                                     const VkFormatFeatureFlags features) {
    // Verify physical device support of format features
    if (!ImageFormatAndFeaturesSupported(phy, info.format, info.tiling, features)) {
        return false;
    }

    // Verify that PhysDevImageFormatProp() also claims support for the specific usage
    VkImageFormatProperties props;
    VkResult err =
        vkGetPhysicalDeviceImageFormatProperties(phy, info.format, info.imageType, info.tiling, info.usage, info.flags, &props);
    if (VK_SUCCESS != err) {
        return false;
    }

#if 0  // Convinced this chunk doesn't currently add any additional info, but leaving in place because it may be
       // necessary with future extensions

    // Verify again using version 2, if supported, which *can* return more property data than the original...
    // (It's not clear that this is any more definitive than using the original version - but no harm)
    PFN_vkGetPhysicalDeviceImageFormatProperties2KHR p_GetPDIFP2KHR =
        (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)vkGetInstanceProcAddr(inst,
                                                                                "vkGetPhysicalDeviceImageFormatProperties2KHR");
    if (NULL != p_GetPDIFP2KHR) {
        VkPhysicalDeviceImageFormatInfo2KHR fmt_info{};
        fmt_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
        fmt_info.pNext = nullptr;
        fmt_info.format = info.format;
        fmt_info.type = info.imageType;
        fmt_info.tiling = info.tiling;
        fmt_info.usage = info.usage;
        fmt_info.flags = info.flags;

        VkImageFormatProperties2KHR fmt_props = {};
        fmt_props.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
        err = p_GetPDIFP2KHR(phy, &fmt_info, &fmt_props);
        if (VK_SUCCESS != err) {
            return false;
        }
    }
#endif

    return true;
}
// Validation report callback prototype
static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject,
size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg,
void *pUserData);
// Simple sane SamplerCreateInfo boilerplate
// Builds a fully-populated, validation-clean VkSamplerCreateInfo that tests
// can use as a starting point and then perturb one field at a time.
static VkSamplerCreateInfo SafeSaneSamplerCreateInfo() {
    VkSamplerCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    ci.pNext = nullptr;
    // Filtering / mipmapping: nearest everywhere, flat LOD range.
    ci.magFilter = VK_FILTER_NEAREST;
    ci.minFilter = VK_FILTER_NEAREST;
    ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
    ci.mipLodBias = 1.0f;
    ci.minLod = 1.0f;
    ci.maxLod = 1.0f;
    // Addressing: clamp on all three axes.
    ci.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    ci.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    // Anisotropy and compare disabled; values still set so they are well-defined.
    ci.anisotropyEnable = VK_FALSE;
    ci.maxAnisotropy = 1.0f;
    ci.compareEnable = VK_FALSE;
    ci.compareOp = VK_COMPARE_OP_NEVER;
    ci.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
    ci.unnormalizedCoordinates = VK_FALSE;
    return ci;
}
// Dependent "false" type for the static assert, as GCC will evaluate
// non-dependent static_asserts even for non-instantiated templates
// Usage: static_assert(AlwaysFalse<T>::value, "...") in the branch that must
// not be instantiated; ::value only becomes false once T is actually deduced.
template <typename T>
struct AlwaysFalse : std::false_type {};
// Helpers to get nearest greater or smaller value (of float) -- useful for testing the boundary cases of Vulkan limits
// Returns the representable value immediately above `from` -- handy for
// probing the upper boundary of a Vulkan limit.
template <typename T>
T NearestGreater(const T from) {
    using Lim = std::numeric_limits<T>;
    // Step toward +infinity when the type has one, otherwise toward its max.
    return std::nextafter(from, Lim::has_infinity ? Lim::infinity() : Lim::max());
}
// Returns the representable value immediately below `from` -- handy for
// probing the lower boundary of a Vulkan limit.
template <typename T>
T NearestSmaller(const T from) {
    using Lim = std::numeric_limits<T>;
    // Step toward -infinity when the type has one, otherwise toward its lowest value.
    return std::nextafter(from, Lim::has_infinity ? -Lim::infinity() : Lim::lowest());
}
// ErrorMonitor Usage:
//
// Call SetDesiredFailureMsg with a string to be compared against all
// encountered log messages, or a validation error enum identifying
// desired error message. Passing NULL or VALIDATION_ERROR_MAX_ENUM
// will match all log messages. logMsg will return true for skipCall
// only if msg is matched or NULL.
//
// Call VerifyFound to determine if all desired failure messages
// were encountered. Call VerifyNotFound to determine if any unexpected
// failure was encountered.
// Receives validation-layer output via the debug-report callback and matches
// it against the failure strings / message IDs that the running test declared
// it expects. All mutable state is guarded by mutex_, since the callback can
// fire from driver/layer threads.
class ErrorMonitor {
   public:
    ErrorMonitor() {
        test_platform_thread_create_mutex(&mutex_);
        test_platform_thread_lock_mutex(&mutex_);
        Reset();
        test_platform_thread_unlock_mutex(&mutex_);
    }

    ~ErrorMonitor() { test_platform_thread_delete_mutex(&mutex_); }

    // Set monitor to pristine state
    // NOTE: callers are expected to hold mutex_ or be single-threaded here.
    void Reset() {
        message_flags_ = VK_DEBUG_REPORT_ERROR_BIT_EXT;
        bailout_ = NULL;
        message_found_ = VK_FALSE;
        failure_message_strings_.clear();
        desired_message_strings_.clear();
        desired_message_ids_.clear();
        ignore_message_strings_.clear();
        other_messages_.clear();
        message_outstanding_count_ = 0;
    }

    // ErrorMonitor will look for an error message containing the specified string(s)
    void SetDesiredFailureMsg(const VkFlags msgFlags, const char *const msgString) {
        test_platform_thread_lock_mutex(&mutex_);
        desired_message_strings_.insert(msgString);
        message_flags_ |= msgFlags;
        message_outstanding_count_++;
        test_platform_thread_unlock_mutex(&mutex_);
    }

    // ErrorMonitor will look for an error message containing the specified string(s)
    template <typename Iter>
    void SetDesiredFailureMsg(const VkFlags msgFlags, Iter iter, const Iter end) {
        for (; iter != end; ++iter) {
            SetDesiredFailureMsg(msgFlags, *iter);
        }
    }

    // ErrorMonitor will look for a message ID matching the specified one(s)
    void SetDesiredFailureMsg(const VkFlags msgFlags, const UNIQUE_VALIDATION_ERROR_CODE msg_id) {
        test_platform_thread_lock_mutex(&mutex_);
        desired_message_ids_.insert(msg_id);
        message_flags_ |= msgFlags;
        message_outstanding_count_++;
        test_platform_thread_unlock_mutex(&mutex_);
    }

    // Set an error that the error monitor will ignore. Do not use this function if you are creating a new test.
    // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this
    // function and its definition.
    void SetUnexpectedError(const char *const msg) {
        test_platform_thread_lock_mutex(&mutex_);
        ignore_message_strings_.emplace_back(msg);
        test_platform_thread_unlock_mutex(&mutex_);
    }

    // Called from the debug-report callback for every message. Returns VK_TRUE
    // (skip call into layers/driver) only when the message matched an expected
    // string/ID or a "match anything" sentinel ("" or VALIDATION_ERROR_MAX_ENUM).
    VkBool32 CheckForDesiredMsg(const uint32_t message_code, const char *const msgString) {
        VkBool32 result = VK_FALSE;
        test_platform_thread_lock_mutex(&mutex_);
        if (bailout_ != nullptr) {
            *bailout_ = true;
        }
        string errorString(msgString);
        bool found_expected = false;

        if (!IgnoreMessage(errorString)) {
            for (auto desired_msg : desired_message_strings_) {
                if (desired_msg.length() == 0) {
                    // An empty desired_msg string "" indicates a positive test - not expecting an error.
                    // Return true to avoid calling layers/driver with this error.
                    // And don't erase the "" string, so it remains if another error is found.
                    result = VK_TRUE;
                    found_expected = true;
                    message_found_ = true;
                    failure_message_strings_.insert(errorString);
                } else if (errorString.find(desired_msg) != string::npos) {
                    found_expected = true;
                    message_outstanding_count_--;
                    failure_message_strings_.insert(errorString);
                    message_found_ = true;
                    result = VK_TRUE;
                    // We only want one match for each expected error so remove from set here
                    // Since we're about the break the loop it's OK to remove from set we're iterating over
                    desired_message_strings_.erase(desired_msg);
                    break;
                }
            }

            for (auto desired_id : desired_message_ids_) {
                if (desired_id == VALIDATION_ERROR_MAX_ENUM) {
                    // A message ID set to MAX_ENUM indicates a positive test - not expecting an error.
                    // Return true to avoid calling layers/driver with this error.
                    result = VK_TRUE;
                } else if (desired_id == message_code) {
                    // Double-check that the string matches the error enum
                    if (errorString.find(validation_error_map[desired_id]) != string::npos) {
                        found_expected = true;
                        message_outstanding_count_--;
                        result = VK_TRUE;
                        message_found_ = true;
                        desired_message_ids_.erase(desired_id);
                        break;
                    } else {
                        // Treat this message as a regular unexpected error, but print a warning jic
                        printf("Message (%s) from MessageID %d does not correspond to expected message from error Database (%s)\n",
                               errorString.c_str(), desired_id, validation_error_map[desired_id]);
                    }
                }
            }

            if (!found_expected) {
                printf("Unexpected: %s\n", msgString);
                other_messages_.push_back(errorString);
            }
        }

        test_platform_thread_unlock_mutex(&mutex_);
        return result;
    }

    vector<string> GetOtherFailureMsgs() const { return other_messages_; }

    VkDebugReportFlagsEXT GetMessageFlags() const { return message_flags_; }

    bool AnyDesiredMsgFound() const { return message_found_; }

    bool AllDesiredMsgsFound() const { return (0 == message_outstanding_count_); }

    // When set, *bailout is flipped to true on the first message received;
    // the framework uses this to abort tests that are spamming errors.
    void SetBailout(bool *bailout) { bailout_ = bailout; }

    void DumpFailureMsgs() const {
        vector<string> otherMsgs = GetOtherFailureMsgs();
        if (otherMsgs.size()) {
            cout << "Other error messages logged for this test were:" << endl;
            for (auto iter = otherMsgs.begin(); iter != otherMsgs.end(); iter++) {
                cout << "     " << *iter << endl;
            }
        }
    }

    // Helpers

    // ExpectSuccess now takes an optional argument allowing a custom combination of debug flags
    void ExpectSuccess(VkDebugReportFlagsEXT const message_flag_mask = VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // Match ANY message matching specified type
        SetDesiredFailureMsg(message_flag_mask, "");
        message_flags_ = message_flag_mask;  // override mask handling in SetDesired...
    }

    void VerifyFound() {
        // Not seeing the desired message is a failure. /Before/ throwing, dump any other messages.
        if (!AllDesiredMsgsFound()) {
            DumpFailureMsgs();
            for (auto desired_msg : desired_message_strings_) {
                ADD_FAILURE() << "Did not receive expected error '" << desired_msg << "'";
            }
            for (auto desired_id : desired_message_ids_) {
                ADD_FAILURE() << "Did not receive expected error ENUM '0x" << std::hex << desired_id << "'";
            }
        }
        Reset();
    }

    void VerifyNotFound() {
        // ExpectSuccess() configured us to match anything. Any error is a failure.
        if (AnyDesiredMsgFound()) {
            DumpFailureMsgs();
            for (auto msg : failure_message_strings_) {
                ADD_FAILURE() << "Expected to succeed but got error: " << msg;
            }
        }
        Reset();
    }

   private:
    // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this
    // function and its definition.
    bool IgnoreMessage(std::string const &msg) const {
        if (ignore_message_strings_.empty()) {
            return false;
        }

        return std::find_if(ignore_message_strings_.begin(), ignore_message_strings_.end(), [&msg](std::string const &str) {
                   return msg.find(str) != std::string::npos;
               }) != ignore_message_strings_.end();
    }

    VkFlags message_flags_;                                 // report-flag mask the monitor listens to
    std::unordered_set<uint32_t> desired_message_ids_;      // expected validation error enums
    std::unordered_set<string> desired_message_strings_;    // expected message substrings
    std::unordered_set<string> failure_message_strings_;    // messages that matched an expectation
    std::vector<std::string> ignore_message_strings_;       // stopgap ignore list (see SetUnexpectedError)
    vector<string> other_messages_;                         // messages that matched nothing
    test_platform_thread_mutex mutex_;
    bool *bailout_;
    bool message_found_;
    int message_outstanding_count_;                         // expectations set minus expectations matched
};
// Debug-report callback installed by the test framework; forwards every
// message whose flags the ErrorMonitor is watching into CheckForDesiredMsg.
static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject,
                                                size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg,
                                                void *pUserData) {
    auto *const monitor = static_cast<ErrorMonitor *>(pUserData);
    // Skip messages whose report flags the monitor is not interested in.
    if (!(msgFlags & monitor->GetMessageFlags())) {
        return VK_FALSE;
    }
#ifdef _DEBUG
    // Debug builds append the numeric message code to ease triage of mismatches.
    char embedded_code_string[2048];
    snprintf(embedded_code_string, 2048, "%s [%08x]", pMsg, msgCode);
    return monitor->CheckForDesiredMsg(msgCode, embedded_code_string);
#else
    return monitor->CheckForDesiredMsg(msgCode, pMsg);
#endif
}
class VkLayerTest : public VkRenderFramework {
public:
void VKTriangleTest(BsoFailSelect failCase);
void GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet,
BsoFailSelect failCase);
void Init(VkPhysicalDeviceFeatures *features = nullptr, VkPhysicalDeviceFeatures2 *features2 = nullptr,
const VkCommandPoolCreateFlags flags = 0) {
InitFramework(myDbgFunc, m_errorMonitor);
InitState(features, features2, flags);
}
protected:
ErrorMonitor *m_errorMonitor;
public:
ErrorMonitor *Monitor() { return m_errorMonitor; }
VkCommandBufferObj *CommandBuffer() { return m_commandBuffer; }
protected:
bool m_enableWSI;
virtual void SetUp() {
m_instance_layer_names.clear();
m_instance_extension_names.clear();
m_device_extension_names.clear();
// Add default instance extensions to the list
m_instance_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
// Use Threading layer first to protect others from
// ThreadCommandBufferCollision test
m_instance_layer_names.push_back("VK_LAYER_GOOGLE_threading");
m_instance_layer_names.push_back("VK_LAYER_LUNARG_parameter_validation");
m_instance_layer_names.push_back("VK_LAYER_LUNARG_object_tracker");
m_instance_layer_names.push_back("VK_LAYER_LUNARG_core_validation");
m_instance_layer_names.push_back("VK_LAYER_GOOGLE_unique_objects");
if (VkTestFramework::m_devsim_layer) {
if (InstanceLayerSupported("VK_LAYER_LUNARG_device_simulation")) {
m_instance_layer_names.push_back("VK_LAYER_LUNARG_device_simulation");
} else {
VkTestFramework::m_devsim_layer = false;
printf(" Did not find VK_LAYER_LUNARG_device_simulation layer so it will not be enabled.\n");
}
}
if (m_enableWSI) {
m_instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
#ifdef NEED_TO_TEST_THIS_ON_PLATFORM
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
m_instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_ANDROID_KHR
#if defined(VK_USE_PLATFORM_MIR_KHR)
m_instance_extension_names.push_back(VK_KHR_MIR_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_MIR_KHR
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
m_instance_extension_names.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#if defined(VK_USE_PLATFORM_WIN32_KHR)
m_instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_WIN32_KHR
#endif // NEED_TO_TEST_THIS_ON_PLATFORM
#if defined(VK_USE_PLATFORM_XCB_KHR)
m_instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
#elif defined(VK_USE_PLATFORM_XLIB_KHR)
m_instance_extension_names.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_XLIB_KHR
}
this->app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
this->app_info.pNext = NULL;
this->app_info.pApplicationName = "layer_tests";
this->app_info.applicationVersion = 1;
this->app_info.pEngineName = "unittest";
this->app_info.engineVersion = 1;
this->app_info.apiVersion = VK_API_VERSION_1_0;
m_errorMonitor = new ErrorMonitor;
}
bool LoadDeviceProfileLayer(
PFN_vkSetPhysicalDeviceFormatPropertiesEXT &fpvkSetPhysicalDeviceFormatPropertiesEXT,
PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT &fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT) {
// Load required functions
fpvkSetPhysicalDeviceFormatPropertiesEXT =
(PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT");
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT =
(PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(
instance(), "vkGetOriginalPhysicalDeviceFormatPropertiesEXT");
if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
printf(" Can't find device_profile_api functions; skipped.\n");
return 0;
}
return 1;
}
virtual void TearDown() {
// Clean up resources before we reset
ShutdownFramework();
delete m_errorMonitor;
}
VkLayerTest() { m_enableWSI = false; }
};
// Records and submits a single-triangle draw whose state is deliberately
// broken according to failCase (see BsoFailSelect). Cases that exercise the
// depth/stencil dynamic states attach a depth buffer; index-buffer cases
// bind (or omit) a 1024-byte index buffer and draw past its bounds.
void VkLayerTest::VKTriangleTest(BsoFailSelect failCase) {
    ASSERT_TRUE(m_device && m_device->initialized());  // VKTriangleTest assumes Init() has finished

    ASSERT_NO_FATAL_FAILURE(InitViewport());

    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipelineobj(m_device);
    pipelineobj.AddDefaultColorAttachment();
    pipelineobj.AddShader(&vs);
    pipelineobj.AddShader(&ps);

    bool failcase_needs_depth = false;  // to mark cases that need depth attachment

    VkBufferObj index_buffer;

    // Each dynamic-state case marks the state dynamic on the pipeline but the
    // test never issues the corresponding vkCmdSet* call, so drawing errors out.
    switch (failCase) {
        case BsoFailLineWidth: {
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_LINE_WIDTH);
            VkPipelineInputAssemblyStateCreateInfo ia_state = {};
            ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
            ia_state.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
            pipelineobj.SetInputAssembly(&ia_state);
            break;
        }
        case BsoFailDepthBias: {
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BIAS);
            VkPipelineRasterizationStateCreateInfo rs_state = {};
            rs_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
            rs_state.depthBiasEnable = VK_TRUE;
            rs_state.lineWidth = 1.0f;
            pipelineobj.SetRasterization(&rs_state);
            break;
        }
        case BsoFailViewport: {
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT);
            break;
        }
        case BsoFailScissor: {
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR);
            break;
        }
        case BsoFailBlend: {
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
            VkPipelineColorBlendAttachmentState att_state = {};
            att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
            att_state.blendEnable = VK_TRUE;
            pipelineobj.AddColorAttachment(0, att_state);
            break;
        }
        case BsoFailDepthBounds: {
            failcase_needs_depth = true;
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BOUNDS);
            break;
        }
        case BsoFailStencilReadMask: {
            failcase_needs_depth = true;
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK);
            break;
        }
        case BsoFailStencilWriteMask: {
            failcase_needs_depth = true;
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
            break;
        }
        case BsoFailStencilReference: {
            failcase_needs_depth = true;
            pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_REFERENCE);
            break;
        }

        case BsoFailIndexBuffer:
            break;
        case BsoFailIndexBufferBadSize:
        case BsoFailIndexBufferBadOffset:
        case BsoFailIndexBufferBadMapSize:
        case BsoFailIndexBufferBadMapOffset: {
            // Create an index buffer for these tests.
            // There is no need to populate it because we should bail before trying to draw.
            uint32_t const indices[] = {0};
            VkBufferCreateInfo buffer_info = {};
            buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
            buffer_info.size = 1024;
            buffer_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
            buffer_info.queueFamilyIndexCount = 1;
            buffer_info.pQueueFamilyIndices = indices;
            index_buffer.init(*m_device, buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
        } break;

        case BsoFailCmdClearAttachments:
            break;
        case BsoFailNone:
            break;
        default:
            break;
    }

    VkDescriptorSetObj descriptorSet(m_device);

    // Depth-dependent cases require a depth/stencil attachment on the render pass.
    VkImageView *depth_attachment = nullptr;
    if (failcase_needs_depth) {
        m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu());
        ASSERT_TRUE(m_depth_stencil_fmt != VK_FORMAT_UNDEFINED);

        m_depthStencil->Init(m_device, static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height), m_depth_stencil_fmt,
                             VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
        depth_attachment = m_depthStencil->BindInfo();
    }

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget(1, depth_attachment));
    m_commandBuffer->begin();

    GenericDrawPreparation(m_commandBuffer, pipelineobj, descriptorSet, failCase);

    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

    // render triangle
    if (failCase == BsoFailIndexBuffer) {
        // Use DrawIndexed w/o an index buffer bound
        m_commandBuffer->DrawIndexed(3, 1, 0, 0, 0);
    } else if (failCase == BsoFailIndexBufferBadSize) {
        // Bind the index buffer and draw one too many indices
        m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16);
        m_commandBuffer->DrawIndexed(513, 1, 0, 0, 0);
    } else if (failCase == BsoFailIndexBufferBadOffset) {
        // Bind the index buffer and draw one past the end of the buffer using the offset
        m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16);
        m_commandBuffer->DrawIndexed(512, 1, 1, 0, 0);
    } else if (failCase == BsoFailIndexBufferBadMapSize) {
        // Bind the index buffer at the middle point and draw one too many indices
        m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16);
        m_commandBuffer->DrawIndexed(257, 1, 0, 0, 0);
    } else if (failCase == BsoFailIndexBufferBadMapOffset) {
        // Bind the index buffer at the middle point and draw one past the end of the buffer
        m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16);
        m_commandBuffer->DrawIndexed(256, 1, 1, 0, 0);
    } else {
        m_commandBuffer->Draw(3, 1, 0, 0);
    }

    if (failCase == BsoFailCmdClearAttachments) {
        VkClearAttachment color_attachment = {};
        color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        color_attachment.colorAttachment = 1;  // Someone who knew what they were doing would use 0 for the index;
        VkClearRect clear_rect = {{{0, 0}, {static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height)}}, 0, 0};

        vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
    }

    // finalize recording of the command buffer
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
    m_commandBuffer->QueueCommandBuffer(true);
    DestroyRenderTarget();
}
// Shared setup for VKTriangleTest: clears and prepares the render targets,
// installs a depth/stencil state (depth-bounds test enabled only for the
// BsoFailDepthBounds case), builds the pipeline against the current render
// pass, and binds both the pipeline and the descriptor set.
void VkLayerTest::GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj,
                                         VkDescriptorSetObj &descriptorSet, BsoFailSelect failCase) {
    commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color);

    commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil);
    // Make sure depthWriteEnable is set so that Depth fail test will work
    // correctly
    // Make sure stencilTestEnable is set so that Stencil fail test will work
    // correctly
    VkStencilOpState stencil = {};
    stencil.failOp = VK_STENCIL_OP_KEEP;
    stencil.passOp = VK_STENCIL_OP_KEEP;
    stencil.depthFailOp = VK_STENCIL_OP_KEEP;
    stencil.compareOp = VK_COMPARE_OP_NEVER;

    VkPipelineDepthStencilStateCreateInfo ds_ci = {};
    ds_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
    ds_ci.pNext = NULL;
    ds_ci.depthTestEnable = VK_FALSE;
    ds_ci.depthWriteEnable = VK_TRUE;
    ds_ci.depthCompareOp = VK_COMPARE_OP_NEVER;
    ds_ci.depthBoundsTestEnable = VK_FALSE;
    if (failCase == BsoFailDepthBounds) {
        ds_ci.depthBoundsTestEnable = VK_TRUE;
        ds_ci.maxDepthBounds = 0.0f;
        ds_ci.minDepthBounds = 0.0f;
    }
    ds_ci.stencilTestEnable = VK_TRUE;
    ds_ci.front = stencil;
    ds_ci.back = stencil;

    pipelineobj.SetDepthStencil(&ds_ci);
    pipelineobj.SetViewport(m_viewports);
    pipelineobj.SetScissor(m_scissors);
    descriptorSet.CreateVKDescriptorSet(commandBuffer);
    VkResult err = pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
    ASSERT_VK_SUCCESS(err);
    vkCmdBindPipeline(commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineobj.handle());
    commandBuffer->BindDescriptorSet(descriptorSet);
}
// Marker fixture for tests that are expected to produce no validation errors.
// Adds nothing to VkLayerTest; exists only to group positive tests by name.
class VkPositiveLayerTest : public VkLayerTest {};
// Fixture variant that turns on WSI support, causing SetUp() to add the
// surface/swapchain extensions before instance creation.
class VkWsiEnabledLayerTest : public VkLayerTest {
   protected:
    VkWsiEnabledLayerTest() { m_enableWSI = true; }
};
// RAII helper that creates/binds a VkBuffer in a deliberately-broken way
// (per eTestEnFlags) so buffer-binding validation tests can observe the
// resulting errors. The destructor releases whatever was actually created.
class VkBufferTest {
   public:
    enum eTestEnFlags {
        eDoubleDelete,
        eInvalidDeviceOffset,
        eInvalidMemoryOffset,
        eBindNullBuffer,
        eBindFakeBuffer,
        eFreeInvalidHandle,
        eNone,
    };

    // NB: eOffsetAlignment (1) doubles as the intentionally-misaligned bind offset.
    enum eTestConditions { eOffsetAlignment = 1 };

    // Returns true when the device's alignment limits make the requested
    // invalid-offset test meaningful (i.e. offset 1 is actually misaligned).
    static bool GetTestConditionValid(VkDeviceObj *aVulkanDevice, eTestEnFlags aTestFlag, VkBufferUsageFlags aBufferUsage = 0) {
        if (eInvalidDeviceOffset != aTestFlag && eInvalidMemoryOffset != aTestFlag) {
            return true;
        }
        VkDeviceSize offset_limit = 0;
        if (eInvalidMemoryOffset == aTestFlag) {
            // Query the real alignment requirement via a throwaway buffer.
            VkBuffer vulkanBuffer;
            VkBufferCreateInfo buffer_create_info = {};
            buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
            buffer_create_info.size = 32;
            buffer_create_info.usage = aBufferUsage;

            vkCreateBuffer(aVulkanDevice->device(), &buffer_create_info, nullptr, &vulkanBuffer);
            VkMemoryRequirements memory_reqs = {};

            vkGetBufferMemoryRequirements(aVulkanDevice->device(), vulkanBuffer, &memory_reqs);
            vkDestroyBuffer(aVulkanDevice->device(), vulkanBuffer, nullptr);
            offset_limit = memory_reqs.alignment;
        } else if ((VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) & aBufferUsage) {
            offset_limit = aVulkanDevice->props.limits.minTexelBufferOffsetAlignment;
        } else if (VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT & aBufferUsage) {
            offset_limit = aVulkanDevice->props.limits.minUniformBufferOffsetAlignment;
        } else if (VK_BUFFER_USAGE_STORAGE_BUFFER_BIT & aBufferUsage) {
            offset_limit = aVulkanDevice->props.limits.minStorageBufferOffsetAlignment;
        }
        return eOffsetAlignment < offset_limit;
    }

    // A constructor which performs validation tests within construction.
    VkBufferTest(VkDeviceObj *aVulkanDevice, VkBufferUsageFlags aBufferUsage, eTestEnFlags aTestFlag = eNone)
        : AllocateCurrent(true),
          BoundCurrent(false),
          CreateCurrent(false),
          InvalidDeleteEn(false),
          VulkanDevice(aVulkanDevice->device()) {
        if (eBindNullBuffer == aTestFlag || eBindFakeBuffer == aTestFlag) {
            // Bind memory to a null or garbage buffer handle on purpose.
            VkMemoryAllocateInfo memory_allocate_info = {};
            memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
            memory_allocate_info.allocationSize = 1;   // fake size -- shouldn't matter for the test
            memory_allocate_info.memoryTypeIndex = 0;  // fake type -- shouldn't matter for the test
            vkAllocateMemory(VulkanDevice, &memory_allocate_info, nullptr, &VulkanMemory);

            VulkanBuffer = (aTestFlag == eBindNullBuffer) ? VK_NULL_HANDLE : (VkBuffer)0xCDCDCDCDCDCDCDCD;

            vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, 0);
        } else {
            VkBufferCreateInfo buffer_create_info = {};
            buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
            buffer_create_info.size = 32;
            buffer_create_info.usage = aBufferUsage;

            vkCreateBuffer(VulkanDevice, &buffer_create_info, nullptr, &VulkanBuffer);
            CreateCurrent = true;

            VkMemoryRequirements memory_requirements;
            vkGetBufferMemoryRequirements(VulkanDevice, VulkanBuffer, &memory_requirements);

            VkMemoryAllocateInfo memory_allocate_info = {};
            memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
            memory_allocate_info.allocationSize = memory_requirements.size + eOffsetAlignment;
            bool pass = aVulkanDevice->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info,
                                                             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
            if (!pass) {
                CreateCurrent = false;
                // BUGFIX: no memory has been allocated on this early-out path.
                // Clear AllocateCurrent so the destructor does not call
                // vkFreeMemory on the uninitialized VulkanMemory handle.
                AllocateCurrent = false;
                vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr);
                return;
            }

            vkAllocateMemory(VulkanDevice, &memory_allocate_info, NULL, &VulkanMemory);
            // NB: 1 is intentionally an invalid offset value
            const bool offset_en = eInvalidDeviceOffset == aTestFlag || eInvalidMemoryOffset == aTestFlag;
            vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, offset_en ? eOffsetAlignment : 0);
            BoundCurrent = true;

            InvalidDeleteEn = (eFreeInvalidHandle == aTestFlag);
        }
    }

    ~VkBufferTest() {
        if (CreateCurrent) {
            vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr);
        }
        if (AllocateCurrent) {
            if (InvalidDeleteEn) {
                // Deliberately free a corrupted handle first to provoke a
                // validation error, then free the real allocation.
                union {
                    VkDeviceMemory device_memory;
                    unsigned long long index_access;
                } bad_index;

                bad_index.device_memory = VulkanMemory;
                bad_index.index_access++;

                vkFreeMemory(VulkanDevice, bad_index.device_memory, nullptr);
            }
            vkFreeMemory(VulkanDevice, VulkanMemory, nullptr);
        }
    }

    bool GetBufferCurrent() { return AllocateCurrent && BoundCurrent && CreateCurrent; }

    const VkBuffer &GetBuffer() { return VulkanBuffer; }

    void TestDoubleDestroy() {
        // Destroy the buffer but leave the flag set, which will cause
        // the buffer to be destroyed again in the destructor.
        vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr);
    }

   protected:
    bool AllocateCurrent;   // true when VulkanMemory holds a live allocation
    bool BoundCurrent;      // true when buffer and memory were bound
    bool CreateCurrent;     // true when VulkanBuffer holds a live buffer
    bool InvalidDeleteEn;   // destructor also frees a corrupted handle
    VkBuffer VulkanBuffer;
    VkDevice VulkanDevice;
    VkDeviceMemory VulkanMemory;
};
class VkVerticesObj {
public:
VkVerticesObj(VkDeviceObj *aVulkanDevice, unsigned aAttributeCount, unsigned aBindingCount, unsigned aByteStride,
VkDeviceSize aVertexCount, const float *aVerticies)
: BoundCurrent(false),
AttributeCount(aAttributeCount),
BindingCount(aBindingCount),
BindId(BindIdGenerator),
PipelineVertexInputStateCreateInfo(),
VulkanMemoryBuffer(aVulkanDevice, static_cast<int>(aByteStride * aVertexCount),
reinterpret_cast<const void *>(aVerticies), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) {
BindIdGenerator++; // NB: This can wrap w/misuse
VertexInputAttributeDescription = new VkVertexInputAttributeDescription[AttributeCount];
VertexInputBindingDescription = new VkVertexInputBindingDescription[BindingCount];
PipelineVertexInputStateCreateInfo.pVertexAttributeDescriptions = VertexInputAttributeDescription;
PipelineVertexInputStateCreateInfo.vertexAttributeDescriptionCount = AttributeCount;
PipelineVertexInputStateCreateInfo.pVertexBindingDescriptions = VertexInputBindingDescription;
PipelineVertexInputStateCreateInfo.vertexBindingDescriptionCount = BindingCount;
PipelineVertexInputStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
unsigned i = 0;
do {
VertexInputAttributeDescription[i].binding = BindId;
VertexInputAttributeDescription[i].location = i;
VertexInputAttributeDescription[i].format = VK_FORMAT_R32G32B32_SFLOAT;
VertexInputAttributeDescription[i].offset = sizeof(float) * aByteStride;
i++;
} while (AttributeCount < i);
i = 0;
do {
VertexInputBindingDescription[i].binding = BindId;
VertexInputBindingDescription[i].stride = aByteStride;
VertexInputBindingDescription[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
i++;
} while (BindingCount < i);
}
~VkVerticesObj() {
if (VertexInputAttributeDescription) {
delete[] VertexInputAttributeDescription;
}
if (VertexInputBindingDescription) {
delete[] VertexInputBindingDescription;
}
}
bool AddVertexInputToPipe(VkPipelineObj &aPipelineObj) {
aPipelineObj.AddVertexInputAttribs(VertexInputAttributeDescription, AttributeCount);
aPipelineObj.AddVertexInputBindings(VertexInputBindingDescription, BindingCount);
return true;
}
void BindVertexBuffers(VkCommandBuffer aCommandBuffer, unsigned aOffsetCount = 0, VkDeviceSize *aOffsetList = nullptr) {
VkDeviceSize *offsetList;
unsigned offsetCount;
if (aOffsetCount) {
offsetList = aOffsetList;
offsetCount = aOffsetCount;
} else {
offsetList = new VkDeviceSize[1]();
offsetCount = 1;
}
vkCmdBindVertexBuffers(aCommandBuffer, BindId, offsetCount, &VulkanMemoryBuffer.handle(), offsetList);
BoundCurrent = true;
if (!aOffsetCount) {
delete[] offsetList;
}
}
protected:
static uint32_t BindIdGenerator;
bool BoundCurrent;
unsigned AttributeCount;
unsigned BindingCount;
uint32_t BindId;
VkPipelineVertexInputStateCreateInfo PipelineVertexInputStateCreateInfo;
VkVertexInputAttributeDescription *VertexInputAttributeDescription;
VkVertexInputBindingDescription *VertexInputBindingDescription;
VkConstantBufferObj VulkanMemoryBuffer;
};
// Storage for VkVerticesObj's monotonically increasing binding-id counter.
uint32_t VkVerticesObj::BindIdGenerator;
// Creates a throw-away descriptor pool, layout, and single descriptor set for
// one test's use; everything is destroyed with the pool.
struct OneOffDescriptorSet {
    VkDeviceObj *device_;
    VkDescriptorPool pool_;
    VkDescriptorSetLayoutObj layout_;
    VkDescriptorSet set_;
    typedef std::vector<VkDescriptorSetLayoutBinding> Bindings;

    OneOffDescriptorSet(VkDeviceObj *device, const Bindings &bindings)
        : device_{device}, pool_{}, layout_(device, bindings), set_{} {
        // One pool-size entry per binding; a zero descriptorCount is bumped to 1.
        std::vector<VkDescriptorPoolSize> pool_sizes;
        pool_sizes.reserve(bindings.size());
        for (const auto &binding : bindings) {
            pool_sizes.push_back({binding.descriptorType, std::max(1u, binding.descriptorCount)});
        }
        VkDescriptorPoolCreateInfo pool_ci = {
            VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, 0, 1, uint32_t(pool_sizes.size()), pool_sizes.data()};
        if (vkCreateDescriptorPool(device_->handle(), &pool_ci, nullptr, &pool_) != VK_SUCCESS) {
            return;  // leave set_ null; Initialized() will report failure
        }
        VkDescriptorSetAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr, pool_, 1,
                                                  &layout_.handle()};
        vkAllocateDescriptorSets(device_->handle(), &alloc_info, &set_);
    }
    ~OneOffDescriptorSet() {
        // The set is freed implicitly when its pool is destroyed.
        vkDestroyDescriptorPool(device_->handle(), pool_, nullptr);
    }
    bool Initialized() { return pool_ != VK_NULL_HANDLE && layout_.initialized() && set_ != VK_NULL_HANDLE; }
};
// Returns true when the struct's sType field carries the canonical value for T.
template <typename T>
bool IsValidVkStruct(const T &s) {
    return s.sType == LvlTypeMap<T>::kSType;
}
// Helper class for tersely creating create pipeline tests
//
// Designed with minimal error checking to ensure easy error state creation
// See OneshotTest for typical usage
struct CreatePipelineHelper {
   public:
    std::vector<VkDescriptorSetLayoutBinding> dsl_bindings_;
    std::unique_ptr<OneOffDescriptorSet> descriptor_set_;
    std::vector<VkPipelineShaderStageCreateInfo> shader_stages_;
    VkPipelineVertexInputStateCreateInfo vi_ci_ = {};
    VkPipelineInputAssemblyStateCreateInfo ia_ci_ = {};
    VkPipelineTessellationStateCreateInfo tess_ci_ = {};
    VkViewport viewport_ = {};
    VkRect2D scissor_ = {};
    VkPipelineViewportStateCreateInfo vp_state_ci_ = {};
    VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci_ = {};
    VkPipelineLayoutCreateInfo pipeline_layout_ci_ = {};
    VkPipelineLayoutObj pipeline_layout_;
    VkPipelineDynamicStateCreateInfo dyn_state_ci_ = {};
    VkPipelineRasterizationStateCreateInfo rs_state_ci_ = {};
    VkPipelineColorBlendAttachmentState cb_attachments_ = {};
    VkPipelineColorBlendStateCreateInfo cb_ci_ = {};
    VkGraphicsPipelineCreateInfo gp_ci_ = {};
    VkPipelineCacheCreateInfo pc_ci_ = {};
    VkPipeline pipeline_ = VK_NULL_HANDLE;
    VkPipelineCache pipeline_cache_ = VK_NULL_HANDLE;
    std::unique_ptr<VkShaderObj> vs_;
    std::unique_ptr<VkShaderObj> fs_;
    VkLayerTest &layer_test_;
    CreatePipelineHelper(VkLayerTest &test) : layer_test_(test) {}
    ~CreatePipelineHelper() {
        VkDevice device = layer_test_.device();
        // Destroying VK_NULL_HANDLE is a no-op, so unconditional destroy is safe.
        vkDestroyPipelineCache(device, pipeline_cache_, nullptr);
        vkDestroyPipeline(device, pipeline_, nullptr);
    }
    // Default descriptor-set layout: a single uniform buffer visible to all stages.
    void InitDescriptorSetInfo() { dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }
    // No vertex attributes; triangle-strip topology.
    void InitInputAndVertexInfo() {
        vi_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
        ia_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
        ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
    }
    // Single-sample rasterization with sample shading disabled.
    void InitMultisampleInfo() {
        pipe_ms_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
        pipe_ms_state_ci_.pNext = nullptr;
        pipe_ms_state_ci_.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
        pipe_ms_state_ci_.sampleShadingEnable = VK_FALSE;
        pipe_ms_state_ci_.minSampleShading = 1.0;
        pipe_ms_state_ci_.pSampleMask = NULL;
    }
    void InitPipelineLayoutInfo() {
        pipeline_layout_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
        pipeline_layout_ci_.setLayoutCount = 1;     // Not really changeable because InitState() sets exactly one pSetLayout
        pipeline_layout_ci_.pSetLayouts = nullptr;  // filled in by InitState() once the layout exists
    }
    // Static 64x64 viewport and matching scissor.
    void InitViewportInfo() {
        viewport_ = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
        scissor_ = {{0, 0}, {64, 64}};
        vp_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
        vp_state_ci_.pNext = nullptr;
        vp_state_ci_.viewportCount = 1;
        vp_state_ci_.pViewports = &viewport_;  // ignored if dynamic
        vp_state_ci_.scissorCount = 1;
        vp_state_ci_.pScissors = &scissor_;  // ignored if dynamic
    }
    void InitDynamicStateInfo() {
        // Use a "validity" check on the {} initialized structure to detect initialization
        // during late bind
    }
    // Minimal passthrough vertex and fragment shaders.
    void InitShaderInfo() {
        vs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, &layer_test_));
        fs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, &layer_test_));
        // We shouldn't need a fragment shader but add it to be able to run on more devices
        shader_stages_ = {vs_->GetStageCreateInfo(), fs_->GetStageCreateInfo()};
    }
    // Fill-mode rasterization, back-face culling, no depth bias.
    void InitRasterizationInfo() {
        rs_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
        rs_state_ci_.pNext = nullptr;
        rs_state_ci_.flags = 0;
        rs_state_ci_.depthClampEnable = VK_FALSE;
        rs_state_ci_.rasterizerDiscardEnable = VK_FALSE;
        rs_state_ci_.polygonMode = VK_POLYGON_MODE_FILL;
        rs_state_ci_.cullMode = VK_CULL_MODE_BACK_BIT;
        rs_state_ci_.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
        rs_state_ci_.depthBiasEnable = VK_FALSE;
        rs_state_ci_.lineWidth = 1.0F;
    }
    void InitBlendStateInfo() {
        cb_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
        cb_ci_.logicOpEnable = VK_FALSE;
        cb_ci_.logicOp = VK_LOGIC_OP_COPY;  // ignored if enable is VK_FALSE above
        // NOTE(review): attachmentCount is taken from subpassCount -- assumes one
        // color attachment per subpass; confirm against the render pass setup.
        cb_ci_.attachmentCount = layer_test_.RenderPassInfo().subpassCount;
        ASSERT_TRUE(IsValidVkStruct(layer_test_.RenderPassInfo()));
        cb_ci_.pAttachments = &cb_attachments_;
        // BUGFIX: the loop previously wrote blendConstants[0] on every pass,
        // leaving constants [1..3] zero; initialize all four to 1.0.
        for (int i = 0; i < 4; i++) {
            cb_ci_.blendConstants[i] = 1.0F;
        }
    }
    void InitGraphicsPipelineInfo() {
        // Color-only rendering in a subpass with no depth/stencil attachment
        // Active Pipeline Shader Stages
        //    Vertex Shader
        //    Fragment Shader
        // Required: Fixed-Function Pipeline Stages
        //    VkPipelineVertexInputStateCreateInfo
        //    VkPipelineInputAssemblyStateCreateInfo
        //    VkPipelineViewportStateCreateInfo
        //    VkPipelineRasterizationStateCreateInfo
        //    VkPipelineMultisampleStateCreateInfo
        //    VkPipelineColorBlendStateCreateInfo
        gp_ci_.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
        gp_ci_.pNext = nullptr;
        gp_ci_.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
        gp_ci_.pVertexInputState = &vi_ci_;
        gp_ci_.pInputAssemblyState = &ia_ci_;
        gp_ci_.pTessellationState = nullptr;
        gp_ci_.pViewportState = &vp_state_ci_;
        gp_ci_.pRasterizationState = &rs_state_ci_;
        gp_ci_.pMultisampleState = &pipe_ms_state_ci_;
        gp_ci_.pDepthStencilState = nullptr;
        gp_ci_.pColorBlendState = &cb_ci_;
        gp_ci_.pDynamicState = nullptr;
        gp_ci_.renderPass = layer_test_.renderPass();
    }
    // Empty pipeline cache create info (no initial data).
    void InitPipelineCacheInfo() {
        pc_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        pc_ci_.pNext = nullptr;
        pc_ci_.flags = 0;
        pc_ci_.initialDataSize = 0;
        pc_ci_.pInitialData = nullptr;
    }
    // Not called by default during init_info
    void InitTesselationState() {
        // TBD -- add shaders and create_info
    }
    // TDB -- add control for optional and/or additional initialization
    // Populates every create-info structure with sane defaults; tests then
    // override the fields they want to break before calling InitState().
    void InitInfo() {
        InitDescriptorSetInfo();
        InitInputAndVertexInfo();
        InitMultisampleInfo();
        InitPipelineLayoutInfo();
        InitViewportInfo();
        InitDynamicStateInfo();
        InitShaderInfo();
        InitRasterizationInfo();
        InitBlendStateInfo();
        InitGraphicsPipelineInfo();
        InitPipelineCacheInfo();
    }
    // Creates the device-side objects (descriptor set, pipeline layout, cache)
    // that the create-info structures describe.
    void InitState() {
        VkResult err;
        descriptor_set_.reset(new OneOffDescriptorSet(layer_test_.DeviceObj(), dsl_bindings_));
        ASSERT_TRUE(descriptor_set_->Initialized());
        const std::vector<VkPushConstantRange> push_ranges(
            pipeline_layout_ci_.pPushConstantRanges,
            pipeline_layout_ci_.pPushConstantRanges + pipeline_layout_ci_.pushConstantRangeCount);
        pipeline_layout_ = VkPipelineLayoutObj(layer_test_.DeviceObj(), {&descriptor_set_->layout_}, push_ranges);
        err = vkCreatePipelineCache(layer_test_.device(), &pc_ci_, NULL, &pipeline_cache_);
        ASSERT_VK_SUCCESS(err);
    }
    void LateBindPipelineInfo() {
        // By value or dynamically located items must be late bound
        gp_ci_.layout = pipeline_layout_.handle();
        gp_ci_.stageCount = static_cast<uint32_t>(shader_stages_.size());  // explicit cast avoids narrowing
        gp_ci_.pStages = shader_stages_.data();
        if ((gp_ci_.pTessellationState == nullptr) && IsValidVkStruct(tess_ci_)) {
            gp_ci_.pTessellationState = &tess_ci_;
        }
        if ((gp_ci_.pDynamicState == nullptr) && IsValidVkStruct(dyn_state_ci_)) {
            gp_ci_.pDynamicState = &dyn_state_ci_;
        }
    }
    // Creates (or re-creates) the graphics pipeline, returning the API result.
    // implicit_destroy: destroy any previously created pipeline first.
    // do_late_bind: resolve the late-bound fields before creation.
    VkResult CreateGraphicsPipeline(bool implicit_destroy = true, bool do_late_bind = true) {
        VkResult err;
        if (do_late_bind) {
            LateBindPipelineInfo();
        }
        if (implicit_destroy && (pipeline_ != VK_NULL_HANDLE)) {
            vkDestroyPipeline(layer_test_.device(), pipeline_, nullptr);
            pipeline_ = VK_NULL_HANDLE;
        }
        err = vkCreateGraphicsPipelines(layer_test_.device(), pipeline_cache_, 1, &gp_ci_, NULL, &pipeline_);
        return err;
    }
    // Helper function to create a simple test case (positive or negative)
    //
    // info_override can be any callable that takes a CreatePipelineHelper &
    // flags, error can be any args accepted by "SetDesiredFailure".
    template <typename Test, typename OverrideFunc, typename Error>
    static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, const std::vector<Error> &errors,
                            bool positive_test = false) {
        CreatePipelineHelper helper(test);
        helper.InitInfo();
        info_override(helper);
        helper.InitState();
        for (const auto &error : errors) test.Monitor()->SetDesiredFailureMsg(flags, error);
        helper.CreateGraphicsPipeline();
        if (positive_test) {
            test.Monitor()->VerifyNotFound();
        } else {
            test.Monitor()->VerifyFound();
        }
    }
    // Single-error convenience overload.
    template <typename Test, typename OverrideFunc, typename Error>
    static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, Error error, bool positive_test = false) {
        OneshotTest(test, info_override, flags, std::vector<Error>(1, error), positive_test);
    }
};
namespace chain_util {
// Value-initialize a Vulkan struct of type T, stamping its canonical sType
// and chaining it onto pnext_in.
template <typename T>
T Init(const void *pnext_in = nullptr) {
    T result = {};
    result.sType = LvlTypeMap<T>::kSType;
    result.pNext = pnext_in;
    return result;
}
// Builds a pNext chain (and, optionally, an extension-name list) out of
// structs whose extension names pass the supplied predicate.
class ExtensionChain {
    const void *head_ = nullptr;
    typedef std::function<bool(const char *)> AddIfFunction;
    AddIfFunction add_if_;
    typedef std::vector<const char *> List;
    List *list_;

   public:
    template <typename F>
    ExtensionChain(F &add_if, List *list) : add_if_(add_if), list_(list) {}
    // If the predicate approves `name`, record it (when a list is attached)
    // and push obj onto the front of the chain.
    template <typename T>
    void Add(const char *name, T &obj) {
        if (!add_if_(name)) return;
        if (list_) list_->push_back(name);
        obj.pNext = head_;
        head_ = &obj;
    }
    const void *Head() const { return head_; }
};
}  // namespace chain_util
// ********************************************************************************************************************
// ********************************************************************************************************************
// ********************************************************************************************************************
// ********************************************************************************************************************
// Exercises parameter_validation's "required parameter" checks one at a time:
// each SetDesiredFailureMsg/VerifyFound pair pins a single expected error.
TEST_F(VkLayerTest, RequiredParameter) {
    TEST_DESCRIPTION("Specify VK_NULL_HANDLE, NULL, and 0 for required handle, pointer, array, and array count parameters");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pFeatures specified as NULL");
    // Specify NULL for a pointer to a handle
    // Expected to trigger an error with
    // parameter_validation::validate_required_pointer
    vkGetPhysicalDeviceFeatures(gpu(), NULL);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "required parameter pQueueFamilyPropertyCount specified as NULL");
    // Specify NULL for pointer to array count
    // Expected to trigger an error with parameter_validation::validate_array
    vkGetPhysicalDeviceQueueFamilyProperties(gpu(), NULL, NULL);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_1e030a1b);
    // Specify 0 for a required array count
    // Expected to trigger an error with parameter_validation::validate_array
    VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
    m_commandBuffer->SetViewport(0, 0, &viewport);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_1e03fa01);
    // Specify NULL for a required array
    // Expected to trigger an error with parameter_validation::validate_array
    m_commandBuffer->SetViewport(0, 1, NULL);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter memory specified as VK_NULL_HANDLE");
    // Specify VK_NULL_HANDLE for a required handle
    // Expected to trigger an error with
    // parameter_validation::validate_required_handle
    vkUnmapMemory(device(), VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "required parameter pFences[0] specified as VK_NULL_HANDLE");
    // Specify VK_NULL_HANDLE for a required handle array entry
    // Expected to trigger an error with
    // parameter_validation::validate_required_handle_array
    VkFence fence = VK_NULL_HANDLE;
    vkResetFences(device(), 1, &fence);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pAllocateInfo specified as NULL");
    // Specify NULL for a required struct pointer
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type
    VkDeviceMemory memory = VK_NULL_HANDLE;
    vkAllocateMemory(device(), NULL, NULL, &memory);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of faceMask must not be 0");
    // Specify 0 for a required VkFlags parameter
    // Expected to trigger an error with parameter_validation::validate_flags
    m_commandBuffer->SetStencilReference(0, 0);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of pSubmits[0].pWaitDstStageMask[0] must not be 0");
    // Specify 0 for a required VkFlags array entry
    // Expected to trigger an error with
    // parameter_validation::validate_flags_array
    VkSemaphore semaphore = VK_NULL_HANDLE;
    VkPipelineStageFlags stageFlags = 0;
    VkSubmitInfo submitInfo = {};
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    submitInfo.pWaitSemaphores = &semaphore;
    submitInfo.pWaitDstStageMask = &stageFlags;
    vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ReservedParameter) {
    TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " must be 0");
    // Specify a non-zero value for a reserved VkFlags parameter
    // (VkEventCreateInfo::flags has no defined bits).
    // Expected to trigger an error with
    // parameter_validation::validate_reserved_flags
    VkEvent event_handle = VK_NULL_HANDLE;
    VkEventCreateInfo event_info = {};
    event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    event_info.flags = 1;
    vkCreateEvent(device(), &event_info, NULL, &event_handle);
    m_errorMonitor->VerifyFound();
}
// Verifies that a name assigned via VK_EXT_debug_marker is echoed back in the
// error message when the named object is destroyed while still in use.
TEST_F(VkLayerTest, DebugMarkerNameTest) {
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    // The debug-marker extension is provided by the core_validation layer here.
    if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME);
    } else {
        printf("             Debug Marker Extension not supported, skipping test\n");
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    PFN_vkDebugMarkerSetObjectNameEXT fpvkDebugMarkerSetObjectNameEXT =
        (PFN_vkDebugMarkerSetObjectNameEXT)vkGetInstanceProcAddr(instance(), "vkDebugMarkerSetObjectNameEXT");
    if (!(fpvkDebugMarkerSetObjectNameEXT)) {
        printf("             Can't find fpvkDebugMarkerSetObjectNameEXT; skipped.\n");
        return;
    }
    // Create an event and give it a recognizable debug name.
    VkEvent event_handle = VK_NULL_HANDLE;
    VkEventCreateInfo event_info = {};
    event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(device(), &event_info, NULL, &event_handle);
    VkDebugMarkerObjectNameInfoEXT name_info = {};
    name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
    name_info.pNext = nullptr;
    name_info.object = (uint64_t)event_handle;
    name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT;
    name_info.pObjectName = "UnimaginablyImprobableString";
    fpvkDebugMarkerSetObjectNameEXT(device(), &name_info);
    // Reference the event in a submitted command buffer, then destroy it while
    // the submission may still be executing; the error should carry the name.
    m_commandBuffer->begin();
    vkCmdSetEvent(m_commandBuffer->handle(), event_handle, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
    m_commandBuffer->end();
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UnimaginablyImprobableString");
    vkDestroyEvent(m_device->device(), event_handle, NULL);
    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);
}
TEST_F(VkLayerTest, InvalidStructSType) {
    TEST_DESCRIPTION("Specify an invalid VkStructureType for a Vulkan structure's sType field");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pAllocateInfo->sType must be");
    // Zero struct memory, effectively setting sType to
    // VK_STRUCTURE_TYPE_APPLICATION_INFO (enum value 0)
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type
    VkMemoryAllocateInfo alloc_info = {};
    VkDeviceMemory memory = VK_NULL_HANDLE;
    vkAllocateMemory(device(), &alloc_info, NULL, &memory);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pSubmits[0].sType must be");
    // Zero struct memory, effectively setting sType to
    // VK_STRUCTURE_TYPE_APPLICATION_INFO
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type_array
    VkSubmitInfo submit_info = {};
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidStructPNext) {
    TEST_DESCRIPTION("Specify an invalid value for a Vulkan structure's pNext field");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "value of pCreateInfo->pNext must be NULL");
    // Set VkEventCreateInfo::pNext to a non-NULL value, when pNext must be NULL.
    // Need to pick a function that has no allowed pNext structure types.
    // Expected to trigger an error with parameter_validation::validate_struct_pnext
    VkEvent event = VK_NULL_HANDLE;
    VkEventCreateInfo event_alloc_info = {};
    // Zero-initialization will provide the correct sType
    VkApplicationInfo app_info = {};
    event_alloc_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    event_alloc_info.pNext = &app_info;
    vkCreateEvent(device(), &event_alloc_info, NULL, &event);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                         " chain includes a structure with unexpected VkStructureType ");
    // Set VkMemoryAllocateInfo::pNext to a non-NULL value, but use
    // a function that has allowed pNext structure types and specify
    // a structure type that is not allowed.
    // Expected to trigger an error with parameter_validation::validate_struct_pnext
    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkMemoryAllocateInfo memory_alloc_info = {};
    memory_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_alloc_info.pNext = &app_info;
    vkAllocateMemory(device(), &memory_alloc_info, NULL, &memory);
    m_errorMonitor->VerifyFound();
}
// Passes an enum value far outside VkFormat's defined range.
TEST_F(VkLayerTest, UnrecognizedValueOutOfRange) {
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "does not fall within the begin..end range of the core VkFormat enumeration tokens");
    // Specify an invalid VkFormat value
    // Expected to trigger an error with
    // parameter_validation::validate_ranged_enum
    VkFormatProperties format_properties;
    vkGetPhysicalDeviceFormatProperties(gpu(), static_cast<VkFormat>(8000), &format_properties);
    m_errorMonitor->VerifyFound();
}
// Passes a bitmask containing an undefined bit (1 << 25 is not a valid
// VkImageUsageFlagBits value).
TEST_F(VkLayerTest, UnrecognizedValueBadMask) {
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
    // Specify an invalid VkFlags bitmask value
    // Expected to trigger an error with parameter_validation::validate_flags
    VkImageFormatProperties image_format_properties;
    vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                                             static_cast<VkImageUsageFlags>(1 << 25), 0, &image_format_properties);
    m_errorMonitor->VerifyFound();
}
// Passes an undefined bit in a VkFlags array element (pWaitDstStageMask[0]).
TEST_F(VkLayerTest, UnrecognizedValueBadFlag) {
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
    // Specify an invalid VkFlags array entry
    // Expected to trigger an error with
    // parameter_validation::validate_flags_array
    VkSemaphore semaphore = VK_NULL_HANDLE;
    VkPipelineStageFlags stage_flags = static_cast<VkPipelineStageFlags>(1 << 25);
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.waitSemaphoreCount = 1;
    submit_info.pWaitSemaphores = &semaphore;
    submit_info.pWaitDstStageMask = &stage_flags;
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}
// Passes a VkBool32 that is neither VK_TRUE nor VK_FALSE; also enables the
// mirror-clamp extension so its address mode does not itself raise an error.
TEST_F(VkLayerTest, UnrecognizedValueBadBool) {
    // Make sure using VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE doesn't trigger a false positive.
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
    } else {
        printf("             VK_KHR_sampler_mirror_clamp_to_edge extension not supported, skipping test\n");
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is neither VK_TRUE nor VK_FALSE");
    // Specify an invalid VkBool32 value, expecting a warning with parameter_validation::validate_bool32
    VkSampler sampler = VK_NULL_HANDLE;
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    // Not VK_TRUE or VK_FALSE
    sampler_info.anisotropyEnable = 3;
    vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
}
// Uses MIRROR_CLAMP_TO_EDGE without enabling VK_KHR_sampler_mirror_clamp_to_edge;
// the layers should reject the sampler create.
TEST_F(VkLayerTest, MirrorClampToEdgeNotEnabled) {
    TEST_DESCRIPTION("Validation should catch using CLAMP_TO_EDGE addressing mode if the extension is not enabled.");
    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_1260086e);
    VkSampler sampler = VK_NULL_HANDLE;
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    // Set the modes to cause the error
    sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, AnisotropyFeatureDisabled) {
    TEST_DESCRIPTION("Validation should check anisotropy parameters are correct with samplerAnisotropy disabled.");
    // Determine if required device features are available
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    device_features.samplerAnisotropy = VK_FALSE;  // force anisotropy off
    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_1260085c);
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    // With the samplerAnisotropy feature disabled, the sampler must not enable it.
    sampler_info.anisotropyEnable = VK_TRUE;
    VkSampler sampler = VK_NULL_HANDLE;
    VkResult err;
    err = vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
    // Some drivers may still hand back a sampler; clean it up if so.
    if (VK_SUCCESS == err) {
        vkDestroySampler(m_device->device(), sampler, NULL);
    }
    sampler = VK_NULL_HANDLE;
}
TEST_F(VkLayerTest, AnisotropyFeatureEnabled) {
    TEST_DESCRIPTION("Validation must check several conditions that apply only when Anisotropy is enabled.");
    // Determine if required device features are available
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    // These tests require that the device support the samplerAnisotropy feature
    if (VK_TRUE != device_features.samplerAnisotropy) {
        printf("             Test requires unsupported samplerAnisotropy feature. Skipped.\n");
        return;
    }
    // Cubic filtering cases additionally need VK_IMG_filter_cubic.
    bool cubic_support = false;
    if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) {
        m_device_extension_names.push_back("VK_IMG_filter_cubic");
        cubic_support = true;
    }
    // Keep a pristine reference create-info so each case restores the fields it broke.
    VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo();
    sampler_info_ref.anisotropyEnable = VK_TRUE;
    VkSamplerCreateInfo sampler_info = sampler_info_ref;
    ASSERT_NO_FATAL_FAILURE(InitState());
    // Creates a sampler from pCreateInfo expecting the given validation error,
    // destroying the sampler if the driver created one anyway.
    auto do_test = [this](UNIQUE_VALIDATION_ERROR_CODE code, const VkSamplerCreateInfo *pCreateInfo) -> void {
        VkResult err;
        VkSampler sampler = VK_NULL_HANDLE;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code);
        err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler);
        m_errorMonitor->VerifyFound();
        if (VK_SUCCESS == err) {
            vkDestroySampler(m_device->device(), sampler, NULL);
        }
    };
    // maxAnisotropy out-of-bounds low.
    sampler_info.maxAnisotropy = NearestSmaller(1.0F);
    do_test(VALIDATION_ERROR_1260085e, &sampler_info);
    sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy;
    // maxAnisotropy out-of-bounds high.
    sampler_info.maxAnisotropy = NearestGreater(m_device->phy().properties().limits.maxSamplerAnisotropy);
    do_test(VALIDATION_ERROR_1260085e, &sampler_info);
    sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy;
    // Both anisotropy and unnormalized coords enabled
    sampler_info.unnormalizedCoordinates = VK_TRUE;
    do_test(VALIDATION_ERROR_12600868, &sampler_info);
    sampler_info.unnormalizedCoordinates = sampler_info_ref.unnormalizedCoordinates;
    // Both anisotropy and cubic filtering enabled
    if (cubic_support) {
        sampler_info.minFilter = VK_FILTER_CUBIC_IMG;
        do_test(VALIDATION_ERROR_12600872, &sampler_info);
        sampler_info.minFilter = sampler_info_ref.minFilter;
        sampler_info.magFilter = VK_FILTER_CUBIC_IMG;
        do_test(VALIDATION_ERROR_12600872, &sampler_info);
        sampler_info.magFilter = sampler_info_ref.magFilter;
    } else {
        printf("             Test requires unsupported extension \"VK_IMG_filter_cubic\". Skipped.\n");
    }
}
// Passes the MAX_ENUM sentinel, which lies outside VkFormat's valid range.
TEST_F(VkLayerTest, UnrecognizedValueMaxEnum) {
    ASSERT_NO_FATAL_FAILURE(Init());
    // Specify MAX_ENUM
    VkFormatProperties format_properties;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not fall within the begin..end range");
    vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_MAX_ENUM, &format_properties);
    m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UpdateBufferAlignment) {
    TEST_DESCRIPTION("Check alignment parameters for vkCmdUpdateBuffer");
    uint32_t updateData[] = {1, 2, 3, 4, 5, 6, 7, 8};
    ASSERT_NO_FATAL_FAILURE(Init());
    VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    VkBufferObj buffer;
    buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs);
    m_commandBuffer->begin();
    // Introduce failure by using dstOffset that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 1, 4, updateData);
    m_errorMonitor->VerifyFound();
    // Introduce failure by using dataSize that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, 6, updateData);
    m_errorMonitor->VerifyFound();
    // Introduce failure by using dataSize of -44, which wraps to a huge
    // unsigned VkDeviceSize and is rejected as out of range
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "must be greater than zero and less than or equal to 65536");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)-44, updateData);
    m_errorMonitor->VerifyFound();
    // Introduce failure by using dataSize that is > 65536
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "must be greater than zero and less than or equal to 65536");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)80000, updateData);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}
TEST_F(VkLayerTest, FillBufferAlignment) {
    TEST_DESCRIPTION("Check alignment parameters for vkCmdFillBuffer");
    ASSERT_NO_FATAL_FAILURE(Init());

    // Small host-visible transfer-dst buffer used as the fill target.
    VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    VkBufferObj dst_buffer;
    dst_buffer.init_as_dst(*m_device, (VkDeviceSize)20, mem_props);

    m_commandBuffer->begin();

    // dstOffset that is not a multiple of 4 must be rejected.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->FillBuffer(dst_buffer.handle(), 1, 4, 0x11111111);
    m_errorMonitor->VerifyFound();

    // size that is not a multiple of 4 must be rejected.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->FillBuffer(dst_buffer.handle(), 0, 6, 0x11111111);
    m_errorMonitor->VerifyFound();

    // A zero size must be rejected.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero");
    m_commandBuffer->FillBuffer(dst_buffer.handle(), 0, 0, 0x11111111);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}
TEST_F(VkLayerTest, PSOPolygonModeInvalid) {
    TEST_DESCRIPTION("Attempt to use a non-solid polygon fill mode in a pipeline when this feature is not enabled.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    std::vector<const char *> no_extensions;
    auto features = m_device->phy().features();
    // Artificially disable support for non-solid fill modes.
    features.fillModeNonSolid = VK_FALSE;

    // Sacrificial device with the feature forced off.
    VkDeviceObj test_device(0, gpu(), no_extensions, &features);
    VkRenderpassObj render_pass(&test_device);
    const VkPipelineLayoutObj pipeline_layout(&test_device);

    VkPipelineRasterizationStateCreateInfo raster_ci = {};
    raster_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    raster_ci.pNext = nullptr;
    raster_ci.lineWidth = 1.0f;
    raster_ci.rasterizerDiscardEnable = VK_TRUE;

    VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    // Build a pipeline with the given (unsupported) polygon mode and expect the
    // corresponding validation error. Both POINT and LINE share the same message.
    const auto expect_polygon_mode_failure = [&](VkPolygonMode bad_mode) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE");
        {
            VkPipelineObj pipe(&test_device);
            pipe.AddShader(&vs);
            pipe.AddShader(&fs);
            pipe.AddDefaultColorAttachment();
            raster_ci.polygonMode = bad_mode;
            pipe.SetRasterization(&raster_ci);
            pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
        }
        m_errorMonitor->VerifyFound();
    };

    expect_polygon_mode_failure(VK_POLYGON_MODE_POINT);
    expect_polygon_mode_failure(VK_POLYGON_MODE_LINE);
}
TEST_F(VkLayerTest, SparseBindingImageBufferCreate) {
TEST_DESCRIPTION("Create buffer/image with sparse attributes but without the sparse_binding bit set");
ASSERT_NO_FATAL_FAILURE(Init());
// --- Buffer half of the test -------------------------------------------------
// Sparse residency/aliased flags require SPARSE_BINDING as well; each creation
// below deliberately omits it and expects VALIDATION_ERROR_0140072c.
VkBuffer buffer;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
buf_info.size = 2048;
buf_info.queueFamilyIndexCount = 0;
buf_info.pQueueFamilyIndices = NULL;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
// RESIDENCY without BINDING on a buffer.
if (m_device->phy().features().sparseResidencyBuffer) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_0140072c);
buf_info.flags = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
m_errorMonitor->VerifyFound();
} else {
printf(" Test requires unsupported sparseResidencyBuffer feature. Skipped.\n");
// NOTE: early return skips the remaining sub-cases as well.
return;
}
// ALIASED without BINDING on a buffer.
if (m_device->phy().features().sparseResidencyAliased) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_0140072c);
buf_info.flags = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
m_errorMonitor->VerifyFound();
} else {
printf(" Test requires unsupported sparseResidencyAliased feature. Skipped.\n");
return;
}
// --- Image half of the test --------------------------------------------------
// Same rule for images: RESIDENCY/ALIASED require SPARSE_BINDING
// (VALIDATION_ERROR_09e007b6).
VkImage image;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent.width = 512;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
// RESIDENCY without BINDING on an image.
if (m_device->phy().features().sparseResidencyImage2D) {
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e007b6);
vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
} else {
printf(" Test requires unsupported sparseResidencyImage2D feature. Skipped.\n");
return;
}
// ALIASED without BINDING on an image.
if (m_device->phy().features().sparseResidencyAliased) {
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e007b6);
vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
} else {
printf(" Test requires unsupported sparseResidencyAliased feature. Skipped.\n");
return;
}
}
TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedTypes) {
    TEST_DESCRIPTION("Create images with sparse residency with unsupported types");
    // Determine which device features are available.
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    // Mask out the 2D/3D residency features so the per-type checks below fire,
    // then initialize device state with the reduced feature set.
    device_features.sparseResidencyImage2D = VK_FALSE;
    device_features.sparseResidencyImage3D = VK_FALSE;
    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));

    if (!m_device->phy().features().sparseBinding) {
        printf(" Test requires unsupported sparseBinding feature. Skipped.\n");
        return;
    }

    VkImage image = VK_NULL_HANDLE;
    VkResult result = VK_RESULT_MAX_ENUM;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_1D;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.extent.width = 512;
    image_create_info.extent.height = 1;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.queueFamilyIndexCount = 0;
    image_create_info.pQueueFamilyIndices = NULL;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    // Fix: use the image-flavored sparse-binding flag. The previous code used
    // VK_BUFFER_CREATE_SPARSE_BINDING_BIT, which only worked because the buffer
    // and image enums happen to share the same numeric value.
    image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;

    // 1D images never support sparse residency.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e00794);
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }

    // 2D image w/ sparse residency while the (masked-out) feature is unavailable.
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.extent.height = 64;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e00796);
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }

    // 3D image w/ sparse residency while the (masked-out) feature is unavailable.
    image_create_info.imageType = VK_IMAGE_TYPE_3D;
    image_create_info.extent.depth = 8;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e00798);
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }
}
TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedSamples) {
    TEST_DESCRIPTION("Create images with sparse residency with unsupported tiling or sample counts");
    // Determine which device features are available.
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    // BUG FIX: device_features was zero-initialized and never populated, so the
    // sparseResidencyImage2D gate below always read VK_FALSE and the test was
    // unconditionally skipped. Query the real feature set first (matches the
    // pattern used by SparseResidencyImageCreateUnsupportedTypes).
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    // These tests require that the device support sparse residency for 2D images.
    if (VK_TRUE != device_features.sparseResidencyImage2D) {
        printf(" Test requires unsupported SparseResidencyImage2D feature. Skipped.\n");
        return;
    }
    // Mask out the per-sample-count residency features so each creation below fails.
    device_features.sparseResidency2Samples = VK_FALSE;
    device_features.sparseResidency4Samples = VK_FALSE;
    device_features.sparseResidency8Samples = VK_FALSE;
    device_features.sparseResidency16Samples = VK_FALSE;
    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));

    VkImage image = VK_NULL_HANDLE;
    VkResult result = VK_RESULT_MAX_ENUM;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.extent.width = 64;
    image_create_info.extent.height = 64;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.queueFamilyIndexCount = 0;
    image_create_info.pQueueFamilyIndices = NULL;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    // Fix: use the image-flavored sparse-binding flag instead of the
    // (numerically identical) buffer enum used previously.
    image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;

    // 2D image w/ sparse residency and linear tiling is an error.
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then image tiling of VK_IMAGE_TILING_LINEAR is not supported");
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;

    // Multi-sample image w/ sparse residency when feature isn't available (4 flavors).
    image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e0079a);
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }
    image_create_info.samples = VK_SAMPLE_COUNT_4_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e0079c);
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }
    image_create_info.samples = VK_SAMPLE_COUNT_8_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e0079e);
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }
    image_create_info.samples = VK_SAMPLE_COUNT_16_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_09e007a0);
    result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == result) {
        vkDestroyImage(m_device->device(), image, NULL);
        image = VK_NULL_HANDLE;
    }
}
TEST_F(VkLayerTest, InvalidMemoryAliasing) {
TEST_DESCRIPTION(
"Create a buffer and image, allocate memory, and bind the buffer and image to memory such that they will alias.");
VkResult err;
bool pass;
ASSERT_NO_FATAL_FAILURE(Init());
VkBuffer buffer, buffer2;
VkImage image;
VkImage image2;
VkDeviceMemory mem; // buffer will be bound first
VkDeviceMemory mem_img; // image bound first
VkMemoryRequirements buff_mem_reqs, img_mem_reqs;
VkMemoryRequirements buff_mem_reqs2, img_mem_reqs2;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buf_info.size = 256;
buf_info.queueFamilyIndexCount = 0;
buf_info.pQueueFamilyIndices = NULL;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buf_info.flags = 0;
err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
vkGetBufferMemoryRequirements(m_device->device(), buffer, &buff_mem_reqs);
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent.width = 64;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
// Image tiling must be optimal to trigger error when aliasing linear buffer
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image, &img_mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.memoryTypeIndex = 0;
// Ensure memory is big enough for both bindings
alloc_info.allocationSize = buff_mem_reqs.size + img_mem_reqs.size;
// Buffer and image must be satisfiable from a single device-local memory type;
// otherwise the aliasing scenario cannot be constructed and the test bails out.
pass = m_device->phy().set_memory_type(buff_mem_reqs.memoryTypeBits & img_mem_reqs.memoryTypeBits, &alloc_info,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
if (!pass) {
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkDestroyImage(m_device->device(), image, NULL);
vkDestroyImage(m_device->device(), image2, NULL);
return;
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image2, &img_mem_reqs2);
// Expected WARNING: an optimal-tiled image bound over the same range as a
// linear buffer.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " is aliased with linear buffer 0x");
// VALIDATION FAILURE due to image mapping overlapping buffer mapping
err = vkBindImageMemory(m_device->device(), image, mem, 0);
m_errorMonitor->VerifyFound();
// Now correctly bind image2 to second mem allocation before incorrectly
// aliasing buffer2
err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer2);
ASSERT_VK_SUCCESS(err);
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem_img);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image2, mem_img, 0);
ASSERT_VK_SUCCESS(err);
// Reverse direction: a buffer bound over the image's range must also warn.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is aliased with non-linear image 0x");
vkGetBufferMemoryRequirements(m_device->device(), buffer2, &buff_mem_reqs2);
err = vkBindBufferMemory(m_device->device(), buffer2, mem_img, 0);
m_errorMonitor->VerifyFound();
// Cleanup: all four resources and both allocations.
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkDestroyBuffer(m_device->device(), buffer2, NULL);
vkDestroyImage(m_device->device(), image, NULL);
vkDestroyImage(m_device->device(), image2, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
vkFreeMemory(m_device->device(), mem_img, NULL);
}
TEST_F(VkLayerTest, InvalidMemoryMapping) {
TEST_DESCRIPTION("Attempt to map memory in a number of incorrect ways");
VkResult err;
bool pass;
ASSERT_NO_FATAL_FAILURE(Init());
VkBuffer buffer;
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
// nonCoherentAtomSize governs the offset/size granularity rules checked below.
const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buf_info.size = 256;
buf_info.queueFamilyIndexCount = 0;
buf_info.pQueueFamilyIndices = NULL;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buf_info.flags = 0;
err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.memoryTypeIndex = 0;
// Ensure memory is big enough for both bindings
static const VkDeviceSize allocation_size = 0x10000;
alloc_info.allocationSize = allocation_size;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
// No host-visible memory type -> nothing can be mapped; bail out.
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
uint8_t *pData;
// Attempt to map memory size 0 is invalid
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkMapMemory: Attempting to map memory range of size zero");
err = vkMapMemory(m_device->device(), mem, 0, 0, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// Map memory twice
err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VkMapMemory: Attempting to map memory on an already-mapped object ");
err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// Unmap the memory to avoid re-map error
vkUnmapMemory(m_device->device(), mem);
// overstep allocation with VK_WHOLE_SIZE
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" with size of VK_WHOLE_SIZE oversteps total array size 0x");
err = vkMapMemory(m_device->device(), mem, allocation_size + 1, VK_WHOLE_SIZE, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// overstep allocation w/o VK_WHOLE_SIZE
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " oversteps total array size 0x");
err = vkMapMemory(m_device->device(), mem, 1, allocation_size, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// Now error due to unmapping memory that's not mapped
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Unmapping Memory without memory being mapped: ");
vkUnmapMemory(m_device->device(), mem);
m_errorMonitor->VerifyFound();
// Now map memory and cause errors due to flushing invalid ranges
err = vkMapMemory(m_device->device(), mem, 4 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
VkMappedMemoryRange mmr = {};
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mmr.memory = mem;
mmr.offset = atom_size; // Error b/c offset less than offset of mapped mem
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_0c20055a);
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Now flush range that oversteps mapped range
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = atom_size;
mmr.size = 4 * atom_size; // Flushing bounds exceed mapped bounds
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_0c20055a);
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Now flush range with VK_WHOLE_SIZE that oversteps offset
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 2 * atom_size, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = atom_size;
mmr.size = VK_WHOLE_SIZE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_0c20055c);
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Some platforms have an atomsize of 1 which makes the test meaningless
if (atom_size > 3) {
// Now with an offset NOT a multiple of the device limit
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = 3; // Not a multiple of atom_size
mmr.size = VK_WHOLE_SIZE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_0c20055e);
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Now with a size NOT a multiple of the device limit
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = atom_size;
mmr.size = 2 * atom_size + 1; // Not a multiple of atom_size
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_0c200adc);
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
}
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if (!pass) {
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
// TODO : If we can get HOST_VISIBLE w/o HOST_COHERENT we can test cases of
// MEMTRACK_INVALID_MAP in validateAndCopyNoncoherentMemoryToDriver()
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
}
TEST_F(VkLayerTest, MapMemWithoutHostVisibleBit) {
    TEST_DESCRIPTION("Allocate memory that is not mappable and then attempt to map it.");
    // Mapping non-host-visible memory must trip VALIDATION_ERROR_31200554.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_31200554);
    ASSERT_NO_FATAL_FAILURE(Init());

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.allocationSize = 1024;
    // Search for a memory type that is NOT host-visible; without one the
    // scenario cannot be constructed and the test is meaningless.
    const bool found_unmappable =
        m_device->phy().set_memory_type(0xFFFFFFFF, &alloc_info, 0, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!found_unmappable) {
        printf(" No unmappable memory types found, skipping test\n");
        return;
    }

    VkDeviceMemory memory;
    VkResult result = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &memory);
    ASSERT_VK_SUCCESS(result);

    void *mapped_ptr = NULL;
    vkMapMemory(m_device->device(), memory, 0, VK_WHOLE_SIZE, 0, &mapped_ptr);
    m_errorMonitor->VerifyFound();

    vkFreeMemory(m_device->device(), memory, NULL);
}
TEST_F(VkLayerTest, RebindMemory) {
    // Bind one memory object to an image, then attempt to bind a second one to
    // the same image; the second bind must be flagged by validation.
    VkResult err;
    bool pass;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which has already been bound to mem object");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkImage image;
    VkDeviceMemory mem1;
    VkDeviceMemory mem2;
    VkMemoryRequirements mem_reqs;
    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    const int32_t tex_width = 32;
    const int32_t tex_height = 32;

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format;
    image_create_info.extent.width = tex_width;
    image_create_info.extent.height = tex_height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    image_create_info.flags = 0;

    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = 0;
    // memoryTypeIndex is selected by set_memory_type() below. (The original
    // code contained a dead re-assignment to 1 here, with a comment that
    // incorrectly described it as the induced failure; the actual failure is
    // the double bind further down.)
    mem_alloc.memoryTypeIndex = 0;

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
    mem_alloc.allocationSize = mem_reqs.size;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    ASSERT_TRUE(pass);

    // Allocate two memory objects of the required size.
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem1);
    ASSERT_VK_SUCCESS(err);
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem2);
    ASSERT_VK_SUCCESS(err);

    // Bind the first memory object to the image.
    err = vkBindImageMemory(m_device->device(), image, mem1, 0);
    ASSERT_VK_SUCCESS(err);

    // Introduce the validation failure: try to bind a different memory object
    // to the already-bound image.
    err = vkBindImageMemory(m_device->device(), image, mem2, 0);
    m_errorMonitor->VerifyFound();

    vkDestroyImage(m_device->device(), image, NULL);
    vkFreeMemory(m_device->device(), mem1, NULL);
    vkFreeMemory(m_device->device(), mem2, NULL);
}
TEST_F(VkLayerTest, SubmitSignaledFence) {
    // Submitting a fence that was created already signaled must be flagged.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "submitted in SIGNALED state. Fences must be reset before being submitted");

    VkFenceCreateInfo fence_ci = {};
    fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fence_ci.pNext = NULL;
    fence_ci.flags = VK_FENCE_CREATE_SIGNALED_BIT;  // fence starts life signaled

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Record a trivial command buffer to submit alongside the signaled fence.
    m_commandBuffer->begin();
    m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color);
    m_commandBuffer->end();

    vk_testing::Fence signaled_fence;
    signaled_fence.init(*m_device, fence_ci);

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    vkQueueSubmit(m_device->m_queue, 1, &submit_info, signaled_fence.handle());
    vkQueueWaitIdle(m_device->m_queue);
    m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidUsageBits) {
TEST_DESCRIPTION(
"Specify wrong usage for image then create conflicting view of image Initialize buffer with wrong usage then perform copy "
"expecting errors from both the image and the buffer (2 calls)");
ASSERT_NO_FATAL_FAILURE(Init());
auto format = FindSupportedDepthStencilFormat(gpu());
if (!format) {
printf(" No Depth + Stencil format found. Skipped.\n");
return;
}
VkImageObj image(m_device);
// Initialize image with transfer source usage
image.Init(128, 128, 1, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView dsv;
VkImageViewCreateInfo dsvci = {};
dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
dsvci.image = image.handle();
dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D;
dsvci.format = format;
dsvci.subresourceRange.layerCount = 1;
dsvci.subresourceRange.baseMipLevel = 0;
dsvci.subresourceRange.levelCount = 1;
dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
// Create a view with depth / stencil aspect for image with different usage:
// the image was created TRANSFER_SRC-only, so the view's aspects conflict.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid usage flag for Image ");
vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv);
m_errorMonitor->VerifyFound();
// Initialize buffer with TRANSFER_DST usage
VkBufferObj buffer;
VkMemoryPropertyFlags reqs = 0;
buffer.init_as_dst(*m_device, 128 * 128, reqs);
VkBufferImageCopy region = {};
region.bufferRowLength = 128;
region.bufferImageHeight = 128;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
region.imageSubresource.layerCount = 1;
region.imageExtent.height = 16;
region.imageExtent.width = 16;
region.imageExtent.depth = 1;
// Buffer usage not set to TRANSFER_SRC and image usage not set to TRANSFER_DST
m_commandBuffer->begin();
// two separate errors from this call: one for the buffer's missing
// TRANSFER_SRC usage, one for the image's missing TRANSFER_DST usage.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_18e00162);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VALIDATION_ERROR_18e0015c);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, LeakAnObject) {
    TEST_DESCRIPTION("Create a fence and destroy its device without first destroying the fence.");
    // A dedicated sacrificial device is used here: destroying the framework's
    // own device would make Teardown() fail, and calling Teardown directly
    // would destroy the error monitor.
    VkResult err;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has not been destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());

    vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
    auto features = m_device->phy().features();

    VkDeviceCreateInfo device_ci = {};
    device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_ci.pNext = NULL;
    device_ci.queueCreateInfoCount = queue_info.size();
    device_ci.pQueueCreateInfos = queue_info.data();
    device_ci.enabledLayerCount = 0;
    device_ci.ppEnabledLayerNames = NULL;
    device_ci.pEnabledFeatures = &features;

    // The sacrificial device object.
    VkDevice leaky_device;
    err = vkCreateDevice(gpu(), &device_ci, NULL, &leaky_device);
    ASSERT_VK_SUCCESS(err);

    VkFenceCreateInfo fence_ci = {};
    fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fence_ci.pNext = NULL;
    fence_ci.flags = 0;
    VkFence leaked_fence;
    err = vkCreateFence(leaky_device, &fence_ci, NULL, &leaked_fence);
    ASSERT_VK_SUCCESS(err);

    // Deliberately skip vkDestroyFence: destroying the device with a live
    // child object must produce the "has not been destroyed" error.
    vkDestroyDevice(leaky_device, NULL);
    m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidCommandPoolConsistency) {
TEST_DESCRIPTION("Allocate command buffers from one command pool and attempt to delete them from another.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeCommandBuffers is attempting to free Command Buffer");
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandPool command_pool_one;
VkCommandPool command_pool_two;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_one);
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_two);
VkCommandBuffer cb;
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;