blob: d93e05860c82e492c85a0f06cf0811a5669dc0d1 [file] [log] [blame]
/*-------------------------------------------------------------------------
* Vulkan Conformance Tests
* ------------------------
*
* Copyright (c) 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Pipeline barrier tests
*//*--------------------------------------------------------------------*/
#include "vktMemoryPipelineBarrierTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkDefs.hpp"
#include "vkPlatform.hpp"
#include "vkRefUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkPrograms.hpp"
#include "tcuMaybe.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuTestLog.hpp"
#include "tcuResultCollector.hpp"
#include "tcuTexture.hpp"
#include "tcuImageCompare.hpp"
#include "deUniquePtr.hpp"
#include "deStringUtil.hpp"
#include "deRandom.hpp"
#include "deInt32.h"
#include "deMath.h"
#include "deMemory.h"
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>
using tcu::TestLog;
using tcu::Maybe;
using de::MovePtr;
using std::string;
using std::vector;
using std::map;
using std::set;
using std::pair;
using tcu::IVec2;
using tcu::UVec2;
using tcu::UVec4;
using tcu::Vec4;
using tcu::ConstPixelBufferAccess;
using tcu::PixelBufferAccess;
using tcu::TextureFormat;
using tcu::TextureLevel;
namespace vkt
{
namespace memory
{
namespace
{
// Upper bounds for the sizes of buffers created by these tests.
enum
{
	MAX_UNIFORM_BUFFER_SIZE = 1024,		// Cap for uniform buffer tests
	MAX_STORAGE_BUFFER_SIZE = (1<<28)	// 256 MiB cap for storage buffer tests
};
// \todo [mika] Add to utilities
// Integer division that rounds the quotient up when there is a remainder.
template<typename T>
T divRoundUp (const T& a, const T& b)
{
	const T quotient	= a / b;
	const T remainder	= a % b;

	return (remainder == 0) ? quotient : quotient + 1;
}
// Union of every pipeline stage bit exercised by these tests.
enum
{
	ALL_PIPELINE_STAGES = vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
						| vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
						| vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT
						| vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT
						| vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
						| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
						| vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
						| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
						| vk::VK_PIPELINE_STAGE_TRANSFER_BIT
						| vk::VK_PIPELINE_STAGE_HOST_BIT
};
// Union of every access bit exercised by these tests.
enum
{
	ALL_ACCESSES = vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT
				 | vk::VK_ACCESS_INDEX_READ_BIT
				 | vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT
				 | vk::VK_ACCESS_UNIFORM_READ_BIT
				 | vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
				 | vk::VK_ACCESS_SHADER_READ_BIT
				 | vk::VK_ACCESS_SHADER_WRITE_BIT
				 | vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
				 | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
				 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
				 | vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
				 | vk::VK_ACCESS_TRANSFER_READ_BIT
				 | vk::VK_ACCESS_TRANSFER_WRITE_BIT
				 | vk::VK_ACCESS_HOST_READ_BIT
				 | vk::VK_ACCESS_HOST_WRITE_BIT
				 | vk::VK_ACCESS_MEMORY_READ_BIT
				 | vk::VK_ACCESS_MEMORY_WRITE_BIT
};
// Bitfield describing how the memory object under test may be used.
// Each bit maps to one or more Vulkan buffer/image usage, pipeline stage
// and access flags (see usageTo*Flags below).
enum Usage
{
	// Mapped host read and write
	USAGE_HOST_READ = (0x1u<<0),
	USAGE_HOST_WRITE = (0x1u<<1),

	// Copy and other transfer operations
	USAGE_TRANSFER_SRC = (0x1u<<2),
	USAGE_TRANSFER_DST = (0x1u<<3),

	// Buffer usage flags
	USAGE_INDEX_BUFFER = (0x1u<<4),
	USAGE_VERTEX_BUFFER = (0x1u<<5),

	USAGE_UNIFORM_BUFFER = (0x1u<<6),
	USAGE_STORAGE_BUFFER = (0x1u<<7),

	USAGE_UNIFORM_TEXEL_BUFFER = (0x1u<<8),
	USAGE_STORAGE_TEXEL_BUFFER = (0x1u<<9),

	// \todo [2016-03-09 mika] This is probably almost impossible to do
	USAGE_INDIRECT_BUFFER = (0x1u<<10),

	// Texture usage flags
	USAGE_SAMPLED_IMAGE = (0x1u<<11),
	USAGE_STORAGE_IMAGE = (0x1u<<12),
	USAGE_COLOR_ATTACHMENT = (0x1u<<13),
	USAGE_INPUT_ATTACHMENT = (0x1u<<14),
	USAGE_DEPTH_STENCIL_ATTACHMENT = (0x1u<<15),
};
// True if the given usage combination allows the device to write to a buffer
// (transfer destination or any storage buffer variant).
bool supportsDeviceBufferWrites (Usage usage)
{
	return (usage & (USAGE_TRANSFER_DST | USAGE_STORAGE_BUFFER | USAGE_STORAGE_TEXEL_BUFFER)) != 0;
}
// True if the given usage combination allows the device to write to an image
// (transfer destination, storage image or color attachment).
bool supportsDeviceImageWrites (Usage usage)
{
	return (usage & (USAGE_TRANSFER_DST | USAGE_STORAGE_IMAGE | USAGE_COLOR_ATTACHMENT)) != 0;
}
// Sequential access enums
// Non-bitfield (0..ACCESS_LAST-1) counterparts of VkAccessFlagBits, usable as
// array indices and loop counters when iterating over every access type.
enum Access
{
	ACCESS_INDIRECT_COMMAND_READ_BIT = 0,
	ACCESS_INDEX_READ_BIT,
	ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
	ACCESS_UNIFORM_READ_BIT,
	ACCESS_INPUT_ATTACHMENT_READ_BIT,
	ACCESS_SHADER_READ_BIT,
	ACCESS_SHADER_WRITE_BIT,
	ACCESS_COLOR_ATTACHMENT_READ_BIT,
	ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
	ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
	ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
	ACCESS_TRANSFER_READ_BIT,
	ACCESS_TRANSFER_WRITE_BIT,
	ACCESS_HOST_READ_BIT,
	ACCESS_HOST_WRITE_BIT,
	ACCESS_MEMORY_READ_BIT,
	ACCESS_MEMORY_WRITE_BIT,

	ACCESS_LAST
};
// Sequential stage enums
// Non-bitfield (0..PIPELINESTAGE_LAST-1) counterparts of
// VkPipelineStageFlagBits, usable as array indices and loop counters.
enum PipelineStage
{
	PIPELINESTAGE_TOP_OF_PIPE_BIT = 0,
	PIPELINESTAGE_BOTTOM_OF_PIPE_BIT,
	PIPELINESTAGE_DRAW_INDIRECT_BIT,
	PIPELINESTAGE_VERTEX_INPUT_BIT,
	PIPELINESTAGE_VERTEX_SHADER_BIT,
	PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT,
	PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT,
	PIPELINESTAGE_GEOMETRY_SHADER_BIT,
	PIPELINESTAGE_FRAGMENT_SHADER_BIT,
	PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT,
	PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT,
	PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
	PIPELINESTAGE_COMPUTE_SHADER_BIT,
	PIPELINESTAGE_TRANSFER_BIT,
	PIPELINESTAGE_HOST_BIT,

	PIPELINESTAGE_LAST
};
// Map a single VkPipelineStageFlagBits bit to its sequential PipelineStage
// counterpart. Unknown bits trigger DE_FATAL and return PIPELINESTAGE_LAST.
PipelineStage pipelineStageFlagToPipelineStage (vk::VkPipelineStageFlagBits flags)
{
	static const struct
	{
		vk::VkPipelineStageFlagBits	flag;
		PipelineStage				stage;
	} s_mappings[] =
	{
		{ vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,					PIPELINESTAGE_TOP_OF_PIPE_BIT					},
		{ vk::VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,					PIPELINESTAGE_BOTTOM_OF_PIPE_BIT				},
		{ vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,					PIPELINESTAGE_DRAW_INDIRECT_BIT					},
		{ vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,					PIPELINESTAGE_VERTEX_INPUT_BIT					},
		{ vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,					PIPELINESTAGE_VERTEX_SHADER_BIT					},
		{ vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,	PIPELINESTAGE_TESSELLATION_CONTROL_SHADER_BIT	},
		{ vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,	PIPELINESTAGE_TESSELLATION_EVALUATION_SHADER_BIT},
		{ vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,				PIPELINESTAGE_GEOMETRY_SHADER_BIT				},
		{ vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,				PIPELINESTAGE_FRAGMENT_SHADER_BIT				},
		{ vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,			PIPELINESTAGE_EARLY_FRAGMENT_TESTS_BIT			},
		{ vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,			PIPELINESTAGE_LATE_FRAGMENT_TESTS_BIT			},
		{ vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,		PIPELINESTAGE_COLOR_ATTACHMENT_OUTPUT_BIT		},
		{ vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,					PIPELINESTAGE_COMPUTE_SHADER_BIT				},
		{ vk::VK_PIPELINE_STAGE_TRANSFER_BIT,						PIPELINESTAGE_TRANSFER_BIT						},
		{ vk::VK_PIPELINE_STAGE_HOST_BIT,							PIPELINESTAGE_HOST_BIT							}
	};

	for (int ndx = 0; ndx < (int)(sizeof(s_mappings) / sizeof(s_mappings[0])); ndx++)
	{
		if (s_mappings[ndx].flag == flags)
			return s_mappings[ndx].stage;
	}

	DE_FATAL("Unknown pipeline stage flags");
	return PIPELINESTAGE_LAST;
}
// Combine usage bits while retaining the Usage type.
Usage operator| (Usage a, Usage b)
{
	const deUint32 combined = (deUint32)a | (deUint32)b;
	return (Usage)combined;
}
// Intersect usage bits while retaining the Usage type.
Usage operator& (Usage a, Usage b)
{
	const deUint32 masked = (deUint32)a & (deUint32)b;
	return (Usage)masked;
}
// Build a '_'-separated test-case name from the set bits in 'usage'.
string usageToName (Usage usage)
{
	const struct
	{
		Usage				usage;
		const char* const	name;
	} s_names[] =
	{
		{ USAGE_HOST_READ,					"host_read" },
		{ USAGE_HOST_WRITE,					"host_write" },
		{ USAGE_TRANSFER_SRC,				"transfer_src" },
		{ USAGE_TRANSFER_DST,				"transfer_dst" },
		{ USAGE_INDEX_BUFFER,				"index_buffer" },
		{ USAGE_VERTEX_BUFFER,				"vertex_buffer" },
		{ USAGE_UNIFORM_BUFFER,				"uniform_buffer" },
		{ USAGE_STORAGE_BUFFER,				"storage_buffer" },
		{ USAGE_UNIFORM_TEXEL_BUFFER,		"uniform_texel_buffer" },
		{ USAGE_STORAGE_TEXEL_BUFFER,		"storage_texel_buffer" },
		{ USAGE_INDIRECT_BUFFER,			"indirect_buffer" },
		{ USAGE_SAMPLED_IMAGE,				"image_sampled" },
		{ USAGE_STORAGE_IMAGE,				"storage_image" },
		{ USAGE_COLOR_ATTACHMENT,			"color_attachment" },
		{ USAGE_INPUT_ATTACHMENT,			"input_attachment" },
		{ USAGE_DEPTH_STENCIL_ATTACHMENT,	"depth_stencil_attachment" },
	};

	std::ostringstream	name;
	const char*			separator	= "";

	for (size_t ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_names); ndx++)
	{
		if ((usage & s_names[ndx].usage) != 0)
		{
			// First appended name gets no separator
			name << separator << s_names[ndx].name;
			separator = "_";
		}
	}

	return name.str();
}
// Translate the test Usage bitfield into VkBufferUsageFlags.
vk::VkBufferUsageFlags usageToBufferUsageFlags (Usage usage)
{
	const struct
	{
		Usage					usage;
		vk::VkBufferUsageFlags	flag;
	} s_mappings[] =
	{
		{ USAGE_TRANSFER_SRC,			vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT },
		{ USAGE_TRANSFER_DST,			vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT },
		{ USAGE_INDEX_BUFFER,			vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT },
		{ USAGE_VERTEX_BUFFER,			vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT },
		{ USAGE_INDIRECT_BUFFER,		vk::VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT },
		{ USAGE_UNIFORM_BUFFER,			vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT },
		{ USAGE_STORAGE_BUFFER,			vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT },
		{ USAGE_UNIFORM_TEXEL_BUFFER,	vk::VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT },
		{ USAGE_STORAGE_TEXEL_BUFFER,	vk::VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT },
	};

	vk::VkBufferUsageFlags flags = 0;

	for (size_t ndx = 0; ndx < (sizeof(s_mappings) / sizeof(s_mappings[0])); ndx++)
	{
		if ((usage & s_mappings[ndx].usage) != 0)
			flags |= s_mappings[ndx].flag;
	}

	return flags;
}
// Translate the test Usage bitfield into VkImageUsageFlags.
vk::VkImageUsageFlags usageToImageUsageFlags (Usage usage)
{
	const struct
	{
		Usage					usage;
		vk::VkImageUsageFlags	flag;
	} s_mappings[] =
	{
		{ USAGE_TRANSFER_SRC,				vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT },
		{ USAGE_TRANSFER_DST,				vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT },
		{ USAGE_SAMPLED_IMAGE,				vk::VK_IMAGE_USAGE_SAMPLED_BIT },
		{ USAGE_STORAGE_IMAGE,				vk::VK_IMAGE_USAGE_STORAGE_BIT },
		{ USAGE_COLOR_ATTACHMENT,			vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT },
		{ USAGE_INPUT_ATTACHMENT,			vk::VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT },
		{ USAGE_DEPTH_STENCIL_ATTACHMENT,	vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT },
	};

	vk::VkImageUsageFlags flags = 0;

	for (size_t ndx = 0; ndx < (sizeof(s_mappings) / sizeof(s_mappings[0])); ndx++)
	{
		if ((usage & s_mappings[ndx].usage) != 0)
			flags |= s_mappings[ndx].flag;
	}

	return flags;
}
// Translate the test Usage bitfield into the pipeline stages that may touch
// the memory object.
vk::VkPipelineStageFlags usageToStageFlags (Usage usage)
{
	// Usages consumed by any programmable shader stage
	const Usage shaderResourceUsages	= USAGE_UNIFORM_BUFFER
										| USAGE_STORAGE_BUFFER
										| USAGE_UNIFORM_TEXEL_BUFFER
										| USAGE_STORAGE_TEXEL_BUFFER
										| USAGE_SAMPLED_IMAGE
										| USAGE_STORAGE_IMAGE;
	const vk::VkPipelineStageFlags allShaderStages	= vk::VK_PIPELINE_STAGE_VERTEX_SHADER_BIT
													| vk::VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT
													| vk::VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT
													| vk::VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT
													| vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
													| vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;

	vk::VkPipelineStageFlags stages = 0;

	if ((usage & (USAGE_HOST_READ | USAGE_HOST_WRITE)) != 0)
		stages |= vk::VK_PIPELINE_STAGE_HOST_BIT;

	if ((usage & (USAGE_TRANSFER_SRC | USAGE_TRANSFER_DST)) != 0)
		stages |= vk::VK_PIPELINE_STAGE_TRANSFER_BIT;

	if ((usage & (USAGE_VERTEX_BUFFER | USAGE_INDEX_BUFFER)) != 0)
		stages |= vk::VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;

	if ((usage & USAGE_INDIRECT_BUFFER) != 0)
		stages |= vk::VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;

	if ((usage & shaderResourceUsages) != 0)
		stages |= allShaderStages;

	if ((usage & USAGE_INPUT_ATTACHMENT) != 0)
		stages |= vk::VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;

	if ((usage & USAGE_COLOR_ATTACHMENT) != 0)
		stages |= vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;

	if ((usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0)
	{
		stages |= vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
				| vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
	}

	return stages;
}
// Translate the test Usage bitfield into the access types that may touch
// the memory object.
vk::VkAccessFlags usageToAccessFlags (Usage usage)
{
	// Storage resources allow both shader reads and writes
	const Usage storageUsages = USAGE_STORAGE_BUFFER
							  | USAGE_STORAGE_TEXEL_BUFFER
							  | USAGE_STORAGE_IMAGE;

	vk::VkAccessFlags access = 0;

	if ((usage & USAGE_HOST_READ) != 0)
		access |= vk::VK_ACCESS_HOST_READ_BIT;

	if ((usage & USAGE_HOST_WRITE) != 0)
		access |= vk::VK_ACCESS_HOST_WRITE_BIT;

	if ((usage & USAGE_TRANSFER_SRC) != 0)
		access |= vk::VK_ACCESS_TRANSFER_READ_BIT;

	if ((usage & USAGE_TRANSFER_DST) != 0)
		access |= vk::VK_ACCESS_TRANSFER_WRITE_BIT;

	if ((usage & USAGE_INDEX_BUFFER) != 0)
		access |= vk::VK_ACCESS_INDEX_READ_BIT;

	if ((usage & USAGE_VERTEX_BUFFER) != 0)
		access |= vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;

	if ((usage & (USAGE_UNIFORM_BUFFER | USAGE_UNIFORM_TEXEL_BUFFER)) != 0)
		access |= vk::VK_ACCESS_UNIFORM_READ_BIT;

	if ((usage & USAGE_SAMPLED_IMAGE) != 0)
		access |= vk::VK_ACCESS_SHADER_READ_BIT;

	if ((usage & storageUsages) != 0)
		access |= vk::VK_ACCESS_SHADER_READ_BIT | vk::VK_ACCESS_SHADER_WRITE_BIT;

	if ((usage & USAGE_INDIRECT_BUFFER) != 0)
		access |= vk::VK_ACCESS_INDIRECT_COMMAND_READ_BIT;

	if ((usage & USAGE_COLOR_ATTACHMENT) != 0)
		access |= vk::VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;

	if ((usage & USAGE_INPUT_ATTACHMENT) != 0)
		access |= vk::VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;

	if ((usage & USAGE_DEPTH_STENCIL_ATTACHMENT) != 0)
	{
		access |= vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
				| vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
	}

	return access;
}
// Parameters for one pipeline barrier test case.
struct TestConfig
{
	Usage				usage;		// Allowed usages for the tested memory object
	vk::VkDeviceSize	size;		// Size of the memory allocation under test
	vk::VkSharingMode	sharing;	// Sharing mode used for created buffers/images
};
// Allocate a single command buffer of the given level from 'pool'.
vk::Move<vk::VkCommandBuffer> createCommandBuffer (const vk::DeviceInterface&	vkd,
												   vk::VkDevice					device,
												   vk::VkCommandPool			pool,
												   vk::VkCommandBufferLevel		level)
{
	const vk::VkCommandBufferAllocateInfo allocateInfo =
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
		DE_NULL,	// pNext
		pool,		// commandPool
		level,		// level
		1u			// commandBufferCount
	};
	return vk::allocateCommandBuffer(vkd, device, &allocateInfo);
}
// Allocate a command buffer and call vkBeginCommandBuffer on it.
// Secondary command buffers get an inheritance info with no render pass,
// framebuffer or query state.
vk::Move<vk::VkCommandBuffer> createBeginCommandBuffer (const vk::DeviceInterface& vkd,
vk::VkDevice device,
vk::VkCommandPool pool,
vk::VkCommandBufferLevel level)
{
	const vk::VkCommandBufferInheritanceInfo inheritInfo =
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
		DE_NULL,	// pNext
		0,			// renderPass
		0,			// subpass
		0,			// framebuffer
		VK_FALSE,	// occlusionQueryEnable
		0u,			// queryFlags
		0u			// pipelineStatistics
	};
	const vk::VkCommandBufferBeginInfo beginInfo =
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
		DE_NULL,
		0u,
		// Inheritance info is required (and only meaningful) for secondary buffers
		(level == vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY ? &inheritInfo : (const vk::VkCommandBufferInheritanceInfo*)DE_NULL),
	};

	vk::Move<vk::VkCommandBuffer> commandBuffer (createCommandBuffer(vkd, device, pool, level));

	// NOTE(review): the VkResult of beginCommandBuffer is ignored here; a
	// failure would surface later as corrupt recording. Consider wrapping in
	// VK_CHECK like the other calls in this file.
	vkd.beginCommandBuffer(*commandBuffer, &beginInfo);

	return commandBuffer;
}
// Create a command pool whose buffers may be individually reset.
vk::Move<vk::VkCommandPool> createCommandPool (const vk::DeviceInterface&	vkd,
											   vk::VkDevice					device,
											   deUint32						queueFamilyIndex)
{
	const vk::VkCommandPoolCreateInfo createInfo =
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
		DE_NULL,	// pNext
		vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
		queueFamilyIndex,
	};
	return vk::createCommandPool(vkd, device, &createInfo);
}
// Create a buffer with the given size, usage and sharing mode.
// Fix: do not form a pointer into a possibly empty 'queueFamilies' vector --
// &queueFamilies[0] on an empty vector is undefined behavior. The pointer is
// only read by the driver for VK_SHARING_MODE_CONCURRENT, so DE_NULL is a
// valid substitute when the list is empty.
vk::Move<vk::VkBuffer> createBuffer (const vk::DeviceInterface&	vkd,
									 vk::VkDevice				device,
									 vk::VkDeviceSize			size,
									 vk::VkBufferUsageFlags		usage,
									 vk::VkSharingMode			sharingMode,
									 const vector<deUint32>&	queueFamilies)
{
	const vk::VkBufferCreateInfo createInfo =
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		DE_NULL,
		0,	// flags
		size,
		usage,
		sharingMode,
		(deUint32)queueFamilies.size(),
		(queueFamilies.empty() ? DE_NULL : &queueFamilies[0])
	};
	return vk::createBuffer(vkd, device, &createInfo);
}
// Allocate 'size' bytes of device memory from the given memory type.
vk::Move<vk::VkDeviceMemory> allocMemory (const vk::DeviceInterface&	vkd,
										  vk::VkDevice					device,
										  vk::VkDeviceSize				size,
										  deUint32						memoryTypeIndex)
{
	const vk::VkMemoryAllocateInfo allocateInfo =
	{
		vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,	// sType
		DE_NULL,									// pNext
		size,										// allocationSize
		memoryTypeIndex								// memoryTypeIndex
	};
	return vk::allocateMemory(vkd, device, &allocateInfo);
}
// Allocate memory compatible with 'buffer' and the requested property flags,
// bind it to the buffer and return the allocation. Iterates over all memory
// types, skipping incompatible ones and retrying on out-of-memory, and fails
// the test if no type yields a successful allocation.
vk::Move<vk::VkDeviceMemory> bindBufferMemory (const vk::InstanceInterface& vki,
const vk::DeviceInterface& vkd,
vk::VkPhysicalDevice physicalDevice,
vk::VkDevice device,
vk::VkBuffer buffer,
vk::VkMemoryPropertyFlags properties)
{
	const vk::VkMemoryRequirements memoryRequirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
	const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
	deUint32 memoryTypeIndex;

	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
	{
		// Type must be allowed by the buffer's requirements and provide all requested properties
		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
		{
			try
			{
				const vk::VkMemoryAllocateInfo allocationInfo =
				{
					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
					DE_NULL,
					memoryRequirements.size,
					memoryTypeIndex
				};
				vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));

				VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0));

				return memory;
			}
			catch (const vk::Error& error)
			{
				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
				{
					// Try next memory type/heap if out of memory
				}
				else
				{
					// Throw all other errors forward
					throw;
				}
			}
		}
	}

	TCU_FAIL("Failed to allocate memory for buffer");
}
// Image counterpart of bindBufferMemory: allocate memory compatible with
// 'image' and the requested property flags, bind and return it. Skips
// incompatible memory types, retries on out-of-memory, and fails the test if
// no memory type works.
vk::Move<vk::VkDeviceMemory> bindImageMemory (const vk::InstanceInterface& vki,
const vk::DeviceInterface& vkd,
vk::VkPhysicalDevice physicalDevice,
vk::VkDevice device,
vk::VkImage image,
vk::VkMemoryPropertyFlags properties)
{
	const vk::VkMemoryRequirements memoryRequirements = vk::getImageMemoryRequirements(vkd, device, image);
	const vk::VkPhysicalDeviceMemoryProperties memoryProperties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
	deUint32 memoryTypeIndex;

	for (memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
	{
		// Type must be allowed by the image's requirements and provide all requested properties
		if ((memoryRequirements.memoryTypeBits & (0x1u << memoryTypeIndex))
			&& (memoryProperties.memoryTypes[memoryTypeIndex].propertyFlags & properties) == properties)
		{
			try
			{
				const vk::VkMemoryAllocateInfo allocationInfo =
				{
					vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
					DE_NULL,
					memoryRequirements.size,
					memoryTypeIndex
				};
				vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &allocationInfo));

				VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0));

				return memory;
			}
			catch (const vk::Error& error)
			{
				if (error.getError() == vk::VK_ERROR_OUT_OF_DEVICE_MEMORY
					|| error.getError() == vk::VK_ERROR_OUT_OF_HOST_MEMORY)
				{
					// Try next memory type/heap if out of memory
				}
				else
				{
					// Throw all other errors forward
					throw;
				}
			}
		}
	}

	TCU_FAIL("Failed to allocate memory for image");
}
// Submit a single command buffer to 'queue' and block until it completes.
void queueRun (const vk::DeviceInterface&	vkd,
			   vk::VkQueue					queue,
			   vk::VkCommandBuffer			commandBuffer)
{
	const vk::VkSubmitInfo submit =
	{
		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
		DE_NULL,									// pNext

		0,											// waitSemaphoreCount
		DE_NULL,									// pWaitSemaphores
		(const vk::VkPipelineStageFlags*)DE_NULL,	// pWaitDstStageMask

		1,											// commandBufferCount
		&commandBuffer,								// pCommandBuffers

		0,											// signalSemaphoreCount
		DE_NULL										// pSignalSemaphores
	};

	VK_CHECK(vkd.queueSubmit(queue, 1, &submit, 0));
	VK_CHECK(vkd.queueWaitIdle(queue));
}
// Map the first 'size' bytes of 'memory' and return the host pointer.
void* mapMemory (const vk::DeviceInterface&	vkd,
				 vk::VkDevice				device,
				 vk::VkDeviceMemory			memory,
				 vk::VkDeviceSize			size)
{
	void* mappedPtr = 0;

	VK_CHECK(vkd.mapMemory(device, memory, 0, size, 0, &mappedPtr));

	return mappedPtr;
}
// Host-side reference model of the tested memory contents. Tracks byte
// values plus a per-byte "defined" bit so verification only compares bytes
// that have a known value.
class ReferenceMemory
{
public:
			ReferenceMemory	(size_t size);

	// Store a byte and mark it defined.
	void	set				(size_t pos, deUint8 val);
	// Read a byte; asserts the byte has been defined.
	deUint8	get				(size_t pos) const;
	// True if the byte at 'pos' has a known value.
	bool	isDefined		(size_t pos) const;

	void	setDefined		(size_t offset, size_t size, const void* data);
	void	setUndefined	(size_t offset, size_t size);
	void	setData			(size_t offset, size_t size, const void* data);

	size_t	getSize			(void) const { return m_data.size(); }

private:
	vector<deUint8>		m_data;		// Reference byte values
	vector<deUint64>	m_defined;	// Bitset, one bit per byte of m_data
};
// All bytes start as zero and undefined.
ReferenceMemory::ReferenceMemory (size_t size)
	: m_data	(size, 0)
	// One bit per byte, rounded up to whole 64-bit words
	, m_defined	(size / 64 + (size % 64 == 0 ? 0 : 1), 0ull)
{
}
// Store a byte value and mark the byte as defined.
void ReferenceMemory::set (size_t pos, deUint8 val)
{
	DE_ASSERT(pos < m_data.size());

	const size_t wordNdx	= pos / 64;
	const size_t bitNdx		= pos % 64;

	m_data[pos] = val;
	m_defined[wordNdx] |= 0x1ull << bitNdx;
}
// Copy 'size' bytes from 'data_' into the reference at 'offset', marking
// every copied byte as defined.
void ReferenceMemory::setData (size_t offset, size_t size, const void* data_)
{
	const deUint8* const bytes = (const deUint8*)data_;

	DE_ASSERT(offset < m_data.size());
	DE_ASSERT(offset + size <= m_data.size());

	// \todo [2016-03-09 mika] Optimize
	for (size_t ndx = 0; ndx < size; ndx++)
	{
		const size_t pos = offset + ndx;

		m_data[pos] = bytes[ndx];
		m_defined[pos / 64] |= 0x1ull << (pos % 64);
	}
}
void ReferenceMemory::setUndefined	(size_t offset, size_t size)
{
	// NOTE(review): despite the name this *sets* the defined bits (|=) and
	// leaves m_data untouched -- presumably so that ranges with undefined
	// device-written content still count as "known" for verification, but
	// confirm the intent; clearing the bits (&= ~mask) would match the name.
	// \todo [2016-03-09 mika] Optimize
	for (size_t pos = 0; pos < size; pos++)
		m_defined[(offset + pos) / 64] |= 0x1ull << ((offset + pos) % 64);
}
// Read a reference byte; the byte must have been marked defined first.
deUint8 ReferenceMemory::get (size_t pos) const
{
	DE_ASSERT(pos < m_data.size());
	DE_ASSERT(isDefined(pos));
	return m_data[pos];
}
// True if the byte at 'pos' has been marked defined in the bitset.
bool ReferenceMemory::isDefined (size_t pos) const
{
	DE_ASSERT(pos < m_data.size());

	return (m_defined[pos / 64] & (0x1ull << (pos % 64))) != 0;
}
// Owns the VkDeviceMemory allocation under test along with precomputed
// limits for the largest buffer and RGBA8 image that can be created from it.
class Memory
{
public:
							Memory				(const vk::InstanceInterface&	vki,
												 const vk::DeviceInterface&		vkd,
												 vk::VkPhysicalDevice			physicalDevice,
												 vk::VkDevice					device,
												 vk::VkDeviceSize				size,
												 deUint32						memoryTypeIndex,
												 vk::VkDeviceSize				maxBufferSize,
												 deInt32						maxImageWidth,
												 deInt32						maxImageHeight);

	vk::VkDeviceSize		getSize				(void) const { return m_size; }
	vk::VkDeviceSize		getMaxBufferSize	(void) const { return m_maxBufferSize; }
	// Buffers are only usable if a non-zero max size was found
	bool					getSupportBuffers	(void) const { return m_maxBufferSize > 0; }

	deInt32					getMaxImageWidth	(void) const { return m_maxImageWidth; }
	deInt32					getMaxImageHeight	(void) const { return m_maxImageHeight; }
	// Images are only usable if a non-zero max width was found
	bool					getSupportImages	(void) const { return m_maxImageWidth > 0; }

	const vk::VkMemoryType&	getMemoryType		(void) const { return m_memoryType; }
	deUint32				getMemoryTypeIndex	(void) const { return m_memoryTypeIndex; }
	vk::VkDeviceMemory		getMemory			(void) const { return *m_memory; }

private:
	const vk::VkDeviceSize					m_size;				// Size of the allocation
	const deUint32							m_memoryTypeIndex;	// Memory type the allocation came from
	const vk::VkMemoryType					m_memoryType;		// Cached properties of that type
	const vk::Unique<vk::VkDeviceMemory>	m_memory;			// The owned allocation
	const vk::VkDeviceSize					m_maxBufferSize;	// Largest buffer creatable in this memory
	const deInt32							m_maxImageWidth;	// Largest RGBA8 image width
	const deInt32							m_maxImageHeight;	// Largest RGBA8 image height
};
// Fetch the VkMemoryType description for the given memory type index.
vk::VkMemoryType getMemoryTypeInfo (const vk::InstanceInterface&	vki,
									vk::VkPhysicalDevice			device,
									deUint32						memoryTypeIndex)
{
	const vk::VkPhysicalDeviceMemoryProperties properties = vk::getPhysicalDeviceMemoryProperties(vki, device);

	DE_ASSERT(memoryTypeIndex < properties.memoryTypeCount);

	return properties.memoryTypes[memoryTypeIndex];
}
// Binary-search the largest buffer size whose memory requirements fit into
// 'memorySize' and are satisfiable by the given memory type. Returns 0 if no
// size works.
vk::VkDeviceSize findMaxBufferSize (const vk::DeviceInterface& vkd,
vk::VkDevice device,
vk::VkBufferUsageFlags usage,
vk::VkSharingMode sharingMode,
const vector<deUint32>& queueFamilies,
vk::VkDeviceSize memorySize,
deUint32 memoryTypeIndex)
{
	vk::VkDeviceSize lastSuccess = 0;
	vk::VkDeviceSize currentSize = memorySize / 2;

	// Fast path: the full memory size may already work exactly
	{
		const vk::Unique<vk::VkBuffer>	buffer			(createBuffer(vkd, device, memorySize, usage, sharingMode, queueFamilies));
		const vk::VkMemoryRequirements	requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));

		if (requirements.size == memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
			return memorySize;
	}

	// Bisection: step up on success, down on failure, halving the step each
	// iteration until it reaches zero
	for (vk::VkDeviceSize stepSize = memorySize / 4; currentSize > 0; stepSize /= 2)
	{
		const vk::Unique<vk::VkBuffer>	buffer			(createBuffer(vkd, device, currentSize, usage, sharingMode, queueFamilies));
		const vk::VkMemoryRequirements	requirements	(vk::getBufferMemoryRequirements(vkd, device, *buffer));

		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
		{
			lastSuccess = currentSize;
			currentSize += stepSize;
		}
		else
			currentSize -= stepSize;

		if (stepSize == 0)
			break;
	}

	return lastSuccess;
}
// Round size down to the maximum W * H * 4, where W and H < 4096.
// Fix: the previous initialization (bestW = de::max(maxTexelCount, 4096))
// could return a result whose implied width or height exceeded the 4096
// limit for large sizes, which findImageSizeWxHx4() then cannot decompose.
// This version only ever considers dimensions below maxTextureSize.
vk::VkDeviceSize roundBufferSizeToWxHx4 (vk::VkDeviceSize size)
{
	const vk::VkDeviceSize	maxTextureSize	= 4096;
	const vk::VkDeviceSize	maxTexelCount	= size / 4;
	vk::VkDeviceSize		bestW			= 0;
	vk::VkDeviceSize		bestH			= 0;

	// \todo [2016-03-09 mika] Could probably be faster?
	for (vk::VkDeviceSize w = 1; w < maxTextureSize && w <= maxTexelCount; w++)
	{
		vk::VkDeviceSize h = maxTexelCount / w;

		// Clamp height into the valid range
		if (h >= maxTextureSize)
			h = maxTextureSize - 1;

		if (w * h > bestW * bestH)
		{
			bestW = w;
			bestH = h;
		}
	}

	// size < 4 yields 0, matching the previous behavior
	return bestW * bestH * 4;
}
// Find RGBA8 image size that has exactly "size" of number of bytes.
// "size" must be W * H * 4 where W and H < 4096
IVec2 findImageSizeWxHx4 (vk::VkDeviceSize size)
{
	const vk::VkDeviceSize	maxTextureSize	= 4096;
	vk::VkDeviceSize		texelCount		= size / 4;

	DE_ASSERT((size % 4) == 0);

	// \todo [2016-03-09 mika] Could probably be faster?
	// Fix: use 'w <= texelCount' (previously 'w < texelCount') so that the
	// degenerate 1x1 case (size == 4) and Wx1 decompositions are found
	// instead of falling through to DE_FATAL.
	for (vk::VkDeviceSize w = 1; w < maxTextureSize && w <= texelCount; w++)
	{
		const vk::VkDeviceSize h = texelCount / w;

		if ((texelCount % w) == 0 && h < maxTextureSize)
			return IVec2((int)w, (int)h);
	}

	DE_FATAL("Invalid size");
	return IVec2(-1, -1);
}
// Binary-search the largest RGBA8 2D image dimensions whose memory
// requirements fit into 'memorySize' and are satisfiable by the given memory
// type. Returns (0, 0) if nothing fits.
IVec2 findMaxRGBA8ImageSize (const vk::DeviceInterface& vkd,
vk::VkDevice device,
vk::VkImageUsageFlags usage,
vk::VkSharingMode sharingMode,
const vector<deUint32>& queueFamilies,
vk::VkDeviceSize memorySize,
deUint32 memoryTypeIndex)
{
	IVec2 lastSuccess (0);
	IVec2 currentSize;

	// Initial guess: roughly square dimensions covering the whole memory size
	{
		const deUint32 texelCount = (deUint32)(memorySize / 4);
		const deUint32 width = (deUint32)deFloatSqrt((float)texelCount);
		const deUint32 height = texelCount / width;

		currentSize[0] = deMaxu32(width, height);
		currentSize[1] = deMinu32(width, height);
	}

	// Bisection on both dimensions simultaneously: grow on success, shrink on
	// failure, halving the step until it reaches zero
	for (deInt32 stepSize = currentSize[0] / 2; currentSize[0] > 0; stepSize /= 2)
	{
		const vk::VkImageCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0u,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)currentSize[0],
				(deUint32)currentSize[1],
				1u,
			},
			1u, 1u,
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			usage,
			sharingMode,
			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};
		const vk::Unique<vk::VkImage> image (vk::createImage(vkd, device, &createInfo));
		const vk::VkMemoryRequirements requirements (vk::getImageMemoryRequirements(vkd, device, *image));

		if (requirements.size <= memorySize && requirements.memoryTypeBits & (0x1u << memoryTypeIndex))
		{
			lastSuccess = currentSize;
			currentSize[0] += stepSize;
			currentSize[1] += stepSize;
		}
		else
		{
			currentSize[0] -= stepSize;
			currentSize[1] -= stepSize;
		}

		if (stepSize == 0)
			break;
	}

	return lastSuccess;
}
// Allocate the memory object under test and cache its type info and the
// precomputed buffer/image limits.
Memory::Memory (const vk::InstanceInterface&	vki,
				const vk::DeviceInterface&		vkd,
				vk::VkPhysicalDevice			physicalDevice,
				vk::VkDevice					device,
				vk::VkDeviceSize				size,
				deUint32						memoryTypeIndex,
				vk::VkDeviceSize				maxBufferSize,
				deInt32							maxImageWidth,
				deInt32							maxImageHeight)
	: m_size			(size)
	, m_memoryTypeIndex	(memoryTypeIndex)
	, m_memoryType		(getMemoryTypeInfo(vki, physicalDevice, memoryTypeIndex))
	, m_memory			(allocMemory(vkd, device, size, memoryTypeIndex))
	, m_maxBufferSize	(maxBufferSize)
	, m_maxImageWidth	(maxImageWidth)
	, m_maxImageHeight	(maxImageHeight)
{
}
// Shared per-test Vulkan context: interfaces, device, queues and a command
// pool for the primary queue family.
class Context
{
public:
	Context (const vk::InstanceInterface&						vki,
			 const vk::DeviceInterface&							vkd,
			 vk::VkPhysicalDevice								physicalDevice,
			 vk::VkDevice										device,
			 vk::VkQueue										queue,
			 deUint32											queueFamilyIndex,
			 const vector<pair<deUint32, vk::VkQueue> >&		queues,
			 const vk::ProgramCollection<vk::ProgramBinary>&	binaryCollection)
		: m_vki					(vki)
		, m_vkd					(vkd)
		, m_physicalDevice		(physicalDevice)
		, m_device				(device)
		, m_queue				(queue)
		, m_queueFamilyIndex	(queueFamilyIndex)
		, m_queues				(queues)
		, m_commandPool			(createCommandPool(vkd, device, queueFamilyIndex))
		, m_binaryCollection	(binaryCollection)
	{
		// Cache the family indices of all queues for sharing-mode setups
		m_queueFamilies.reserve(m_queues.size());
		for (size_t queueNdx = 0; queueNdx < m_queues.size(); queueNdx++)
			m_queueFamilies.push_back(m_queues[queueNdx].first);
	}

	const vk::InstanceInterface&					getInstanceInterface	(void) const { return m_vki; }
	vk::VkPhysicalDevice							getPhysicalDevice		(void) const { return m_physicalDevice; }
	vk::VkDevice									getDevice				(void) const { return m_device; }
	const vk::DeviceInterface&						getDeviceInterface		(void) const { return m_vkd; }
	vk::VkQueue										getQueue				(void) const { return m_queue; }
	deUint32										getQueueFamily			(void) const { return m_queueFamilyIndex; }
	const vector<pair<deUint32, vk::VkQueue> >&		getQueues				(void) const { return m_queues; }
	// Fix: return by const reference; previously this returned 'const vector'
	// by value, copying the whole vector on every call.
	const vector<deUint32>&							getQueueFamilies		(void) const { return m_queueFamilies; }
	vk::VkCommandPool								getCommandPool			(void) const { return *m_commandPool; }
	const vk::ProgramCollection<vk::ProgramBinary>&	getBinaryCollection		(void) const { return m_binaryCollection; }

private:
	const vk::InstanceInterface&					m_vki;
	const vk::DeviceInterface&						m_vkd;
	const vk::VkPhysicalDevice						m_physicalDevice;
	const vk::VkDevice								m_device;
	const vk::VkQueue								m_queue;				// Primary queue
	const deUint32									m_queueFamilyIndex;		// Family of the primary queue
	const vector<pair<deUint32, vk::VkQueue> >		m_queues;				// All (family, queue) pairs
	const vk::Unique<vk::VkCommandPool>				m_commandPool;
	const vk::ProgramCollection<vk::ProgramBinary>&	m_binaryCollection;
	vector<deUint32>								m_queueFamilies;		// Families of m_queues, in order
};
// Context for the prepare phase. Tracks the single buffer OR image currently
// bound to the tested memory; at most one of them may exist at a time.
class PrepareContext
{
public:
	PrepareContext (const Context&	context,
					const Memory&	memory)
		: m_context	(context)
		, m_memory	(memory)
	{
	}

	const Memory&									getMemory			(void) const { return m_memory; }
	const Context&									getContext			(void) const { return m_context; }
	const vk::ProgramCollection<vk::ProgramBinary>&	getBinaryCollection	(void) const { return m_context.getBinaryCollection(); }

	// Take ownership of 'buffer'; no image or buffer may be set already.
	void setBuffer (vk::Move<vk::VkBuffer>	buffer,
					vk::VkDeviceSize		size)
	{
		DE_ASSERT(!m_currentImage);
		DE_ASSERT(!m_currentBuffer);

		m_currentBuffer		= buffer;
		m_currentBufferSize	= size;
	}

	vk::VkBuffer getBuffer (void) const { return *m_currentBuffer; }
	vk::VkDeviceSize getBufferSize (void) const
	{
		DE_ASSERT(m_currentBuffer);
		return m_currentBufferSize;
	}

	// Give up ownership without destroying the buffer (caller destroys it).
	void releaseBuffer (void) { m_currentBuffer.disown(); }

	// Take ownership of 'image' and record its layout, memory size and
	// dimensions; no image or buffer may be set already.
	void setImage (vk::Move<vk::VkImage>	image,
				   vk::VkImageLayout		layout,
				   vk::VkDeviceSize			memorySize,
				   deInt32					width,
				   deInt32					height)
	{
		DE_ASSERT(!m_currentImage);
		DE_ASSERT(!m_currentBuffer);

		m_currentImage				= image;
		m_currentImageMemorySize	= memorySize;
		m_currentImageLayout		= layout;
		m_currentImageWidth			= width;
		m_currentImageHeight		= height;
	}

	// Record a layout change performed by a command.
	void setImageLayout (vk::VkImageLayout layout)
	{
		DE_ASSERT(m_currentImage);
		m_currentImageLayout = layout;
	}

	vk::VkImage getImage (void) const { return *m_currentImage; }
	deInt32 getImageWidth (void) const
	{
		DE_ASSERT(m_currentImage);
		return m_currentImageWidth;
	}
	deInt32 getImageHeight (void) const
	{
		DE_ASSERT(m_currentImage);
		return m_currentImageHeight;
	}
	vk::VkDeviceSize getImageMemorySize (void) const
	{
		DE_ASSERT(m_currentImage);
		return m_currentImageMemorySize;
	}

	// Give up ownership without destroying the image (caller destroys it).
	void releaseImage (void) { m_currentImage.disown(); }

	vk::VkImageLayout getImageLayout (void) const
	{
		DE_ASSERT(m_currentImage);
		return m_currentImageLayout;
	}

private:
	const Context&			m_context;
	const Memory&			m_memory;

	vk::Move<vk::VkBuffer>	m_currentBuffer;			// Null when no buffer is set
	vk::VkDeviceSize		m_currentBufferSize;		// Valid only while m_currentBuffer is set

	vk::Move<vk::VkImage>	m_currentImage;				// Null when no image is set
	vk::VkDeviceSize		m_currentImageMemorySize;	// Valid only while m_currentImage is set
	vk::VkImageLayout		m_currentImageLayout;		// Valid only while m_currentImage is set
	deInt32					m_currentImageWidth;		// Valid only while m_currentImage is set
	deInt32					m_currentImageHeight;		// Valid only while m_currentImage is set
};
class ExecuteContext
{
public:
ExecuteContext (const Context& context)
: m_context (context)
{
}
const Context& getContext (void) const { return m_context; }
void setMapping (void* ptr) { m_mapping = ptr; }
void* getMapping (void) const { return m_mapping; }
private:
const Context& m_context;
void* m_mapping;
};
// Context for the verify phase: collects results and holds the host-side
// reference memory and reference image the commands compare against.
class VerifyContext
{
public:
	VerifyContext (TestLog&					log,
				   tcu::ResultCollector&	resultCollector,
				   const Context&			context,
				   vk::VkDeviceSize			size)
		: m_log				(log)
		, m_resultCollector	(resultCollector)
		, m_context			(context)
		, m_reference		((size_t)size)
	{
	}

	const Context&			getContext			(void) const { return m_context; }
	TestLog&				getLog				(void) const { return m_log; }
	tcu::ResultCollector&	getResultCollector	(void) const { return m_resultCollector; }

	ReferenceMemory&		getReference		(void) { return m_reference; }
	TextureLevel&			getReferenceImage	(void) { return m_referenceImage;}

private:
	TestLog&				m_log;
	tcu::ResultCollector&	m_resultCollector;
	const Context&			m_context;
	ReferenceMemory			m_reference;		// Byte-level reference of the tested memory
	TextureLevel			m_referenceImage;	// Reference for image-based verification
};
// Abstract base class for every operation in a test's command sequence.
// Lifecycle: construct (non-Vulkan resources) -> prepare (Vulkan resources,
// command buffers) -> execute (host access / queue submits) -> verify.
class Command
{
public:
	// Constructor should allocate all non-vulkan resources.
	virtual				~Command	(void) {}

	// Get name of the command
	virtual const char*	getName		(void) const = 0;

	// Log prepare operations
	virtual void		logPrepare	(TestLog&, size_t) const {}
	// Log executed operations
	virtual void		logExecute	(TestLog&, size_t) const {}

	// Prepare should allocate all vulkan resources and resources that require
	// that buffer or memory has been already allocated. This should build all
	// command buffers etc.
	virtual void		prepare		(PrepareContext&) {}

	// Execute command. Write or read mapped memory, submit commands to queue
	// etc.
	virtual void		execute		(ExecuteContext&) {}

	// Verify that results are correct.
	virtual void		verify		(VerifyContext&, size_t) {}

protected:
	// Allow only inheritance
						Command		(void) {}

private:
	// Disallow copying.
	// Fix: this previously declared 'operator&' instead of 'operator=',
	// which left the implicitly-declared copy assignment operator available.
						Command		(const Command&);
	Command&			operator=	(const Command&);
};
class Map : public Command
{
public:
Map (void) {}
~Map (void) {}
const char* getName (void) const { return "Map"; }
void logExecute (TestLog& log, size_t commandIndex) const
{
log << TestLog::Message << commandIndex << ":" << getName() << " Map memory" << TestLog::EndMessage;
}
void prepare (PrepareContext& context)
{
m_memory = context.getMemory().getMemory();
m_size = context.getMemory().getSize();
}
void execute (ExecuteContext& context)
{
const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
const vk::VkDevice device = context.getContext().getDevice();
context.setMapping(mapMemory(vkd, device, m_memory, m_size));
}
private:
vk::VkDeviceMemory m_memory;
vk::VkDeviceSize m_size;
};
class UnMap : public Command
{
public:
UnMap (void) {}
~UnMap (void) {}
const char* getName (void) const { return "UnMap"; }
void logExecute (TestLog& log, size_t commandIndex) const
{
log << TestLog::Message << commandIndex << ": Unmap memory" << TestLog::EndMessage;
}
void prepare (PrepareContext& context)
{
m_memory = context.getMemory().getMemory();
}
void execute (ExecuteContext& context)
{
const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
const vk::VkDevice device = context.getContext().getDevice();
vkd.unmapMemory(device, m_memory);
context.setMapping(DE_NULL);
}
private:
vk::VkDeviceMemory m_memory;
};
class Invalidate : public Command
{
public:
Invalidate (void) {}
~Invalidate (void) {}
const char* getName (void) const { return "Invalidate"; }
void logExecute (TestLog& log, size_t commandIndex) const
{
log << TestLog::Message << commandIndex << ": Invalidate mapped memory" << TestLog::EndMessage;
}
void prepare (PrepareContext& context)
{
m_memory = context.getMemory().getMemory();
m_size = context.getMemory().getSize();
}
void execute (ExecuteContext& context)
{
const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
const vk::VkDevice device = context.getContext().getDevice();
vk::invalidateMappedMemoryRange(vkd, device, m_memory, 0, m_size);
}
private:
vk::VkDeviceMemory m_memory;
vk::VkDeviceSize m_size;
};
class Flush : public Command
{
public:
Flush (void) {}
~Flush (void) {}
const char* getName (void) const { return "Flush"; }
void logExecute (TestLog& log, size_t commandIndex) const
{
log << TestLog::Message << commandIndex << ": Flush mapped memory" << TestLog::EndMessage;
}
void prepare (PrepareContext& context)
{
m_memory = context.getMemory().getMemory();
m_size = context.getMemory().getSize();
}
void execute (ExecuteContext& context)
{
const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
const vk::VkDevice device = context.getContext().getDevice();
vk::flushMappedMemoryRange(vkd, device, m_memory, 0, m_size);
}
private:
vk::VkDeviceMemory m_memory;
vk::VkDeviceSize m_size;
};
// Host memory reads and writes through the current mapping.
// read:  reads current contents and stores them for comparison in verify().
// write: fills memory with pseudo-random bytes derived from 'seed'.
// When both are set, each byte is XORed with a random mask (read-modify-write).
// At least one of read/write must be set (execute() hits DE_FATAL otherwise).
class HostMemoryAccess : public Command
{
public:
	HostMemoryAccess (bool read, bool write, deUint32 seed);
	~HostMemoryAccess (void) {}
	const char* getName (void) const { return "HostMemoryAccess"; }
	void logExecute (TestLog& log, size_t commandIndex) const;
	void prepare (PrepareContext& context);
	void execute (ExecuteContext& context);
	void verify (VerifyContext& context, size_t commandIndex);
private:
	const bool m_read;
	const bool m_write;
	const deUint32 m_seed;
	size_t m_size;	// memory size in bytes, captured in prepare()
	vector<deUint8> m_readData;	// bytes observed in execute(); sized only when m_read
};
HostMemoryAccess::HostMemoryAccess (bool read, bool write, deUint32 seed)
	: m_read	(read)
	, m_write	(write)
	, m_seed	(seed)
{
}

void HostMemoryAccess::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ": Host memory access:" << (m_read ? " read" : "") << (m_write ? " write" : "") << ", seed: " << m_seed << TestLog::EndMessage;
}

void HostMemoryAccess::prepare (PrepareContext& context)
{
	m_size = (size_t)context.getMemory().getSize();

	// Observed bytes are stored for later comparison in verify()
	if (m_read)
		m_readData.resize(m_size, 0);
}
// Performs the host-side access through the current mapping. Read-write
// XORs every byte with a seed-derived mask, read records contents, write
// overwrites with seed-derived bytes.
void HostMemoryAccess::execute (ExecuteContext& context)
{
	de::Random		rng	(m_seed);
	deUint8* const	ptr	= (deUint8*)context.getMapping();

	if (m_read && m_write)
	{
		for (size_t pos = 0; pos < m_size; ++pos)
		{
			const deUint8 mask		= rng.getUint8();
			const deUint8 oldValue	= ptr[pos];

			m_readData[pos]	= oldValue;
			ptr[pos]		= (deUint8)(oldValue ^ mask);
		}
	}
	else if (m_read)
	{
		for (size_t pos = 0; pos < m_size; ++pos)
			m_readData[pos] = ptr[pos];
	}
	else if (m_write)
	{
		for (size_t pos = 0; pos < m_size; ++pos)
			ptr[pos] = rng.getUint8();
	}
	else
		DE_FATAL("Host memory access without read or write.");
}
// Checks the bytes observed in execute() against the reference model and
// updates the model with the bytes that were written.
void HostMemoryAccess::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&	resultCollector	= context.getResultCollector();
	ReferenceMemory&		reference		= context.getReference();
	de::Random				rng				(m_seed);

	if (m_read && m_write)
	{
		for (size_t pos = 0; pos < m_size; ++pos)
		{
			// Mask must be consumed for every byte to keep the rng in sync
			// with execute().
			const deUint8 mask		= rng.getUint8();
			const deUint8 observed	= m_readData[pos];

			// Bytes with undefined reference contents can not be checked or
			// updated (the XOR result is undefined too).
			if (!reference.isDefined(pos))
				continue;

			if (observed != reference.get(pos))
			{
				resultCollector.fail(
					de::toString(commandIndex) + ":" + getName()
					+ " Result differs from reference, Expected: "
					+ de::toString(tcu::toHex<8>(reference.get(pos)))
					+ ", Got: "
					+ de::toString(tcu::toHex<8>(observed))
					+ ", At offset: "
					+ de::toString(pos));
				break;
			}

			reference.set(pos, reference.get(pos) ^ mask);
		}
	}
	else if (m_read)
	{
		for (size_t pos = 0; pos < m_size; ++pos)
		{
			const deUint8 observed = m_readData[pos];

			if (!reference.isDefined(pos))
				continue;

			if (observed != reference.get(pos))
			{
				resultCollector.fail(
					de::toString(commandIndex) + ":" + getName()
					+ " Result differs from reference, Expected: "
					+ de::toString(tcu::toHex<8>(reference.get(pos)))
					+ ", Got: "
					+ de::toString(tcu::toHex<8>(observed))
					+ ", At offset: "
					+ de::toString(pos));
				break;
			}
		}
	}
	else if (m_write)
	{
		// Pure write: the reference simply takes the generated bytes.
		for (size_t pos = 0; pos < m_size; ++pos)
			reference.set(pos, rng.getUint8());
	}
	else
		DE_FATAL("Host memory access without read or write.");
}
// Creates a buffer of the maximum size supported by the memory object and
// hands it over to the PrepareContext. The buffer is left unbound.
class CreateBuffer : public Command
{
public:
					CreateBuffer	(vk::VkBufferUsageFlags usage,
									 vk::VkSharingMode sharing);
					~CreateBuffer	(void) {}
	const char*		getName			(void) const { return "CreateBuffer"; }

	void			logPrepare		(TestLog& log, size_t commandIndex) const;
	void			prepare			(PrepareContext& context);

private:
	const vk::VkBufferUsageFlags	m_usage;
	const vk::VkSharingMode			m_sharing;
};

CreateBuffer::CreateBuffer (vk::VkBufferUsageFlags usage,
							vk::VkSharingMode sharing)
	: m_usage	(usage)
	, m_sharing	(sharing)
{
}

void CreateBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Create buffer, Sharing mode: " << m_sharing << ", Usage: " << vk::getBufferUsageFlagsStr(m_usage) << TestLog::EndMessage;
}

void CreateBuffer::prepare (PrepareContext& context)
{
	const Context&				ctx			= context.getContext();
	const vk::DeviceInterface&	vkd			= ctx.getDeviceInterface();
	const vk::VkDeviceSize		bufferSize	= context.getMemory().getMaxBufferSize();

	context.setBuffer(createBuffer(vkd, ctx.getDevice(), bufferSize, m_usage, m_sharing, ctx.getQueueFamilies()), bufferSize);
}
// Takes ownership of the buffer from the PrepareContext in prepare() and
// destroys it in execute().
class DestroyBuffer : public Command
{
public:
				DestroyBuffer	(void);
				~DestroyBuffer	(void) {}
	const char*	getName			(void) const { return "DestroyBuffer"; }

	void		logExecute		(TestLog& log, size_t commandIndex) const;
	void		prepare			(PrepareContext& context);
	void		execute			(ExecuteContext& context);

private:
	vk::Move<vk::VkBuffer>	m_buffer;
};

DestroyBuffer::DestroyBuffer (void)
{
}

void DestroyBuffer::prepare (PrepareContext& context)
{
	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
	const vk::VkDevice			device	= context.getContext().getDevice();

	// Move ownership of the buffer handle out of the context
	m_buffer = vk::Move<vk::VkBuffer>(vk::check(context.getBuffer()), vk::Deleter<vk::VkBuffer>(vkd, device, DE_NULL));
	context.releaseBuffer();
}

void DestroyBuffer::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy buffer" << TestLog::EndMessage;
}

void DestroyBuffer::execute (ExecuteContext& context)
{
	const Context& ctx = context.getContext();

	ctx.getDeviceInterface().destroyBuffer(ctx.getDevice(), m_buffer.disown(), DE_NULL);
}
// Binds the memory object to the current buffer at offset 0.
class BindBufferMemory : public Command
{
public:
				BindBufferMemory	(void) {}
				~BindBufferMemory	(void) {}
	const char*	getName				(void) const { return "BindBufferMemory"; }

	void		logPrepare			(TestLog& log, size_t commandIndex) const;
	void		prepare				(PrepareContext& context);
};

void BindBufferMemory::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to buffer" << TestLog::EndMessage;
}

void BindBufferMemory::prepare (PrepareContext& context)
{
	const Context& ctx = context.getContext();

	VK_CHECK(ctx.getDeviceInterface().bindBufferMemory(ctx.getDevice(), context.getBuffer(), context.getMemory().getMemory(), 0));
}
// Creates a 2D RGBA8 optimal-tiling image using the maximum dimensions
// supported by the memory object. The image is left unbound and in
// UNDEFINED layout; verify() resets the reference image to match the new
// (undefined) contents.
class CreateImage : public Command
{
public:
	CreateImage (vk::VkImageUsageFlags usage,
	             vk::VkSharingMode sharing);
	~CreateImage (void) {}
	const char* getName (void) const { return "CreateImage"; }
	void logPrepare (TestLog& log, size_t commandIndex) const;
	void prepare (PrepareContext& context);
	void verify (VerifyContext& context, size_t commandIndex);
private:
	const vk::VkImageUsageFlags m_usage;
	const vk::VkSharingMode m_sharing;
	deInt32 m_imageWidth;	// chosen in prepare() from Memory limits
	deInt32 m_imageHeight;
};

CreateImage::CreateImage (vk::VkImageUsageFlags usage,
                          vk::VkSharingMode sharing)
	: m_usage (usage)
	, m_sharing (sharing)
{
}

void CreateImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Create image, sharing: " << m_sharing << ", usage: " << vk::getImageUsageFlagsStr(m_usage) << TestLog::EndMessage;
}

void CreateImage::prepare (PrepareContext& context)
{
	const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
	const vk::VkDevice device = context.getContext().getDevice();
	const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();

	m_imageWidth = context.getMemory().getMaxImageWidth();
	m_imageHeight = context.getMemory().getMaxImageHeight();

	{
		const vk::VkImageCreateInfo createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,	// pNext
			0u,	// flags
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,	// depth
			},
			1u, 1u,	// mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			m_usage,
			m_sharing,
			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED	// initialLayout
		};
		vk::Move<vk::VkImage> image (createImage(vkd, device, &createInfo));
		// Query the size before handing the image over so the context knows
		// how much of the memory object the image occupies.
		const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, *image);

		context.setImage(image, vk::VK_IMAGE_LAYOUT_UNDEFINED, requirements.size, m_imageWidth, m_imageHeight);
	}
}

void CreateImage::verify (VerifyContext& context, size_t)
{
	// Fresh image => fresh (undefined) reference contents
	context.getReferenceImage() = TextureLevel(TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), m_imageWidth, m_imageHeight);
}
// Takes ownership of the image from the PrepareContext in prepare() and
// destroys it in execute().
class DestroyImage : public Command
{
public:
				DestroyImage	(void);
				~DestroyImage	(void) {}
	const char*	getName			(void) const { return "DestroyImage"; }

	void		logExecute		(TestLog& log, size_t commandIndex) const;
	void		prepare			(PrepareContext& context);
	void		execute			(ExecuteContext& context);

private:
	vk::Move<vk::VkImage>	m_image;
};

DestroyImage::DestroyImage (void)
{
}

void DestroyImage::prepare (PrepareContext& context)
{
	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
	const vk::VkDevice			device	= context.getContext().getDevice();

	// Move ownership of the image handle out of the context
	m_image = vk::Move<vk::VkImage>(vk::check(context.getImage()), vk::Deleter<vk::VkImage>(vkd, device, DE_NULL));
	context.releaseImage();
}

void DestroyImage::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Destroy image" << TestLog::EndMessage;
}

void DestroyImage::execute (ExecuteContext& context)
{
	const Context& ctx = context.getContext();

	ctx.getDeviceInterface().destroyImage(ctx.getDevice(), m_image.disown(), DE_NULL);
}
// Binds the memory object to the current image at offset 0.
class BindImageMemory : public Command
{
public:
				BindImageMemory		(void) {}
				~BindImageMemory	(void) {}
	const char*	getName				(void) const { return "BindImageMemory"; }

	void		logPrepare			(TestLog& log, size_t commandIndex) const;
	void		prepare				(PrepareContext& context);
};

void BindImageMemory::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Bind memory to image" << TestLog::EndMessage;
}

void BindImageMemory::prepare (PrepareContext& context)
{
	const Context& ctx = context.getContext();

	VK_CHECK(ctx.getDeviceInterface().bindImageMemory(ctx.getDevice(), context.getImage(), context.getMemory().getMemory(), 0));
}
// Waits until all previously submitted work on the queue has completed.
class QueueWaitIdle : public Command
{
public:
				QueueWaitIdle	(void) {}
				~QueueWaitIdle	(void) {}
	// \note Fix: previously returned the misspelled "QueuetWaitIdle".
	const char*	getName			(void) const { return "QueueWaitIdle"; }

	void		logExecute		(TestLog& log, size_t commandIndex) const;
	void		execute			(ExecuteContext& context);
};

void QueueWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Queue wait idle" << TestLog::EndMessage;
}

void QueueWaitIdle::execute (ExecuteContext& context)
{
	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
	const vk::VkQueue			queue	= context.getContext().getQueue();

	VK_CHECK(vkd.queueWaitIdle(queue));
}
// Waits until all queues of the device are idle.
class DeviceWaitIdle : public Command
{
public:
				DeviceWaitIdle	(void) {}
				~DeviceWaitIdle	(void) {}
	const char*	getName			(void) const { return "DeviceWaitIdle"; }

	void		logExecute		(TestLog& log, size_t commandIndex) const;
	void		execute			(ExecuteContext& context);
};

void DeviceWaitIdle::logExecute (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Device wait idle" << TestLog::EndMessage;
}

void DeviceWaitIdle::execute (ExecuteContext& context)
{
	const Context& ctx = context.getContext();

	VK_CHECK(ctx.getDeviceInterface().deviceWaitIdle(ctx.getDevice()));
}
// Read-only view of the prepare-time state that is passed to
// CmdCommand::submit() together with the command buffer being recorded.
class SubmitContext
{
public:
	SubmitContext (const PrepareContext& context,
	               const vk::VkCommandBuffer commandBuffer)
		: m_context (context)
		, m_commandBuffer (commandBuffer)
	{
	}

	const Memory& getMemory (void) const { return m_context.getMemory(); }
	const Context& getContext (void) const { return m_context.getContext(); }
	// Command buffer the CmdCommand should record into.
	vk::VkCommandBuffer getCommandBuffer (void) const { return m_commandBuffer; }

	vk::VkBuffer getBuffer (void) const { return m_context.getBuffer(); }
	vk::VkDeviceSize getBufferSize (void) const { return m_context.getBufferSize(); }

	vk::VkImage getImage (void) const { return m_context.getImage(); }
	deInt32 getImageWidth (void) const { return m_context.getImageWidth(); }
	deInt32 getImageHeight (void) const { return m_context.getImageHeight(); }

private:
	const PrepareContext& m_context;
	const vk::VkCommandBuffer m_commandBuffer;
};
// Interface for commands that are recorded into a command buffer and run on
// the device as part of a SubmitCommandBuffer command.
class CmdCommand
{
public:
	virtual ~CmdCommand (void) {}
	// Name used in log messages and failure descriptions.
	virtual const char* getName (void) const = 0;

	// Log things that are done during prepare
	virtual void logPrepare (TestLog&, size_t) const {}
	// Log submitted calls etc.
	virtual void logSubmit (TestLog&, size_t) const {}

	// Allocate vulkan resources and prepare for submit.
	virtual void prepare (PrepareContext&) {}

	// Submit commands to command buffer.
	virtual void submit (SubmitContext&) {}

	// Verify results
	virtual void verify (VerifyContext&, size_t) {}
};
// Records the given CmdCommands into a primary command buffer in prepare()
// and submits it to the queue in execute().
// \note Takes ownership of the CmdCommand pointers; they are deleted in the
// destructor.
class SubmitCommandBuffer : public Command
{
public:
	SubmitCommandBuffer (const vector<CmdCommand*>& commands);
	~SubmitCommandBuffer (void);

	const char* getName (void) const { return "SubmitCommandBuffer"; }
	void logExecute (TestLog& log, size_t commandIndex) const;
	void logPrepare (TestLog& log, size_t commandIndex) const;

	// Allocate command buffer and submit commands to command buffer
	void prepare (PrepareContext& context);
	void execute (ExecuteContext& context);

	// Verify that results are correct.
	void verify (VerifyContext& context, size_t commandIndex);

private:
	vector<CmdCommand*> m_commands;	// owned; deleted in the destructor
	vk::Move<vk::VkCommandBuffer> m_commandBuffer;
};
SubmitCommandBuffer::SubmitCommandBuffer (const vector<CmdCommand*>& commands)
	: m_commands (commands)
{
}

SubmitCommandBuffer::~SubmitCommandBuffer (void)
{
	// This command owns the CmdCommands
	for (size_t ndx = 0; ndx < m_commands.size(); ndx++)
		delete m_commands[ndx];
}

void SubmitCommandBuffer::prepare (PrepareContext& context)
{
	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
	const vk::VkDevice			device	= context.getContext().getDevice();

	// Allocate a primary command buffer in the recording state
	m_commandBuffer = createBeginCommandBuffer(vkd, device, context.getContext().getCommandPool(), vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);

	// First let every command allocate its resources...
	for (size_t ndx = 0; ndx < m_commands.size(); ndx++)
		m_commands[ndx]->prepare(context);

	// ...then record them all into the command buffer.
	{
		SubmitContext submitContext (context, *m_commandBuffer);

		for (size_t ndx = 0; ndx < m_commands.size(); ndx++)
			m_commands[ndx]->submit(submitContext);

		VK_CHECK(vkd.endCommandBuffer(*m_commandBuffer));
	}
}
// Submits the recorded command buffer to the queue. Does not wait for
// completion; a QueueWaitIdle/DeviceWaitIdle command is needed for that.
void SubmitCommandBuffer::execute (ExecuteContext& context)
{
	const vk::DeviceInterface&	vkd		= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	cmd		= *m_commandBuffer;
	const vk::VkQueue			queue	= context.getContext().getQueue();
	const vk::VkSubmitInfo		submit	=
	{
		vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
		DE_NULL,									// pNext
		0,											// waitSemaphoreCount
		DE_NULL,									// pWaitSemaphores
		(const vk::VkPipelineStageFlags*)DE_NULL,	// pWaitDstStageMask
		1,											// commandBufferCount
		&cmd,
		0,											// signalSemaphoreCount
		DE_NULL
	};

	// \note Fix: the result of vkQueueSubmit was previously ignored; like the
	// other Vulkan calls in this file it must be checked with VK_CHECK.
	VK_CHECK(vkd.queueSubmit(queue, 1, &submit, 0));
}
void SubmitCommandBuffer::verify (VerifyContext& context, size_t commandIndex)
{
	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
	const tcu::ScopedLogSection	section		(context.getLog(), sectionName, sectionName);

	// Delegate verification to each recorded command
	for (size_t ndx = 0; ndx < m_commands.size(); ndx++)
		m_commands[ndx]->verify(context, ndx);
}

void SubmitCommandBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);

	for (size_t ndx = 0; ndx < m_commands.size(); ndx++)
		m_commands[ndx]->logPrepare(log, ndx);
}

void SubmitCommandBuffer::logExecute (TestLog& log, size_t commandIndex) const
{
	const string				sectionName	(de::toString(commandIndex) + ":" + getName());
	const tcu::ScopedLogSection	section		(log, sectionName, sectionName);

	for (size_t ndx = 0; ndx < m_commands.size(); ndx++)
		m_commands[ndx]->logSubmit(log, ndx);
}
// Records a vkCmdPipelineBarrier with a single global memory, buffer or
// image barrier depending on 'type'.
class PipelineBarrier : public CmdCommand
{
public:
	enum Type
	{
		TYPE_GLOBAL = 0,	// VkMemoryBarrier
		TYPE_BUFFER,		// VkBufferMemoryBarrier covering the whole buffer
		TYPE_IMAGE,			// VkImageMemoryBarrier; layout taken from 'imageLayout'
		TYPE_LAST
	};
	// imageLayout is dereferenced only for TYPE_IMAGE; it is used as both the
	// old and the new layout, so no layout transition is performed.
	PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
	                 const vk::VkAccessFlags srcAccesses,
	                 const vk::VkPipelineStageFlags dstStages,
	                 const vk::VkAccessFlags dstAccesses,
	                 Type type,
	                 const tcu::Maybe<vk::VkImageLayout> imageLayout);
	~PipelineBarrier (void) {}
	const char* getName (void) const { return "PipelineBarrier"; }
	void logSubmit (TestLog& log, size_t commandIndex) const;
	void submit (SubmitContext& context);
private:
	const vk::VkPipelineStageFlags m_srcStages;
	const vk::VkAccessFlags m_srcAccesses;
	const vk::VkPipelineStageFlags m_dstStages;
	const vk::VkAccessFlags m_dstAccesses;
	const Type m_type;
	const tcu::Maybe<vk::VkImageLayout> m_imageLayout;
};
PipelineBarrier::PipelineBarrier (const vk::VkPipelineStageFlags srcStages,
								  const vk::VkAccessFlags srcAccesses,
								  const vk::VkPipelineStageFlags dstStages,
								  const vk::VkAccessFlags dstAccesses,
								  Type type,
								  const tcu::Maybe<vk::VkImageLayout> imageLayout)
	: m_srcStages	(srcStages)
	, m_srcAccesses	(srcAccesses)
	, m_dstStages	(dstStages)
	, m_dstAccesses	(dstAccesses)
	, m_type		(type)
	, m_imageLayout	(imageLayout)
{
}

void PipelineBarrier::logSubmit (TestLog& log, size_t commandIndex) const
{
	const char* const typeStr = m_type == TYPE_GLOBAL ? "Global pipeline barrier"
							  : m_type == TYPE_BUFFER ? "Buffer pipeline barrier"
							  : "Image pipeline barrier";

	log << TestLog::Message << commandIndex << ":" << getName()
		<< " " << typeStr
		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses) << TestLog::EndMessage;
}
// Records the barrier into the command buffer. Exactly one barrier struct of
// the type selected at construction is used; the other barrier arrays of
// vkCmdPipelineBarrier are left empty.
void PipelineBarrier::submit (SubmitContext& context)
{
	const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer cmd = context.getCommandBuffer();

	switch (m_type)
	{
		case TYPE_GLOBAL:
		{
			const vk::VkMemoryBarrier barrier =
			{
				vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER,
				DE_NULL,	// pNext
				m_srcAccesses,
				m_dstAccesses
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 1, &barrier, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
			break;
		}

		case TYPE_BUFFER:
		{
			// Covers the whole buffer, no queue family ownership transfer
			const vk::VkBufferMemoryBarrier barrier =
			{
				vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
				DE_NULL,	// pNext
				m_srcAccesses,
				m_dstAccesses,
				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,
				context.getBuffer(),
				0,	// offset
				VK_WHOLE_SIZE
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
			break;
		}

		case TYPE_IMAGE:
		{
			// Same layout as both old and new: no layout transition is done
			const vk::VkImageMemoryBarrier barrier =
			{
				vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
				DE_NULL,	// pNext
				m_srcAccesses,
				m_dstAccesses,
				*m_imageLayout,	// oldLayout
				*m_imageLayout,	// newLayout
				VK_QUEUE_FAMILY_IGNORED,
				VK_QUEUE_FAMILY_IGNORED,
				context.getImage(),
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,
					0, 1,	// baseMipLevel, levelCount
					0, 1	// baseArrayLayer, layerCount
				}
			};

			vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
			break;
		}

		default:
			DE_FATAL("Unknown pipeline barrier type");
	}
}
// Records an image memory barrier that transitions the image from srcLayout
// to dstLayout. The transition makes the byte-level reference contents of
// the image memory undefined (see verify()).
class ImageTransition : public CmdCommand
{
public:
	ImageTransition (vk::VkPipelineStageFlags srcStages,
	                 vk::VkAccessFlags srcAccesses,
	                 vk::VkPipelineStageFlags dstStages,
	                 vk::VkAccessFlags dstAccesses,
	                 vk::VkImageLayout srcLayout,
	                 vk::VkImageLayout dstLayout);
	~ImageTransition (void) {}
	const char* getName (void) const { return "ImageTransition"; }
	void prepare (PrepareContext& context);
	void logSubmit (TestLog& log, size_t commandIndex) const;
	void submit (SubmitContext& context);
	void verify (VerifyContext& context, size_t);
private:
	const vk::VkPipelineStageFlags m_srcStages;
	const vk::VkAccessFlags m_srcAccesses;
	const vk::VkPipelineStageFlags m_dstStages;
	const vk::VkAccessFlags m_dstAccesses;
	const vk::VkImageLayout m_srcLayout;
	const vk::VkImageLayout m_dstLayout;
	vk::VkDeviceSize m_imageMemorySize;	// set in prepare(); range invalidated in the reference model
};
ImageTransition::ImageTransition (vk::VkPipelineStageFlags srcStages,
								  vk::VkAccessFlags srcAccesses,
								  vk::VkPipelineStageFlags dstStages,
								  vk::VkAccessFlags dstAccesses,
								  vk::VkImageLayout srcLayout,
								  vk::VkImageLayout dstLayout)
	: m_srcStages	(srcStages)
	, m_srcAccesses	(srcAccesses)
	, m_dstStages	(dstStages)
	, m_dstAccesses	(dstAccesses)
	, m_srcLayout	(srcLayout)
	, m_dstLayout	(dstLayout)
{
}

void ImageTransition::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName()
		<< " Image transition pipeline barrier"
		<< ", srcStages: " << vk::getPipelineStageFlagsStr(m_srcStages) << ", srcAccesses: " << vk::getAccessFlagsStr(m_srcAccesses)
		<< ", dstStages: " << vk::getPipelineStageFlagsStr(m_dstStages) << ", dstAccesses: " << vk::getAccessFlagsStr(m_dstAccesses)
		<< ", srcLayout: " << m_srcLayout << ", dstLayout: " << m_dstLayout << TestLog::EndMessage;
}

void ImageTransition::prepare (PrepareContext& context)
{
	// The source layout must match the image's current layout unless either
	// one is UNDEFINED (in which case the contents are discarded).
	DE_ASSERT(context.getImageLayout() == vk::VK_IMAGE_LAYOUT_UNDEFINED || m_srcLayout == vk::VK_IMAGE_LAYOUT_UNDEFINED || context.getImageLayout() == m_srcLayout);

	context.setImageLayout(m_dstLayout);
	m_imageMemorySize = context.getImageMemorySize();
}
// Records the layout-transition barrier for the whole color subresource.
void ImageTransition::submit (SubmitContext& context)
{
	const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer cmd = context.getCommandBuffer();
	const vk::VkImageMemoryBarrier barrier =
	{
		vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
		DE_NULL,	// pNext
		m_srcAccesses,
		m_dstAccesses,
		m_srcLayout,	// oldLayout
		m_dstLayout,	// newLayout
		VK_QUEUE_FAMILY_IGNORED,	// no queue family ownership transfer
		VK_QUEUE_FAMILY_IGNORED,
		context.getImage(),
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,
			0u, 1u,	// baseMipLevel, levelCount
			0u, 1u	// baseArrayLayer, layerCount
		}
	};

	vkd.cmdPipelineBarrier(cmd, m_srcStages, m_dstStages, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
}

void ImageTransition::verify (VerifyContext& context, size_t)
{
	// With optimal tiling the byte layout after a layout transition is not
	// known, so the reference contents of the image memory become undefined.
	context.getReference().setUndefined(0, (size_t)m_imageMemorySize);
}
// Fills the buffer with a repeating 32-bit value using vkCmdFillBuffer.
class FillBuffer : public CmdCommand
{
public:
				FillBuffer	(deUint32 value) : m_value(value) {}
				~FillBuffer	(void) {}
	const char*	getName		(void) const { return "FillBuffer"; }

	void		logSubmit	(TestLog& log, size_t commandIndex) const;
	void		submit		(SubmitContext& context);
	void		verify		(VerifyContext& context, size_t commandIndex);

private:
	const deUint32		m_value;
	vk::VkDeviceSize	m_bufferSize;
};

void FillBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Fill value: " << m_value << TestLog::EndMessage;
}

void FillBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd	= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	cmd	= context.getCommandBuffer();

	// \note vkCmdFillBuffer requires a size that is a multiple of 4, so the
	// buffer size is rounded down.
	m_bufferSize = context.getBufferSize() & ~(0x3ull);

	vkd.cmdFillBuffer(cmd, context.getBuffer(), 0, m_bufferSize, m_value);
}

void FillBuffer::verify (VerifyContext& context, size_t)
{
	ReferenceMemory& reference = context.getReference();

	for (size_t ndx = 0; ndx < m_bufferSize; ndx++)
	{
		// The 32-bit fill value repeats in memory byte order
#if (DE_ENDIANNESS == DE_LITTLE_ENDIAN)
		const size_t byteNdx = ndx % 4;
#else
		const size_t byteNdx = 3 - (ndx % 4);
#endif
		reference.set(ndx, (deUint8)(0xffu & (m_value >> (8 * byteNdx))));
	}
}
// Writes pseudo-random data to the buffer using vkCmdUpdateBuffer in 64KiB
// blocks (the maximum update size allowed per call).
class UpdateBuffer : public CmdCommand
{
public:
				UpdateBuffer	(deUint32 seed) : m_seed(seed) {}
				~UpdateBuffer	(void) {}
	const char*	getName			(void) const { return "UpdateBuffer"; }

	void		logSubmit		(TestLog& log, size_t commandIndex) const;
	void		submit			(SubmitContext& context);
	void		verify			(VerifyContext& context, size_t commandIndex);

private:
	const deUint32		m_seed;
	vk::VkDeviceSize	m_bufferSize;
};

void UpdateBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Update buffer, seed: " << m_seed << TestLog::EndMessage;
}

void UpdateBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd			= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	cmd			= context.getCommandBuffer();
	const vk::VkBuffer			buffer		= context.getBuffer();
	const size_t				blockSize	= 65536;
	std::vector<deUint8>		block		(blockSize, 0);
	de::Random					rng			(m_seed);

	m_bufferSize = context.getBufferSize();

	for (size_t offset = 0; offset < m_bufferSize; offset += blockSize)
	{
		// \note A full block of random data is always generated to keep the
		// rng state in sync with verify(), even for a partial final block.
		for (size_t ndx = 0; ndx < block.size(); ndx++)
			block[ndx] = rng.getUint8();

		const size_t updateSize = (m_bufferSize - offset > blockSize) ? blockSize : (size_t)(m_bufferSize - offset);

		vkd.cmdUpdateBuffer(cmd, buffer, offset, updateSize, (const deUint32*)(&block[0]));
	}
}

void UpdateBuffer::verify (VerifyContext& context, size_t)
{
	ReferenceMemory&		reference	= context.getReference();
	const size_t			blockSize	= 65536;
	vector<deUint8>			block		(blockSize, 0);
	de::Random				rng			(m_seed);

	for (size_t offset = 0; offset < m_bufferSize; offset += blockSize)
	{
		for (size_t ndx = 0; ndx < block.size(); ndx++)
			block[ndx] = rng.getUint8();

		const size_t updateSize = (m_bufferSize - offset > blockSize) ? blockSize : (size_t)(m_bufferSize - offset);

		reference.setData(offset, updateSize, &block[0]);
	}
}
// Copies the whole buffer into a freshly allocated host-visible destination
// buffer; verify() reads the destination back and compares it against the
// reference model.
class BufferCopyToBuffer : public CmdCommand
{
public:
				BufferCopyToBuffer	(void) {}
				~BufferCopyToBuffer	(void) {}
	const char*	getName				(void) const { return "BufferCopyToBuffer"; }

	void		logPrepare			(TestLog& log, size_t commandIndex) const;
	void		prepare				(PrepareContext& context);
	void		logSubmit			(TestLog& log, size_t commandIndex) const;
	void		submit				(SubmitContext& context);
	void		verify				(VerifyContext& context, size_t commandIndex);

private:
	vk::VkDeviceSize				m_bufferSize;
	vk::Move<vk::VkBuffer>			m_dstBuffer;
	vk::Move<vk::VkDeviceMemory>	m_memory;
};

void BufferCopyToBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Allocate destination buffer for buffer to buffer copy." << TestLog::EndMessage;
}

void BufferCopyToBuffer::prepare (PrepareContext& context)
{
	const Context&					ctx		= context.getContext();
	const vk::InstanceInterface&	vki		= ctx.getInstanceInterface();
	const vk::DeviceInterface&		vkd		= ctx.getDeviceInterface();
	const vk::VkDevice				device	= ctx.getDevice();

	m_bufferSize	= context.getBufferSize();
	// Host-visible destination so verify() can map and inspect the result
	m_dstBuffer		= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, ctx.getQueueFamilies());
	m_memory		= bindBufferMemory(vki, vkd, ctx.getPhysicalDevice(), device, *m_dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
}
void BufferCopyToBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	log << TestLog::Message << commandIndex << ":" << getName() << " Copy buffer to another buffer" << TestLog::EndMessage;
}

void BufferCopyToBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
	const vk::VkBufferCopy		range			=
	{
		0, 0,			// srcOffset, dstOffset
		m_bufferSize	// size
	};

	vkd.cmdCopyBuffer(commandBuffer, context.getBuffer(), *m_dstBuffer, 1, &range);
}
// Makes the copy result host-visible with a transfer->host barrier, maps the
// destination buffer and compares its contents against the reference model.
void BufferCopyToBuffer::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	ReferenceMemory&						reference		(context.getReference());
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vk::VkBufferMemoryBarrier			barrier			=
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
		DE_NULL,							// pNext
		vk::VK_ACCESS_TRANSFER_WRITE_BIT,	// srcAccessMask
		vk::VK_ACCESS_HOST_READ_BIT,		// dstAccessMask
		VK_QUEUE_FAMILY_IGNORED,
		VK_QUEUE_FAMILY_IGNORED,
		*m_dstBuffer,
		0,									// offset
		VK_WHOLE_SIZE
	};

	vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &barrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);

	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
	queueRun(vkd, queue, *commandBuffer);

	{
		void* const	ptr		= mapMemory(vkd, device, *m_memory, m_bufferSize);
		bool		isOk	= true;

		vk::invalidateMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);

		{
			const deUint8* const data = (const deUint8*)ptr;

			for (size_t pos = 0; pos < (size_t)m_bufferSize; pos++)
			{
				if (reference.isDefined(pos))
				{
					if (data[pos] != reference.get(pos))
					{
						resultCollector.fail(
								de::toString(commandIndex) + ":" + getName()
								+ " Result differs from reference, Expected: "
								+ de::toString(tcu::toHex<8>(reference.get(pos)))
								+ ", Got: "
								+ de::toString(tcu::toHex<8>(data[pos]))
								+ ", At offset: "
								+ de::toString(pos));
						// \note Fix: isOk was previously never cleared, so the
						// failure log message below was unreachable.
						isOk = false;
						break;
					}
				}
			}
		}

		vkd.unmapMemory(device, *m_memory);

		if (!isOk)
			context.getLog() << TestLog::Message << commandIndex << ": Buffer copy to buffer verification failed" << TestLog::EndMessage;
	}
}
// Command that allocates a host-visible staging buffer, fills it with seeded
// pseudo-random bytes in prepare(), and copies it into the context buffer on
// submit(). verify() replays the same RNG sequence into the reference memory.
class BufferCopyFromBuffer : public CmdCommand
{
public:
							BufferCopyFromBuffer	(deUint32 seed) : m_seed(seed) {}
							~BufferCopyFromBuffer	(void) {}
	const char*				getName					(void) const { return "BufferCopyFromBuffer"; }

	void					logPrepare				(TestLog& log, size_t commandIndex) const;
	void					prepare					(PrepareContext& context);
	void					logSubmit				(TestLog& log, size_t commandIndex) const;
	void					submit					(SubmitContext& context);
	void					verify					(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;			// Seed for the random source data
	vk::VkDeviceSize				m_bufferSize;	// Context buffer size, captured in prepare()
	vk::Move<vk::VkBuffer>			m_srcBuffer;	// Staging source buffer for the copy
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Host-visible memory backing m_srcBuffer
};
// Describe the prepare step (staging buffer allocation) in the test log.
void BufferCopyFromBuffer::logPrepare (TestLog& log, size_t commandIndex) const
{
	const string message = de::toString(commandIndex) + ":" + getName()
						 + " Allocate source buffer for buffer to buffer copy. Seed: " + de::toString(m_seed);

	log << TestLog::Message << message << TestLog::EndMessage;
}
// Create the host-visible source buffer and fill it with seeded random bytes.
void BufferCopyFromBuffer::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();

	m_bufferSize	= context.getBufferSize();
	m_srcBuffer		= createBuffer(vkd, device, m_bufferSize, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies);
	m_memory		= bindBufferMemory(vki, vkd, physicalDevice, device, *m_srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

	{
		de::Random		random	(m_seed);
		deUint8* const	bytes	= (deUint8*)mapMemory(vkd, device, *m_memory, m_bufferSize);

		// Fill the whole buffer with a deterministic pseudo-random sequence;
		// verify() regenerates the same sequence into the reference memory.
		for (size_t offset = 0; offset < (size_t)m_bufferSize; offset++)
			bytes[offset] = random.getUint8();

		vk::flushMappedMemoryRange(vkd, device, *m_memory, 0, m_bufferSize);
		vkd.unmapMemory(device, *m_memory);
	}
}
// Describe the submit step (buffer-to-buffer copy) in the test log.
void BufferCopyFromBuffer::logSubmit (TestLog& log, size_t commandIndex) const
{
	const string message = de::toString(commandIndex) + ":" + getName()
						 + " Copy buffer data from another buffer";

	log << TestLog::Message << message << TestLog::EndMessage;
}
// Record a full-size copy from the staging buffer into the context buffer.
void BufferCopyFromBuffer::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
	vk::VkBufferCopy			copyRegion;

	copyRegion.srcOffset	= 0;
	copyRegion.dstOffset	= 0;
	copyRegion.size			= m_bufferSize;

	vkd.cmdCopyBuffer(commandBuffer, *m_srcBuffer, context.getBuffer(), 1, &copyRegion);
}
// Replay the seeded RNG sequence into the reference memory; the copied data
// in the context buffer must match it byte for byte.
void BufferCopyFromBuffer::verify (VerifyContext& context, size_t)
{
	ReferenceMemory&	reference	(context.getReference());
	de::Random			random		(m_seed);

	for (size_t offset = 0; offset < (size_t)m_bufferSize; offset++)
		reference.set(offset, random.getUint8());
}
// Command that allocates an RGBA8 destination image in prepare(), copies the
// context buffer into it on submit(), and in verify() reads the image back
// through a host-visible buffer to compare against the reference memory.
class BufferCopyToImage : public CmdCommand
{
public:
							BufferCopyToImage	(void) {}
							~BufferCopyToImage	(void) {}
	const char*				getName				(void) const { return "BufferCopyToImage"; }

	void					logPrepare			(TestLog& log, size_t commandIndex) const;
	void					prepare				(PrepareContext& context);
	void					logSubmit			(TestLog& log, size_t commandIndex) const;
	void					submit				(SubmitContext& context);
	void					verify				(VerifyContext& context, size_t commandIndex);

private:
	deInt32							m_imageWidth;	// Image width in texels (set in prepare())
	deInt32							m_imageHeight;	// Image height in texels (set in prepare())
	vk::Move<vk::VkImage>			m_dstImage;		// Destination image for the copy
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Memory backing m_dstImage
};
// Describe the prepare step (destination image allocation) in the test log.
void BufferCopyToImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	const string message = de::toString(commandIndex) + ":" + getName()
						 + " Allocate destination image for buffer to image copy.";

	log << TestLog::Message << message << TestLog::EndMessage;
}
// Create the RGBA8 destination image sized to match the context buffer and
// transition it to TRANSFER_DST_OPTIMAL so submit() can copy into it.
void BufferCopyToImage::prepare (PrepareContext& context)
{
	const vk::InstanceInterface&	vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&		vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice		physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice				device			= context.getContext().getDevice();
	const vk::VkQueue				queue			= context.getContext().getQueue();
	const vk::VkCommandPool			commandPool		= context.getContext().getCommandPool();
	const vector<deUint32>&			queueFamilies	= context.getContext().getQueueFamilies();
	// Pick WxH so that W*H*4 bytes (RGBA8 texels) covers the buffer size --
	// presumably an exact match; see findImageSizeWxHx4.
	const IVec2						imageSize		= findImageSizeWxHx4(context.getBufferSize());

	m_imageWidth	= imageSize[0];
	m_imageHeight	= imageSize[1];

	{
		const vk::VkImageCreateInfo	createInfo =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
			DE_NULL,

			0,
			vk::VK_IMAGE_TYPE_2D,
			vk::VK_FORMAT_R8G8B8A8_UNORM,
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u,
			},
			1, 1, // mipLevels, arrayLayers
			vk::VK_SAMPLE_COUNT_1_BIT,
			vk::VK_IMAGE_TILING_OPTIMAL,
			// SRC usage is needed too: verify() copies the image back out.
			vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
			vk::VK_SHARING_MODE_EXCLUSIVE,

			(deUint32)queueFamilies.size(),
			&queueFamilies[0],
			vk::VK_IMAGE_LAYOUT_UNDEFINED
		};

		m_dstImage = vk::createImage(vkd, device, &createInfo);
	}

	// 0 = no required memory property flags; any memory type the image
	// supports will do.
	m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_dstImage, 0);

	{
		const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
		// Layout transition UNDEFINED -> TRANSFER_DST_OPTIMAL; no prior writes
		// to wait on (srcAccessMask 0).
		const vk::VkImageMemoryBarrier			barrier			=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			0,
			vk::VK_ACCESS_TRANSFER_WRITE_BIT,

			vk::VK_IMAGE_LAYOUT_UNDEFINED,
			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &barrier);
		VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
		// Run the transition synchronously before submit() records the copy.
		queueRun(vkd, queue, *commandBuffer);
	}
}
// Describe the submit step (buffer-to-image copy) in the test log.
void BufferCopyToImage::logSubmit (TestLog& log, size_t commandIndex) const
{
	const string message = de::toString(commandIndex) + ":" + getName()
						 + " Copy buffer to image";

	log << TestLog::Message << message << TestLog::EndMessage;
}
// Record a tightly-packed copy of the whole context buffer into mip level 0
// of the destination image.
void BufferCopyToImage::submit (SubmitContext& context)
{
	const vk::DeviceInterface&	vkd				= context.getContext().getDeviceInterface();
	const vk::VkCommandBuffer	commandBuffer	= context.getCommandBuffer();
	vk::VkBufferImageCopy		region;

	region.bufferOffset						= 0;
	region.bufferRowLength					= 0;	// Tightly packed rows
	region.bufferImageHeight				= 0;	// Tightly packed slices

	region.imageSubresource.aspectMask		= vk::VK_IMAGE_ASPECT_COLOR_BIT;
	region.imageSubresource.mipLevel		= 0;
	region.imageSubresource.baseArrayLayer	= 0;
	region.imageSubresource.layerCount		= 1;

	region.imageOffset.x					= 0;
	region.imageOffset.y					= 0;
	region.imageOffset.z					= 0;

	region.imageExtent.width				= (deUint32)m_imageWidth;
	region.imageExtent.height				= (deUint32)m_imageHeight;
	region.imageExtent.depth				= 1u;

	vkd.cmdCopyBufferToImage(commandBuffer, context.getBuffer(), *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
}
// Read the destination image back into a host-visible buffer and compare every
// reference-defined byte against the reference memory. Sequence: image barrier
// (DST->SRC layout), image-to-buffer copy, buffer barrier (transfer->host),
// then host-side byte comparison.
void BufferCopyToImage::verify (VerifyContext& context, size_t commandIndex)
{
	tcu::ResultCollector&					resultCollector	(context.getResultCollector());
	ReferenceMemory&						reference		(context.getReference());
	const vk::InstanceInterface&			vki				= context.getContext().getInstanceInterface();
	const vk::DeviceInterface&				vkd				= context.getContext().getDeviceInterface();
	const vk::VkPhysicalDevice				physicalDevice	= context.getContext().getPhysicalDevice();
	const vk::VkDevice						device			= context.getContext().getDevice();
	const vk::VkQueue						queue			= context.getContext().getQueue();
	const vk::VkCommandPool					commandPool		= context.getContext().getCommandPool();
	const vk::Unique<vk::VkCommandBuffer>	commandBuffer	(createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	const vector<deUint32>&					queueFamilies	= context.getContext().getQueueFamilies();
	// Readback buffer sized for the whole image: 4 bytes per RGBA8 texel.
	const vk::Unique<vk::VkBuffer>			dstBuffer		(createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
	const vk::Unique<vk::VkDeviceMemory>	memory			(bindBufferMemory(vki, vkd, physicalDevice, device, *dstBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
	{
		// Make the earlier transfer writes visible to the transfer read and
		// move the image into TRANSFER_SRC_OPTIMAL for the readback copy.
		const vk::VkImageMemoryBarrier	imageBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_TRANSFER_READ_BIT,

			vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,

			*m_dstImage,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// Mip level
				1,	// Mip level count
				0,	// Layer
				1	// Layer count
			}
		};
		// Make the readback buffer's transfer writes visible to host reads.
		const vk::VkBufferMemoryBarrier	bufferBarrier	=
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
			DE_NULL,

			vk::VK_ACCESS_TRANSFER_WRITE_BIT,
			vk::VK_ACCESS_HOST_READ_BIT,

			VK_QUEUE_FAMILY_IGNORED,
			VK_QUEUE_FAMILY_IGNORED,
			*dstBuffer,
			0,
			VK_WHOLE_SIZE
		};
		// Tightly-packed copy of the full image (mip 0, layer 0).
		const vk::VkBufferImageCopy		region			=
		{
			0,
			0, 0,
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,
				0,	// mipLevel
				0,	// arrayLayer
				1	// layerCount
			},
			{ 0, 0, 0 },
			{
				(deUint32)m_imageWidth,
				(deUint32)m_imageHeight,
				1u
			}
		};

		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 0, (const vk::VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
		vkd.cmdCopyImageToBuffer(*commandBuffer, *m_dstImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *dstBuffer, 1, &region);
		vkd.cmdPipelineBarrier(*commandBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0, 0, (const vk::VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	VK_CHECK(vkd.endCommandBuffer(*commandBuffer));
	queueRun(vkd, queue, *commandBuffer);

	{
		void* const ptr = mapMemory(vkd, device, *memory, 4 * m_imageWidth * m_imageHeight);

		vk::invalidateMappedMemoryRange(vkd, device, *memory, 0, 4 * m_imageWidth * m_imageHeight);

		{
			const deUint8* const data = (const deUint8*)ptr;

			// Only bytes the reference memory has defined participate in the
			// comparison; report the first mismatch and stop.
			for (size_t pos = 0; pos < (size_t)( 4 * m_imageWidth * m_imageHeight); pos++)
			{
				if (reference.isDefined(pos))
				{
					if (data[pos] != reference.get(pos))
					{
						resultCollector.fail(
								de::toString(commandIndex) + ":" + getName()
								+ " Result differs from reference, Expected: "
								+ de::toString(tcu::toHex<8>(reference.get(pos)))
								+ ", Got: "
								+ de::toString(tcu::toHex<8>(data[pos]))
								+ ", At offset: "
								+ de::toString(pos));
						break;
					}
				}
			}
		}

		vkd.unmapMemory(device, *memory);
	}
}
// Command that allocates an RGBA8 source image filled with seeded random data
// in prepare() and, on submit, copies that image into the context buffer.
class BufferCopyFromImage : public CmdCommand
{
public:
							BufferCopyFromImage	(deUint32 seed) : m_seed(seed) {}
							~BufferCopyFromImage	(void) {}
	const char*				getName				(void) const { return "BufferCopyFromImage"; }

	void					logPrepare			(TestLog& log, size_t commandIndex) const;
	void					prepare				(PrepareContext& context);
	void					logSubmit			(TestLog& log, size_t commandIndex) const;
	void					submit				(SubmitContext& context);
	void					verify				(VerifyContext& context, size_t commandIndex);

private:
	const deUint32					m_seed;			// Seed for the random image contents
	deInt32							m_imageWidth;	// Image width in texels (set in prepare())
	deInt32							m_imageHeight;	// Image height in texels (set in prepare())
	vk::Move<vk::VkImage>			m_srcImage;		// Source image for the copy
	vk::Move<vk::VkDeviceMemory>	m_memory;		// Memory backing m_srcImage
};
// Describe the prepare step (source image allocation) in the test log.
void BufferCopyFromImage::logPrepare (TestLog& log, size_t commandIndex) const
{
	const string message = de::toString(commandIndex) + ":" + getName()
						 + " Allocate source image for image to buffer copy.";

	log << TestLog::Message << message << TestLog::EndMessage;
}
void BufferCopyFromImage::prepare (PrepareContext& context)
{
const vk::InstanceInterface& vki = context.getContext().getInstanceInterface();
const vk::DeviceInterface& vkd = context.getContext().getDeviceInterface();
const vk::VkPhysicalDevice physicalDevice = context.getContext().getPhysicalDevice();
const vk::VkDevice device = context.getContext().getDevice();
const vk::VkQueue queue = context.getContext().getQueue();
const vk::VkCommandPool commandPool = context.getContext().getCommandPool();
const vector<deUint32>& queueFamilies = context.getContext().getQueueFamilies();
const IVec2 imageSize = findImageSizeWxHx4(context.getBufferSize());
m_imageWidth = imageSize[0];
m_imageHeight = imageSize[1];
{
const vk::VkImageCreateInfo createInfo =
{
vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
DE_NULL,
0,
vk::VK_IMAGE_TYPE_2D,
vk::VK_FORMAT_R8G8B8A8_UNORM,
{
(deUint32)m_imageWidth,
(deUint32)m_imageHeight,
1u,
},
1, 1, // mipLevels, arrayLayers
vk::VK_SAMPLE_COUNT_1_BIT,
vk::VK_IMAGE_TILING_OPTIMAL,
vk::VK_IMAGE_USAGE_TRANSFER_DST_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
vk::VK_SHARING_MODE_EXCLUSIVE,
(deUint32)queueFamilies.size(),
&queueFamilies[0],
vk::VK_IMAGE_LAYOUT_UNDEFINED
};
m_srcImage = vk::createImage(vkd, device, &createInfo);
}
m_memory = bindImageMemory(vki, vkd, physicalDevice, device, *m_srcImage, 0);
{
const vk::Unique<vk::VkBuffer> srcBuffer (createBuffer(vkd, device, 4 * m_imageWidth * m_imageHeight, vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT, vk::VK_SHARING_MODE_EXCLUSIVE, queueFamilies));
const vk::Unique<vk::VkDeviceMemory> memory (bindBufferMemory(vki, vkd, physicalDevice, device, *srcBuffer, vk::VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
const vk::Unique<vk::VkCommandBuffer> commandBuffer (createBeginCommandBuffer(vkd, device, commandPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
const vk::VkImageMemoryBarrier preImageBarrier =
{
vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
DE_NULL,
0,
vk::VK_ACCESS_TRANSFER_WRITE_BIT,
vk::VK_IMAGE_LAYOUT_UNDEFINED,
vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
*m_srcImage,
{
vk::VK_IMAGE_ASPECT_COLOR_BIT,
0, // Mip level
1, // Mip level count
0, // Layer
1 // Layer count
}
};
const vk::VkImageMemoryBarrier postImageBarrier =
{
vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
DE_NULL,
vk::VK_ACCESS_TRANSFER_WRITE_BIT,
0,
vk::VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
*m_srcImage,
{
vk::VK_IMAGE_ASPECT_COLOR_BIT,
0, // Mip level
1, // Mip level count
0, // Layer
1 // Layer count
}
};
const vk::VkBufferImageCopy region =
{
0,
0, 0,
{
vk::VK_IMAGE_ASPECT_COLOR_BIT,
0, // mipLevel
0, // arrayLayer
1 // layerCount
},
{ 0, 0, 0 },
{
(deUint32)m_imageWidth,
(deUint32)m_imageHeight,
1u
}
};