blob: e6b480e93326290b10dd29ba1fa3c028f7595684 [file] [log] [blame]
/*------------------------------------------------------------------------
* Vulkan Conformance Tests
* ------------------------
*
* Copyright (c) 2016 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Synchronization operation abstraction
*//*--------------------------------------------------------------------*/
#include "vktSynchronizationOperation.hpp"
#include "vkDefs.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "deUniquePtr.hpp"
#include "tcuTestLog.hpp"
#include "tcuTextureUtil.hpp"
#include <vector>
#include <sstream>
namespace vkt
{
namespace synchronization
{
namespace
{
using namespace vk;
//! Assorted limits used when sizing test resources in this file.
enum Constants
{
	MAX_IMAGE_DIMENSION_2D	= 0x1000u,	// Largest width/height used for 2D test images (4096 texels).
	MAX_UBO_RANGE			= 0x4000u,	// Largest uniform buffer range exercised (16 KiB).
	MAX_UPDATE_BUFFER_SIZE	= 0x10000u,	// Largest payload passed to vkCmdUpdateBuffer (64 KiB).
};
//! Kind of descriptor-backed buffer used by shader-based operations.
enum BufferType
{
	BUFFER_TYPE_UNIFORM	= 0,	// read through a uniform buffer binding
	BUFFER_TYPE_STORAGE	= 1,	// read/written through a storage buffer binding
};
//! Whether an operation reads from or writes to the tested resource.
enum AccessMode
{
	ACCESS_MODE_READ	= 0,
	ACCESS_MODE_WRITE	= 1,
};
//! Pipeline bind point flavor used by an operation.
enum PipelineType
{
	PIPELINE_TYPE_GRAPHICS	= 0,
	PIPELINE_TYPE_COMPUTE	= 1,
};
//! Body of the GLSL gl_PerVertex interface block, spliced into generated shader sources.
static const char* const s_perVertexBlock = "gl_PerVertex {\n"
" vec4 gl_Position;\n"
"}";
//! Sync info meaning "no synchronization requirements": no stages, no accesses, undefined layout.
static const SyncInfo emptySyncInfo =
{
0, // VkPipelineStageFlags stageMask;
0, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
};
//! Human-readable name of a single shader stage (used e.g. when composing test names).
std::string getShaderStageName(VkShaderStageFlagBits stage)
{
	switch (stage)
	{
		case VK_SHADER_STAGE_VERTEX_BIT:					return "vertex";
		case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:		return "tess_control";
		case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:	return "tess_eval";
		case VK_SHADER_STAGE_GEOMETRY_BIT:					return "geometry";
		case VK_SHADER_STAGE_FRAGMENT_BIT:					return "fragment";
		case VK_SHADER_STAGE_COMPUTE_BIT:					return "compute";
		default:
			DE_FATAL("Unhandled stage!");
			return "";
	}
}
//! A pipeline that can be embedded inside an operation.
class Pipeline
{
public:
	virtual ~Pipeline (void) {}

	//! Record this pipeline's work (bind/draw/dispatch as the implementation defines)
	//! into the given command buffer, using the supplied descriptor set.
	virtual void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet) = 0;
};
//! Vertex data that covers the whole viewport with two triangles.
class VertexGrid
{
public:
	VertexGrid (OperationContext& context)
		: m_vertexFormat	(VK_FORMAT_R32G32B32A32_SFLOAT)
		, m_vertexStride	(tcu::getPixelSize(mapVkFormat(m_vertexFormat)))
	{
		const DeviceInterface&	vk			= context.getDeviceInterface();
		const VkDevice			device		= context.getDevice();
		Allocator&				allocator	= context.getAllocator();

		// Two triangles covering the whole clip-space quad.
		{
			static const tcu::Vec4 positions[] =
			{
				tcu::Vec4( 1.0f,  1.0f, 0.0f, 1.0f),
				tcu::Vec4(-1.0f,  1.0f, 0.0f, 1.0f),
				tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
				tcu::Vec4(-1.0f, -1.0f, 0.0f, 1.0f),
				tcu::Vec4( 1.0f, -1.0f, 0.0f, 1.0f),
				tcu::Vec4( 1.0f,  1.0f, 0.0f, 1.0f),
			};
			m_vertexData.assign(positions, positions + DE_LENGTH_OF_ARRAY(positions));
		}

		DE_ASSERT(sizeof(m_vertexData[0]) == m_vertexStride);

		// Host-visible vertex buffer filled with the positions above.
		{
			const VkDeviceSize vertexDataSizeBytes = m_vertexData.size() * sizeof(m_vertexData[0]);

			m_vertexBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, makeBufferCreateInfo(vertexDataSizeBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), MemoryRequirement::HostVisible));

			const Allocation& vertexAlloc = m_vertexBuffer->getAllocation();
			deMemcpy(vertexAlloc.getHostPtr(), &m_vertexData[0], static_cast<std::size_t>(vertexDataSizeBytes));
			flushAlloc(vk, device, vertexAlloc);
		}

		// Host-visible index buffer with identity indices 0..N-1.
		{
			const deUint32		numIndices				= static_cast<deUint32>(m_vertexData.size());
			const VkDeviceSize	indexBufferSizeBytes	= sizeof(deUint32) * m_vertexData.size();

			m_indexBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, makeBufferCreateInfo(indexBufferSizeBytes, VK_BUFFER_USAGE_INDEX_BUFFER_BIT), MemoryRequirement::HostVisible));

			const Allocation&	indexAlloc	= m_indexBuffer->getAllocation();
			deUint32* const		pIndices	= static_cast<deUint32*>(indexAlloc.getHostPtr());

			for (deUint32 ndx = 0; ndx < numIndices; ++ndx)
				pIndices[ndx] = ndx;

			flushAlloc(vk, device, indexAlloc);
		}
	}

	VkFormat	getVertexFormat	(void) const { return m_vertexFormat; }
	deUint32	getVertexStride	(void) const { return m_vertexStride; }
	VkIndexType	getIndexType	(void) const { return VK_INDEX_TYPE_UINT32; }
	deUint32	getNumVertices	(void) const { return static_cast<deUint32>(m_vertexData.size()); }
	deUint32	getNumIndices	(void) const { return getNumVertices(); }
	VkBuffer	getVertexBuffer	(void) const { return **m_vertexBuffer; }
	VkBuffer	getIndexBuffer	(void) const { return **m_indexBuffer; }

private:
	const VkFormat			m_vertexFormat;
	const deUint32			m_vertexStride;
	std::vector<tcu::Vec4>	m_vertexData;
	de::MovePtr<Buffer>		m_vertexBuffer;
	de::MovePtr<Buffer>		m_indexBuffer;
};
//! Add flags for all shader stages required to support a particular stage (e.g. fragment requires vertex as well).
VkShaderStageFlags getRequiredStages (const VkShaderStageFlagBits stage)
{
	// Compute must not be combined with any graphics stage.
	DE_ASSERT(stage == VK_SHADER_STAGE_COMPUTE_BIT || (stage & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

	VkShaderStageFlags flags = 0;

	if ((stage & VK_SHADER_STAGE_COMPUTE_BIT) != 0)
		flags |= VK_SHADER_STAGE_COMPUTE_BIT;

	if ((stage & VK_SHADER_STAGE_ALL_GRAPHICS) != 0)
		flags |= (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT);

	if ((stage & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) != 0)
		flags |= (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);

	if ((stage & VK_SHADER_STAGE_GEOMETRY_BIT) != 0)
		flags |= VK_SHADER_STAGE_GEOMETRY_BIT;

	return flags;
}
//! Check that SSBO read/write is available and that all shader stages are supported.
void requireFeaturesForSSBOAccess (OperationContext& context, const VkShaderStageFlags usedStages)
{
	const InstanceInterface&	vki					= context.getInstanceInterface();
	const VkPhysicalDevice		physDevice			= context.getPhysicalDevice();
	FeatureFlags				requiredFeatures	= (FeatureFlags)0;

	if ((usedStages & VK_SHADER_STAGE_GEOMETRY_BIT) != 0)
		requiredFeatures |= FEATURE_GEOMETRY_SHADER;

	if ((usedStages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) != 0)
		requiredFeatures |= FEATURE_TESSELLATION_SHADER;

	if ((usedStages & VK_SHADER_STAGE_FRAGMENT_BIT) != 0)
		requiredFeatures |= FEATURE_FRAGMENT_STORES_AND_ATOMICS;

	// Any graphics stage other than fragment needs vertex-pipeline stores/atomics.
	if ((usedStages & (VK_SHADER_STAGE_ALL_GRAPHICS & (~VK_SHADER_STAGE_FRAGMENT_BIT))) != 0)
		requiredFeatures |= FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS;

	requireFeatures(vki, physDevice, requiredFeatures);
}
//! Expose the contents of a host-visible buffer as a Data view.
//! Invalidates the allocation so device writes are visible to the host before reading.
Data getHostBufferData (const OperationContext& context, const Buffer& hostBuffer, const VkDeviceSize size)
{
	const DeviceInterface&	vk		= context.getDeviceInterface();
	const VkDevice			device	= context.getDevice();
	const Allocation&		alloc	= hostBuffer.getAllocation();

	invalidateAlloc(vk, device, alloc);

	const Data data =
	{
		static_cast<std::size_t>(size),				// std::size_t		size;
		static_cast<deUint8*>(alloc.getHostPtr()),	// const deUint8*	data;
	};
	return data;
}
void setHostBufferData (const OperationContext& context, const Buffer& hostBuffer, const Data& data)
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkDevice device = context.getDevice();
const Allocation& alloc = hostBuffer.getAllocation();
deMemcpy(alloc.getHostPtr(), data.data, data.size);
flushAlloc(vk, device, alloc);
}
void assertValidShaderStage (const VkShaderStageFlagBits stage)
{
switch (stage)
{
case VK_SHADER_STAGE_VERTEX_BIT:
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
case VK_SHADER_STAGE_GEOMETRY_BIT:
case VK_SHADER_STAGE_FRAGMENT_BIT:
case VK_SHADER_STAGE_COMPUTE_BIT:
// OK
break;
default:
DE_FATAL("Invalid shader stage");
break;
}
}
//! Map a single shader stage bit to the corresponding pipeline stage flag.
VkPipelineStageFlags pipelineStageFlagsFromShaderStageFlagBits (const VkShaderStageFlagBits shaderStage)
{
	if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)						return VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
	if (shaderStage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)		return VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT;
	if (shaderStage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)		return VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT;
	if (shaderStage == VK_SHADER_STAGE_GEOMETRY_BIT)					return VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
	if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)					return VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
	if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)						return VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;

	// Other usages are probably an error, so flag that.
	DE_FATAL("Invalid shader stage");
	return (VkPipelineStageFlags)0;
}
//! Fill destination buffer with a repeating pattern.
void fillPattern (void* const pData, const VkDeviceSize size)
{
static const deUint8 pattern[] = { 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31 };
deUint8* const pBytes = static_cast<deUint8*>(pData);
for (deUint32 i = 0; i < size; ++i)
pBytes[i] = pattern[i % DE_LENGTH_OF_ARRAY(pattern)];
}
//! Get size in bytes of a pixel buffer with given extent.
VkDeviceSize getPixelBufferSize (const VkFormat format, const VkExtent3D& extent)
{
	// Widen to 64 bits before multiplying: pixelSize * width * height * depth evaluated in
	// 32-bit arithmetic could overflow for large extents before being converted to VkDeviceSize.
	const VkDeviceSize pixelSize = static_cast<VkDeviceSize>(tcu::getPixelSize(mapVkFormat(format)));
	return (pixelSize * extent.width * extent.height * extent.depth);
}
//! Determine the size of a 2D image that can hold sizeBytes data.
VkExtent3D get2DImageExtentWithSize (const VkDeviceSize sizeBytes, const deUint32 pixelSize)
{
	const deUint32 numPixels = static_cast<deUint32>(sizeBytes / pixelSize);

	DE_ASSERT(numPixels <= MAX_IMAGE_DIMENSION_2D * MAX_IMAGE_DIMENSION_2D);

	// Lay pixels out row by row, capping the width and rounding the row count up.
	const deUint32 width	= std::min(numPixels, static_cast<deUint32>(MAX_IMAGE_DIMENSION_2D));
	const deUint32 height	= (numPixels / MAX_IMAGE_DIMENSION_2D) + ((numPixels % MAX_IMAGE_DIMENSION_2D != 0) ? 1u : 0u);

	return makeExtent3D(width, height, 1u);
}
//! A format-appropriate clear value with arbitrary non-zero components.
VkClearValue makeClearValue (const VkFormat format)
{
	if (isDepthStencilFormat(format))
		return makeClearValueDepthStencil(0.4f, 21u);

	if (isIntFormat(format) || isUintFormat(format))
		return makeClearValueColorU32(8u, 16u, 24u, 32u);

	return makeClearValueColorF32(0.25f, 0.49f, 0.75f, 1.0f);
}
//! Set every texel of the access to the given clear value, choosing the write path
//! from the format: depth-only, stencil-only, combined depth/stencil, integer color,
//! or float color.
void clearPixelBuffer (tcu::PixelBufferAccess& pixels, const VkClearValue& clearValue)
{
	const tcu::TextureFormat		format			= pixels.getFormat();
	const tcu::TextureChannelClass	channelClass	= tcu::getTextureChannelClass(format.type);

	if (format.order == tcu::TextureFormat::D)
	{
		// Depth-only: write the depth component of the clear value.
		for (int z = 0; z < pixels.getDepth(); z++)
		for (int y = 0; y < pixels.getHeight(); y++)
		for (int x = 0; x < pixels.getWidth(); x++)
			pixels.setPixDepth(clearValue.depthStencil.depth, x, y, z);
	}
	else if (format.order == tcu::TextureFormat::S)
	{
		// Stencil-only: write the stencil component.
		for (int z = 0; z < pixels.getDepth(); z++)
		for (int y = 0; y < pixels.getHeight(); y++)
		for (int x = 0; x < pixels.getWidth(); x++)
			pixels.setPixStencil(clearValue.depthStencil.stencil, x, y, z);
	}
	else if (format.order == tcu::TextureFormat::DS)
	{
		// Combined depth/stencil: write both components per texel.
		for (int z = 0; z < pixels.getDepth(); z++)
		for (int y = 0; y < pixels.getHeight(); y++)
		for (int x = 0; x < pixels.getWidth(); x++)
		{
			pixels.setPixDepth(clearValue.depthStencil.depth, x, y, z);
			pixels.setPixStencil(clearValue.depthStencil.stencil, x, y, z);
		}
	}
	else if (channelClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER || channelClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER)
	{
		// Integer color formats use the uint32 view of the clear color.
		const tcu::UVec4 color (clearValue.color.uint32);

		for (int z = 0; z < pixels.getDepth(); z++)
		for (int y = 0; y < pixels.getHeight(); y++)
		for (int x = 0; x < pixels.getWidth(); x++)
			pixels.setPixel(color, x, y, z);
	}
	else
	{
		// Normalized / float color formats use the float32 view of the clear color.
		const tcu::Vec4 color (clearValue.color.float32);

		for (int z = 0; z < pixels.getDepth(); z++)
		for (int y = 0; y < pixels.getHeight(); y++)
		for (int x = 0; x < pixels.getWidth(); x++)
			pixels.setPixel(color, x, y, z);
	}
}
//! Storage image format that requires StorageImageExtendedFormats SPIR-V capability (listed only Vulkan-defined formats).
//! Returns true only for formats in the explicit list below; anything else (including
//! non-Vulkan-core formats) is reported as not requiring the capability.
bool isStorageImageExtendedFormat (const VkFormat format)
{
	switch (format)
	{
		case VK_FORMAT_R32G32_SFLOAT:
		case VK_FORMAT_R32G32_SINT:
		case VK_FORMAT_R32G32_UINT:
		case VK_FORMAT_R16G16B16A16_UNORM:
		case VK_FORMAT_R16G16B16A16_SNORM:
		case VK_FORMAT_R16G16_SFLOAT:
		case VK_FORMAT_R16G16_UNORM:
		case VK_FORMAT_R16G16_SNORM:
		case VK_FORMAT_R16G16_SINT:
		case VK_FORMAT_R16G16_UINT:
		case VK_FORMAT_R16_SFLOAT:
		case VK_FORMAT_R16_UNORM:
		case VK_FORMAT_R16_SNORM:
		case VK_FORMAT_R16_SINT:
		case VK_FORMAT_R16_UINT:
		case VK_FORMAT_R8G8_UNORM:
		case VK_FORMAT_R8G8_SNORM:
		case VK_FORMAT_R8G8_SINT:
		case VK_FORMAT_R8G8_UINT:
		case VK_FORMAT_R8_UNORM:
		case VK_FORMAT_R8_SNORM:
		case VK_FORMAT_R8_SINT:
		case VK_FORMAT_R8_UINT:
			return true;

		default:
			return false;
	}
}
//! Non-array image view type matching an image of the given type.
VkImageViewType getImageViewType (const VkImageType imageType)
{
	if (imageType == VK_IMAGE_TYPE_1D)
		return VK_IMAGE_VIEW_TYPE_1D;
	if (imageType == VK_IMAGE_TYPE_2D)
		return VK_IMAGE_VIEW_TYPE_2D;
	if (imageType == VK_IMAGE_TYPE_3D)
		return VK_IMAGE_VIEW_TYPE_3D;

	DE_FATAL("Unknown image type");
	return VK_IMAGE_VIEW_TYPE_LAST;
}
//! GLSL image type string (e.g. "uimage2D") for the given format and image type.
std::string getShaderImageType (const VkFormat format, const VkImageType imageType)
{
	const tcu::TextureFormat	texFormat	= mapVkFormat(format);
	// Integer formats get a "u"/"i" prefix; everything else uses the plain image type.
	const std::string			formatPart	= tcu::getTextureChannelClass(texFormat.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
											  tcu::getTextureChannelClass(texFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" : "";
	switch (imageType)
	{
		case VK_IMAGE_TYPE_1D: return formatPart + "image1D";
		case VK_IMAGE_TYPE_2D: return formatPart + "image2D";
		case VK_IMAGE_TYPE_3D: return formatPart + "image3D";
		default:
			DE_FATAL("Unknown image type");
			// Bug fix: the old code returned DE_NULL here, which constructs std::string
			// from a null pointer (undefined behavior if DE_FATAL does not terminate).
			return std::string();
	}
}
//! GLSL image format layout qualifier (e.g. "rgba8ui") for the given format.
//! Returns an empty string for unsupported channel orders/types (after flagging via DE_FATAL).
std::string getShaderImageFormatQualifier (const VkFormat format)
{
	const tcu::TextureFormat	texFormat	= mapVkFormat(format);
	// Bug fix: initialize to "" instead of DE_NULL so the std::string construction below
	// is well-defined even when DE_FATAL does not terminate (e.g. release builds).
	const char*					orderPart	= "";
	const char*					typePart	= "";

	switch (texFormat.order)
	{
		case tcu::TextureFormat::R:		orderPart = "r";	break;
		case tcu::TextureFormat::RG:	orderPart = "rg";	break;
		case tcu::TextureFormat::RGB:	orderPart = "rgb";	break;
		case tcu::TextureFormat::RGBA:	orderPart = "rgba";	break;

		default:
			DE_FATAL("Unsupported texture channel order");
			break;
	}

	switch (texFormat.type)
	{
		case tcu::TextureFormat::FLOAT:				typePart = "32f";		break;
		case tcu::TextureFormat::HALF_FLOAT:		typePart = "16f";		break;

		case tcu::TextureFormat::UNSIGNED_INT32:	typePart = "32ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT16:	typePart = "16ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT8:		typePart = "8ui";		break;

		case tcu::TextureFormat::SIGNED_INT32:		typePart = "32i";		break;
		case tcu::TextureFormat::SIGNED_INT16:		typePart = "16i";		break;
		case tcu::TextureFormat::SIGNED_INT8:		typePart = "8i";		break;

		case tcu::TextureFormat::UNORM_INT16:		typePart = "16";		break;
		case tcu::TextureFormat::UNORM_INT8:		typePart = "8";			break;

		case tcu::TextureFormat::SNORM_INT16:		typePart = "16_snorm";	break;
		case tcu::TextureFormat::SNORM_INT8:		typePart = "8_snorm";	break;

		default:
			DE_FATAL("Unsupported texture channel type");
			break;
	}

	return std::string(orderPart) + typePart;
}
namespace FillUpdateBuffer
{
//! Which transfer command the operation uses to write the buffer.
enum BufferOp
{
	BUFFER_OP_FILL		= 0,	// vkCmdFillBuffer
	BUFFER_OP_UPDATE	= 1,	// vkCmdUpdateBuffer
};
//! Writes a buffer resource with vkCmdFillBuffer or vkCmdUpdateBuffer, keeping a
//! host-side copy of the expected contents for later verification.
class Implementation : public Operation
{
public:
	Implementation (OperationContext& context, Resource& resource, const BufferOp bufferOp)
		: m_context		(context)
		, m_resource	(resource)
		, m_fillValue	(0x13)
		, m_bufferOp	(bufferOp)
	{
		// Both commands write whole 32-bit words.
		DE_ASSERT((m_resource.getBuffer().size % sizeof(deUint32)) == 0);
		// vkCmdUpdateBuffer payloads are capped at MAX_UPDATE_BUFFER_SIZE bytes.
		DE_ASSERT(m_bufferOp == BUFFER_OP_FILL || m_resource.getBuffer().size <= MAX_UPDATE_BUFFER_SIZE);

		// Reference copy of what the device is expected to write.
		m_data.resize(static_cast<size_t>(m_resource.getBuffer().size));

		if (m_bufferOp == BUFFER_OP_FILL)
		{
			// Expected contents: the 32-bit fill value repeated over the whole buffer.
			const std::size_t size = m_data.size() / sizeof(m_fillValue);
			deUint32* const pData = reinterpret_cast<deUint32*>(&m_data[0]);

			for (deUint32 i = 0; i < size; ++i)
				pData[i] = m_fillValue;
		}
		else if (m_bufferOp == BUFFER_OP_UPDATE)
		{
			fillPattern(&m_data[0], m_data.size());
		}
		else
		{
			// \todo Really??
			// Not reachable when constructed via Support, which asserts FILL or UPDATE.
			// Do nothing
		}
	}

	void recordCommands (const VkCommandBuffer cmdBuffer)
	{
		const DeviceInterface& vk = m_context.getDeviceInterface();

		if (m_bufferOp == BUFFER_OP_FILL)
			vk.cmdFillBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, m_fillValue);
		else if (m_bufferOp == BUFFER_OP_UPDATE)
			vk.cmdUpdateBuffer(cmdBuffer, m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size, reinterpret_cast<deUint32*>(&m_data[0]));
		else
		{
			// \todo Really??
			// Do nothing
		}
	}

	SyncInfo getInSyncInfo (void) const
	{
		// Pure write: nothing needs to be waited on before this operation.
		return emptySyncInfo;
	}

	SyncInfo getOutSyncInfo (void) const
	{
		// Both fill and update are transfer-stage writes.
		const SyncInfo syncInfo =
		{
			VK_PIPELINE_STAGE_TRANSFER_BIT,	// VkPipelineStageFlags		stageMask;
			VK_ACCESS_TRANSFER_WRITE_BIT,	// VkAccessFlags			accessMask;
			VK_IMAGE_LAYOUT_UNDEFINED,		// VkImageLayout			imageLayout;
		};
		return syncInfo;
	}

	Data getData (void) const
	{
		// View of the host-side reference contents.
		const Data data =
		{
			m_data.size(),	// std::size_t		size;
			&m_data[0],		// const deUint8*	data;
		};
		return data;
	}

	void setData (const Data& data)
	{
		deMemcpy(&m_data[0], data.data, data.size);
	}

private:
	OperationContext&		m_context;
	Resource&				m_resource;
	std::vector<deUint8>	m_data;		// expected buffer contents
	const deUint32			m_fillValue;
	const BufferOp			m_bufferOp;
};
//! Factory/support info for fill/update buffer operations.
class Support : public OperationSupport
{
public:
	Support (const ResourceDescription& resourceDesc, const BufferOp bufferOp)
		: m_resourceDesc	(resourceDesc)
		, m_bufferOp		(bufferOp)
	{
		DE_ASSERT(m_bufferOp == BUFFER_OP_FILL || m_bufferOp == BUFFER_OP_UPDATE);
		DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
	}

	deUint32 getInResourceUsageFlags (void) const
	{
		// The operation only writes the resource.
		return 0;
	}

	deUint32 getOutResourceUsageFlags (void) const
	{
		return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
	}

	VkQueueFlags getQueueFlags (const OperationContext& context) const
	{
		const bool isFillOp = (m_bufferOp == BUFFER_OP_FILL);

		// Without VK_KHR_maintenance1, vkCmdFillBuffer cannot be used on transfer-only queues.
		if (isFillOp && !isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_KHR_maintenance1"))
			return (VK_QUEUE_COMPUTE_BIT | VK_QUEUE_GRAPHICS_BIT);

		return VK_QUEUE_TRANSFER_BIT;
	}

	de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
	{
		return de::MovePtr<Operation>(new Implementation(context, resource, m_bufferOp));
	}

	de::MovePtr<Operation> build (OperationContext&, Resource&, Resource&) const
	{
		// Two-resource build is not meaningful for this operation.
		DE_ASSERT(0);
		return de::MovePtr<Operation>();
	}

private:
	const ResourceDescription	m_resourceDesc;
	const BufferOp				m_bufferOp;
};
} // FillUpdateBuffer ns
namespace CopyBuffer
{
//! Copies between the buffer resource and a host-visible staging buffer:
//! READ copies resource -> host buffer, WRITE copies host buffer -> resource.
class Implementation : public Operation
{
public:
	Implementation (OperationContext& context, Resource& resource, const AccessMode mode)
		: m_context		(context)
		, m_resource	(resource)
		, m_mode		(mode)
	{
		const DeviceInterface&		vk				= m_context.getDeviceInterface();
		const VkDevice				device			= m_context.getDevice();
		Allocator&					allocator		= m_context.getAllocator();
		// READ: host buffer is the copy destination; WRITE: it is the copy source.
		const VkBufferUsageFlags	hostBufferUsage	= (m_mode == ACCESS_MODE_READ ? VK_BUFFER_USAGE_TRANSFER_DST_BIT : VK_BUFFER_USAGE_TRANSFER_SRC_BIT);

		m_hostBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, hostBufferUsage), MemoryRequirement::HostVisible));

		const Allocation& alloc = m_hostBuffer->getAllocation();
		// READ starts zeroed (filled by the device copy); WRITE uploads a known pattern.
		if (m_mode == ACCESS_MODE_READ)
			deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_resource.getBuffer().size));
		else
			fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
		flushAlloc(vk, device, alloc);
	}

	void recordCommands (const VkCommandBuffer cmdBuffer)
	{
		const DeviceInterface&	vk			= m_context.getDeviceInterface();
		const VkBufferCopy		copyRegion	= makeBufferCopy(0u, 0u, m_resource.getBuffer().size);

		if (m_mode == ACCESS_MODE_READ)
		{
			vk.cmdCopyBuffer(cmdBuffer, m_resource.getBuffer().handle, **m_hostBuffer, 1u, &copyRegion);

			// Insert a barrier so copied data is available to the host
			const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
			vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
		}
		else
		{
			// // Insert a barrier so buffer data is available to the device
			// const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
			// vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);

			vk.cmdCopyBuffer(cmdBuffer, **m_hostBuffer, m_resource.getBuffer().handle, 1u, &copyRegion);
		}
	}

	SyncInfo getInSyncInfo (void) const
	{
		// READ accesses the resource as a transfer source; WRITE has no prior access requirement.
		const VkAccessFlags access = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_TRANSFER_READ_BIT : 0);
		const SyncInfo syncInfo =
		{
			VK_PIPELINE_STAGE_TRANSFER_BIT,	// VkPipelineStageFlags		stageMask;
			access,							// VkAccessFlags			accessMask;
			VK_IMAGE_LAYOUT_UNDEFINED,		// VkImageLayout			imageLayout;
		};
		return syncInfo;
	}

	SyncInfo getOutSyncInfo (void) const
	{
		// WRITE leaves a transfer write to be made visible; READ produces nothing downstream.
		const VkAccessFlags access = (m_mode == ACCESS_MODE_WRITE ? VK_ACCESS_TRANSFER_WRITE_BIT : 0);
		const SyncInfo syncInfo =
		{
			VK_PIPELINE_STAGE_TRANSFER_BIT,	// VkPipelineStageFlags		stageMask;
			access,							// VkAccessFlags			accessMask;
			VK_IMAGE_LAYOUT_UNDEFINED,		// VkImageLayout			imageLayout;
		};
		return syncInfo;
	}

	Data getData (void) const
	{
		return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
	}

	void setData (const Data& data)
	{
		// Only meaningful when this operation uploads data to the resource.
		DE_ASSERT(m_mode == ACCESS_MODE_WRITE);
		setHostBufferData(m_context, *m_hostBuffer, data);
	}

private:
	OperationContext&	m_context;
	Resource&			m_resource;
	const AccessMode	m_mode;
	de::MovePtr<Buffer>	m_hostBuffer;	// host-visible staging buffer
};
//! Factory/support info for host-buffer copy operations.
class Support : public OperationSupport
{
public:
	Support (const ResourceDescription& resourceDesc, const AccessMode mode)
		: m_mode (mode)
	{
		DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_BUFFER);
		DE_UNREF(resourceDesc);
	}

	deUint32 getInResourceUsageFlags (void) const
	{
		// The resource is a copy source only when the operation reads it.
		if (m_mode == ACCESS_MODE_READ)
			return VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
		return 0;
	}

	deUint32 getOutResourceUsageFlags (void) const
	{
		// The resource is a copy destination only when the operation writes it.
		if (m_mode == ACCESS_MODE_WRITE)
			return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
		return 0;
	}

	VkQueueFlags getQueueFlags (const OperationContext& context) const
	{
		DE_UNREF(context);
		return VK_QUEUE_TRANSFER_BIT;
	}

	de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
	{
		return de::MovePtr<Operation>(new Implementation(context, resource, m_mode));
	}

	de::MovePtr<Operation> build (OperationContext&, Resource&, Resource&) const
	{
		// Two-resource build is handled by CopySupport instead.
		DE_ASSERT(0);
		return de::MovePtr<Operation>();
	}

private:
	const AccessMode m_mode;
};
//! Device-side copy from one buffer resource to another (no host staging involved).
class CopyImplementation : public Operation
{
public:
	CopyImplementation (OperationContext& context, Resource& inResource, Resource& outResource)
		: m_context		(context)
		, m_inResource	(inResource)
		, m_outResource	(outResource)
	{
	}

	void recordCommands (const VkCommandBuffer cmdBuffer)
	{
		const DeviceInterface&	vk			= m_context.getDeviceInterface();
		const VkBufferCopy		copyRegion	= makeBufferCopy(0u, 0u, m_inResource.getBuffer().size);

		vk.cmdCopyBuffer(cmdBuffer, m_inResource.getBuffer().handle, m_outResource.getBuffer().handle, 1u, &copyRegion);
	}

	SyncInfo getInSyncInfo (void) const
	{
		// The input resource is read as a transfer source.
		const SyncInfo syncInfo =
		{
			VK_PIPELINE_STAGE_TRANSFER_BIT,	// VkPipelineStageFlags		stageMask;
			VK_ACCESS_TRANSFER_READ_BIT,	// VkAccessFlags			accessMask;
			VK_IMAGE_LAYOUT_UNDEFINED,		// VkImageLayout			imageLayout;
		};
		return syncInfo;
	}

	SyncInfo getOutSyncInfo (void) const
	{
		// The output resource is written as a transfer destination.
		const SyncInfo syncInfo =
		{
			VK_PIPELINE_STAGE_TRANSFER_BIT,	// VkPipelineStageFlags		stageMask;
			VK_ACCESS_TRANSFER_WRITE_BIT,	// VkAccessFlags			accessMask;
			VK_IMAGE_LAYOUT_UNDEFINED,		// VkImageLayout			imageLayout;
		};
		return syncInfo;
	}

	Data getData (void) const
	{
		// Resource-to-resource copies carry no host-side reference data.
		Data data = { 0, DE_NULL };
		return data;
	}

	void setData (const Data&)
	{
		DE_ASSERT(0);
	}

private:
	OperationContext&	m_context;
	Resource&			m_inResource;
	Resource&			m_outResource;
	// Cleanup: removed unused m_hostBuffer member — this operation never stages through a host buffer.
};
//! Factory/support info for buffer-to-buffer copy operations.
class CopySupport : public OperationSupport
{
public:
	CopySupport (const ResourceDescription& resourceDesc)
	{
		DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_BUFFER);
		DE_UNREF(resourceDesc);
	}

	deUint32 getInResourceUsageFlags (void) const
	{
		// The input buffer is read as a copy source.
		return VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
	}

	deUint32 getOutResourceUsageFlags (void) const
	{
		// The output buffer is written as a copy destination.
		return VK_BUFFER_USAGE_TRANSFER_DST_BIT;
	}

	VkQueueFlags getQueueFlags (const OperationContext& context) const
	{
		DE_UNREF(context);
		return VK_QUEUE_TRANSFER_BIT;
	}

	de::MovePtr<Operation> build (OperationContext&, Resource&) const
	{
		// Single-resource build is not meaningful for a copy between two resources.
		DE_ASSERT(0);
		return de::MovePtr<Operation>();
	}

	de::MovePtr<Operation> build (OperationContext& context, Resource& inResource, Resource& outResource) const
	{
		return de::MovePtr<Operation>(new CopyImplementation(context, inResource, outResource));
	}
};
} // CopyBuffer ns
namespace CopyBlitImage
{
//! Base for operations that move data between a host buffer and the resource image
//! through an intermediate staging image; the concrete transfer (copy/blit/...) is
//! supplied by recordCopyCommand().
class ImplementationBase : public Operation
{
public:
	//! Copy/Blit/Resolve etc. operation
	virtual void recordCopyCommand (const VkCommandBuffer cmdBuffer) = 0;

	ImplementationBase (OperationContext& context, Resource& resource, const AccessMode mode)
		: m_context		(context)
		, m_resource	(resource)
		, m_mode		(mode)
		, m_bufferSize	(getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
	{
		const DeviceInterface&	vk			= m_context.getDeviceInterface();
		const VkDevice			device		= m_context.getDevice();
		Allocator&				allocator	= m_context.getAllocator();

		// Host-visible buffer used to upload (WRITE) or read back (READ) the image contents.
		m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
			vk, device, allocator, makeBufferCreateInfo(m_bufferSize, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
			MemoryRequirement::HostVisible));

		const Allocation& alloc = m_hostBuffer->getAllocation();
		// READ starts from zeroed memory (filled by the device later); WRITE uploads a known pattern.
		if (m_mode == ACCESS_MODE_READ)
			deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_bufferSize));
		else
			fillPattern(alloc.getHostPtr(), m_bufferSize);
		flushAlloc(vk, device, alloc);

		// Staging image
		m_image = de::MovePtr<Image>(new Image(
			vk, device, allocator,
			makeImageCreateInfo(m_resource.getImage().imageType, m_resource.getImage().extent, m_resource.getImage().format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
			MemoryRequirement::Any));
	}

	void recordCommands (const VkCommandBuffer cmdBuffer)
	{
		const DeviceInterface&	vk					= m_context.getDeviceInterface();
		const VkBufferImageCopy	bufferCopyRegion	= makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);

		// Reused below to flip the staging image from transfer-dst to transfer-src.
		const VkImageMemoryBarrier stagingImageTransferSrcLayoutBarrier = makeImageMemoryBarrier(
			VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
			VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
			**m_image, m_resource.getImage().subresourceRange);

		// Staging image layout
		{
			// UNDEFINED -> TRANSFER_DST: the staging image is always written first.
			const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
				(VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
				VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
				**m_image, m_resource.getImage().subresourceRange);

			vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
				0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
		}

		if (m_mode == ACCESS_MODE_READ)
		{
			// Resource Image -> Staging image
			recordCopyCommand(cmdBuffer);

			// Staging image layout
			vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
				0u, DE_NULL, 0u, DE_NULL, 1u, &stagingImageTransferSrcLayoutBarrier);

			// Image -> Host buffer
			vk.cmdCopyImageToBuffer(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &bufferCopyRegion);

			// Insert a barrier so copied data is available to the host
			const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_bufferSize);
			vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
		}
		else
		{
			// Host buffer -> Staging image
			vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &bufferCopyRegion);

			// Staging image layout
			vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
				0u, DE_NULL, 0u, DE_NULL, 1u, &stagingImageTransferSrcLayoutBarrier);

			// Resource image layout
			{
				// UNDEFINED -> TRANSFER_DST: previous contents of the resource image are discarded.
				const VkImageMemoryBarrier layoutBarrier = makeImageMemoryBarrier(
					(VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
					VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
					m_resource.getImage().handle, m_resource.getImage().subresourceRange);

				vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
					0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
			}

			// Staging image -> Resource Image
			recordCopyCommand(cmdBuffer);
		}
	}

	SyncInfo getInSyncInfo (void) const
	{
		// READ consumes the resource as a transfer source; WRITE as a transfer destination.
		const VkAccessFlags access = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_TRANSFER_READ_BIT : VK_ACCESS_TRANSFER_WRITE_BIT);
		const VkImageLayout layout = (m_mode == ACCESS_MODE_READ ? VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
		const SyncInfo syncInfo =
		{
			VK_PIPELINE_STAGE_TRANSFER_BIT,	// VkPipelineStageFlags		stageMask;
			access,							// VkAccessFlags			accessMask;
			layout,							// VkImageLayout			imageLayout;
		};
		return syncInfo;
	}

	SyncInfo getOutSyncInfo (void) const
	{
		const VkAccessFlags access = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_TRANSFER_READ_BIT : VK_ACCESS_TRANSFER_WRITE_BIT);
		const VkImageLayout layout = (m_mode == ACCESS_MODE_READ ? VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
		const SyncInfo syncInfo =
		{
			VK_PIPELINE_STAGE_TRANSFER_BIT,	// VkPipelineStageFlags		stageMask;
			access,							// VkAccessFlags			accessMask;
			layout,							// VkImageLayout			imageLayout;
		};
		return syncInfo;
	}

	Data getData (void) const
	{
		return getHostBufferData(m_context, *m_hostBuffer, m_bufferSize);
	}

	void setData (const Data& data)
	{
		// Only meaningful when this operation uploads data to the resource.
		DE_ASSERT(m_mode == ACCESS_MODE_WRITE);
		setHostBufferData(m_context, *m_hostBuffer, data);
	}

protected:
	OperationContext&	m_context;
	Resource&			m_resource;
	const AccessMode	m_mode;
	const VkDeviceSize	m_bufferSize;	// size of the image contents in bytes
	de::MovePtr<Buffer>	m_hostBuffer;	// host-visible staging buffer
	de::MovePtr<Image>	m_image;		// device-local staging image
};
//! Whole-image extent expressed as a VkOffset3D (e.g. the far corner of a blit region).
VkOffset3D makeExtentOffset (const Resource& resource)
{
	DE_ASSERT(resource.getType() == RESOURCE_TYPE_IMAGE);

	const VkExtent3D	extent	= resource.getImage().extent;
	const VkImageType	type	= resource.getImage().imageType;

	if (type == VK_IMAGE_TYPE_1D)
		return makeOffset3D(extent.width, 1, 1);
	if (type == VK_IMAGE_TYPE_2D)
		return makeOffset3D(extent.width, extent.height, 1);
	if (type == VK_IMAGE_TYPE_3D)
		return makeOffset3D(extent.width, extent.height, extent.depth);

	DE_ASSERT(0);
	return VkOffset3D();
}
//! Blit region spanning the entire image; source and destination regions are identical.
VkImageBlit makeBlitRegion (const Resource& resource)
{
	const VkOffset3D zeroOffset		= makeOffset3D(0, 0, 0);
	const VkOffset3D extentOffset	= makeExtentOffset(resource);

	VkImageBlit blitRegion;
	blitRegion.srcSubresource	= resource.getImage().subresourceLayers;
	blitRegion.srcOffsets[0]	= zeroOffset;
	blitRegion.srcOffsets[1]	= extentOffset;
	blitRegion.dstSubresource	= resource.getImage().subresourceLayers;
	blitRegion.dstOffsets[0]	= zeroOffset;
	blitRegion.dstOffsets[1]	= extentOffset;

	return blitRegion;
}
class BlitImplementation : public ImplementationBase
{
public:
BlitImplementation (OperationContext& context, Resource& resource, const AccessMode mode)
: ImplementationBase (context, resource, mode)
, m_blitRegion (makeBlitRegion(m_resource))
{
const InstanceInterface& vki = m_context.getInstanceInterface();
const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
const VkFormatProperties formatProps = getPhysicalDeviceFormatProperties(vki, physDevice, m_resource.getImage().format);
const VkFormatFeatureFlags requiredFlags = (VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT);
// SRC and DST blit is required because both images are using the same format.
if ((formatProps.optimalTilingFeatures & requiredFlags) != requiredFlags)
TCU_THROW(NotSupportedError, "Format doesn't support blits");
}
void recordCopyCommand (const VkCommandBuffer cmdBuffer)
{
const DeviceInterface& vk = m_context.getDeviceInterface();
if (m_mode == ACCESS_MODE_READ)
{
// Resource Image -> Staging image
vk.cmdBlitImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1u, &m_blitRegion, VK_FILTER_NEAREST);
}
else
{
// Staging image -> Resource Image
vk.cmdBlitImage(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1u, &m_blitRegion, VK_FILTER_NEAREST);
}
}
private:
const VkImageBlit m_blitRegion;
};
//! Build an image copy region covering the full extent of the given image resource.
//! Source and destination subresources are identical; both offsets are zero.
VkImageCopy makeImageCopyRegion (const Resource& resource)
{
	VkImageCopy region;
	region.srcSubresource	= resource.getImage().subresourceLayers;
	region.srcOffset		= makeOffset3D(0, 0, 0);
	region.dstSubresource	= resource.getImage().subresourceLayers;
	region.dstOffset		= makeOffset3D(0, 0, 0);
	region.extent			= resource.getImage().extent;

	return region;
}
class CopyImplementation : public ImplementationBase
{
public:
CopyImplementation (OperationContext& context, Resource& resource, const AccessMode mode)
: ImplementationBase (context, resource, mode)
, m_imageCopyRegion (makeImageCopyRegion(m_resource))
{
}
void recordCopyCommand (const VkCommandBuffer cmdBuffer)
{
const DeviceInterface& vk = m_context.getDeviceInterface();
if (m_mode == ACCESS_MODE_READ)
{
// Resource Image -> Staging image
vk.cmdCopyImage(cmdBuffer, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &m_imageCopyRegion);
}
else
{
// Staging image -> Resource Image
vk.cmdCopyImage(cmdBuffer, **m_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_resource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &m_imageCopyRegion);
}
}
private:
const VkImageCopy m_imageCopyRegion;
};
//! Which transfer command the operation records.
enum Type
{
TYPE_COPY, // uses vkCmdCopyImage
TYPE_BLIT, // uses vkCmdBlitImage
};
//! Operation support for copy/blit staging on a single image resource.
//! Selects the required queue capabilities and instantiates the matching implementation.
class Support : public OperationSupport
{
public:
	Support (const ResourceDescription& resourceDesc, const Type type, const AccessMode mode)
		: m_type	(type)
		, m_mode	(mode)
	{
		DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_IMAGE);

		const bool isDepthStencil = isDepthStencilFormat(resourceDesc.imageFormat);

		// Blits and depth/stencil transfers need a graphics-capable queue;
		// otherwise a plain transfer queue suffices.
		if (isDepthStencil || m_type == TYPE_BLIT)
			m_requiredQueueFlags = VK_QUEUE_GRAPHICS_BIT;
		else
			m_requiredQueueFlags = VK_QUEUE_TRANSFER_BIT;

		// Don't blit depth/stencil images.
		DE_ASSERT(m_type != TYPE_BLIT || !isDepthStencil);
	}

	deUint32 getInResourceUsageFlags (void) const
	{
		if (m_mode == ACCESS_MODE_READ)
			return VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
		return 0;
	}

	deUint32 getOutResourceUsageFlags (void) const
	{
		if (m_mode == ACCESS_MODE_WRITE)
			return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
		return 0;
	}

	VkQueueFlags getQueueFlags (const OperationContext& context) const
	{
		DE_UNREF(context);
		return m_requiredQueueFlags;
	}

	de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
	{
		if (m_type == TYPE_BLIT)
			return de::MovePtr<Operation>(new BlitImplementation(context, resource, m_mode));
		return de::MovePtr<Operation>(new CopyImplementation(context, resource, m_mode));
	}

	de::MovePtr<Operation> build (OperationContext&, Resource&, Resource&) const
	{
		// Single-resource operation: the two-resource overload must never be called.
		DE_ASSERT(0);
		return de::MovePtr<Operation>();
	}

private:
	const Type			m_type;
	const AccessMode	m_mode;
	VkQueueFlags		m_requiredQueueFlags;
};
//! Operation that blits one image resource (in) into another (out) with vkCmdBlitImage.
//! Unlike BlitImplementation, no host-visible staging buffer is involved; data only moves between the two device images.
class BlitCopyImplementation : public Operation
{
public:
BlitCopyImplementation (OperationContext& context, Resource& inResource, Resource& outResource)
: m_context (context)
, m_inResource (inResource)
, m_outResource (outResource)
, m_blitRegion (makeBlitRegion(m_inResource))
{
DE_ASSERT(m_inResource.getType() == RESOURCE_TYPE_IMAGE);
DE_ASSERT(m_outResource.getType() == RESOURCE_TYPE_IMAGE);
const InstanceInterface& vki = m_context.getInstanceInterface();
const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
const VkFormatProperties formatProps = getPhysicalDeviceFormatProperties(vki, physDevice, m_inResource.getImage().format);
const VkFormatFeatureFlags requiredFlags = (VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT);
// SRC and DST blit is required because both images are using the same format.
if ((formatProps.optimalTilingFeatures & requiredFlags) != requiredFlags)
TCU_THROW(NotSupportedError, "Format doesn't support blits");
}
void recordCommands (const VkCommandBuffer cmdBuffer)
{
const DeviceInterface& vk = m_context.getDeviceInterface();
// Transition the destination image to TRANSFER_DST_OPTIMAL before blitting into it.
{
const VkImageMemoryBarrier layoutBarrier =
makeImageMemoryBarrier(
(VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
m_outResource.getImage().handle, m_outResource.getImage().subresourceRange);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
}
// In resource -> out resource, whole extent, nearest filtering.
vk.cmdBlitImage(cmdBuffer,
m_inResource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
m_outResource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1u, &m_blitRegion, VK_FILTER_NEAREST);
}
// The input resource is read by the transfer stage in TRANSFER_SRC_OPTIMAL layout.
SyncInfo getInSyncInfo (void) const
{
const SyncInfo syncInfo =
{
VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout imageLayout;
};
return syncInfo;
}
// The output resource is written by the transfer stage in TRANSFER_DST_OPTIMAL layout.
SyncInfo getOutSyncInfo (void) const
{
const SyncInfo syncInfo =
{
VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout imageLayout;
};
return syncInfo;
}
// No host-side data: this operation only moves data between device images.
Data getData (void) const
{
Data data = { 0, DE_NULL };
return data;
}
void setData (const Data&)
{
// Setting host data is not meaningful for a device-to-device blit.
DE_ASSERT(0);
}
private:
OperationContext& m_context;
Resource& m_inResource;
Resource& m_outResource;
const VkImageBlit m_blitRegion;
};
//! Operation that copies one image resource (in) into another (out) with vkCmdCopyImage.
//! No host-visible staging buffer is involved; data only moves between the two device images.
class CopyCopyImplementation : public Operation
{
public:
CopyCopyImplementation (OperationContext& context, Resource& inResource, Resource& outResource)
: m_context (context)
, m_inResource (inResource)
, m_outResource (outResource)
, m_imageCopyRegion (makeImageCopyRegion(m_inResource))
{
DE_ASSERT(m_inResource.getType() == RESOURCE_TYPE_IMAGE);
DE_ASSERT(m_outResource.getType() == RESOURCE_TYPE_IMAGE);
}
void recordCommands (const VkCommandBuffer cmdBuffer)
{
const DeviceInterface& vk = m_context.getDeviceInterface();
// Transition the destination image to TRANSFER_DST_OPTIMAL before copying into it.
{
const VkImageMemoryBarrier layoutBarrier =
makeImageMemoryBarrier(
(VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
m_outResource.getImage().handle, m_outResource.getImage().subresourceRange);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, 1u, &layoutBarrier);
}
// In resource -> out resource, whole extent.
vk.cmdCopyImage(cmdBuffer,
m_inResource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
m_outResource.getImage().handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1u, &m_imageCopyRegion);
}
// The input resource is read by the transfer stage in TRANSFER_SRC_OPTIMAL layout.
SyncInfo getInSyncInfo (void) const
{
const SyncInfo syncInfo =
{
VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout imageLayout;
};
return syncInfo;
}
// The output resource is written by the transfer stage in TRANSFER_DST_OPTIMAL layout.
SyncInfo getOutSyncInfo (void) const
{
const SyncInfo syncInfo =
{
VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags stageMask;
VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout imageLayout;
};
return syncInfo;
}
// No host-side data: this operation only moves data between device images.
Data getData (void) const
{
Data data = { 0, DE_NULL };
return data;
}
void setData (const Data&)
{
// Setting host data is not meaningful for a device-to-device copy.
DE_ASSERT(0);
}
private:
OperationContext& m_context;
Resource& m_inResource;
Resource& m_outResource;
const VkImageCopy m_imageCopyRegion;
};
//! Operation support for copy/blit between two image resources (in -> out).
//! Selects the required queue capabilities and instantiates the matching implementation.
class CopySupport : public OperationSupport
{
public:
	CopySupport (const ResourceDescription& resourceDesc, const Type type)
		: m_type (type)
	{
		DE_ASSERT(resourceDesc.type == RESOURCE_TYPE_IMAGE);

		const bool isDepthStencil = isDepthStencilFormat(resourceDesc.imageFormat);

		// Blits and depth/stencil transfers need a graphics-capable queue;
		// otherwise a plain transfer queue suffices.
		if (isDepthStencil || m_type == TYPE_BLIT)
			m_requiredQueueFlags = VK_QUEUE_GRAPHICS_BIT;
		else
			m_requiredQueueFlags = VK_QUEUE_TRANSFER_BIT;

		// Don't blit depth/stencil images.
		DE_ASSERT(m_type != TYPE_BLIT || !isDepthStencil);
	}

	deUint32 getInResourceUsageFlags (void) const
	{
		return VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
	}

	deUint32 getOutResourceUsageFlags (void) const
	{
		return VK_IMAGE_USAGE_TRANSFER_DST_BIT;
	}

	VkQueueFlags getQueueFlags (const OperationContext& context) const
	{
		DE_UNREF(context);
		return m_requiredQueueFlags;
	}

	de::MovePtr<Operation> build (OperationContext&, Resource&) const
	{
		// Two-resource operation: the single-resource overload must never be called.
		DE_ASSERT(0);
		return de::MovePtr<Operation>();
	}

	de::MovePtr<Operation> build (OperationContext& context, Resource& inResource, Resource& outResource) const
	{
		if (m_type == TYPE_BLIT)
			return de::MovePtr<Operation>(new BlitCopyImplementation(context, inResource, outResource));
		return de::MovePtr<Operation>(new CopyCopyImplementation(context, inResource, outResource));
	}

private:
	const Type		m_type;
	VkQueueFlags	m_requiredQueueFlags;
};
} // CopyBlitImage ns
namespace ShaderAccess
{
//! How compute work is launched by ComputePipeline.
enum DispatchCall
{
DISPATCH_CALL_DISPATCH, // vkCmdDispatch
DISPATCH_CALL_DISPATCH_INDIRECT, // vkCmdDispatchIndirect, parameters read from a host-filled indirect buffer
};
//! Graphics pipeline that renders a VertexGrid into a small (16x16 RGBA8) color attachment,
//! with the caller-provided shaders (looked up by shaderPrefix in the binary collection)
//! bound for the stages required by 'stage'. Used to run shader-based resource accesses.
class GraphicsPipeline : public Pipeline
{
public:
GraphicsPipeline (OperationContext& context, const VkShaderStageFlagBits stage, const std::string& shaderPrefix, const VkDescriptorSetLayout descriptorSetLayout)
: m_vertices (context)
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkDevice device = context.getDevice();
Allocator& allocator = context.getAllocator();
// getRequiredStages expands 'stage' to all stages that must be present in the pipeline
// (e.g. tessellation stages always come in pairs, and VS/FS are always needed).
const VkShaderStageFlags requiredStages = getRequiredStages(stage);
// Color attachment
m_colorFormat = VK_FORMAT_R8G8B8A8_UNORM;
m_colorImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
m_colorImageExtent = makeExtent3D(16u, 16u, 1u);
m_colorAttachmentImage = de::MovePtr<Image>(new Image(vk, device, allocator,
makeImageCreateInfo(VK_IMAGE_TYPE_2D, m_colorImageExtent, m_colorFormat, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT),
MemoryRequirement::Any));
// Pipeline
m_colorAttachmentView = makeImageView (vk, device, **m_colorAttachmentImage, VK_IMAGE_VIEW_TYPE_2D, m_colorFormat, m_colorImageSubresourceRange);
m_renderPass = makeRenderPass (vk, device, m_colorFormat);
m_framebuffer = makeFramebuffer (vk, device, *m_renderPass, *m_colorAttachmentView, m_colorImageExtent.width, m_colorImageExtent.height);
m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
GraphicsPipelineBuilder pipelineBuilder;
pipelineBuilder
.setRenderSize (tcu::IVec2(m_colorImageExtent.width, m_colorImageExtent.height))
.setVertexInputSingleAttribute (m_vertices.getVertexFormat(), m_vertices.getVertexStride())
.setShader (vk, device, VK_SHADER_STAGE_VERTEX_BIT, context.getBinaryCollection().get(shaderPrefix + "vert"), DE_NULL)
.setShader (vk, device, VK_SHADER_STAGE_FRAGMENT_BIT, context.getBinaryCollection().get(shaderPrefix + "frag"), DE_NULL);
// Tessellation shaders are optional; both control and evaluation are added together.
if (requiredStages & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))
pipelineBuilder
.setPatchControlPoints (m_vertices.getNumVertices())
.setShader (vk, device, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, context.getBinaryCollection().get(shaderPrefix + "tesc"), DE_NULL)
.setShader (vk, device, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, context.getBinaryCollection().get(shaderPrefix + "tese"), DE_NULL);
if (requiredStages & VK_SHADER_STAGE_GEOMETRY_BIT)
pipelineBuilder
.setShader (vk, device, VK_SHADER_STAGE_GEOMETRY_BIT, context.getBinaryCollection().get(shaderPrefix + "geom"), DE_NULL);
m_pipeline = pipelineBuilder.build(vk, device, *m_pipelineLayout, *m_renderPass, context.getPipelineCacheData());
}
//! Record a full render pass drawing the vertex grid once with the bound descriptor set.
void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
{
const DeviceInterface& vk = context.getDeviceInterface();
// Change color attachment image layout
{
const VkImageMemoryBarrier colorAttachmentLayoutBarrier = makeImageMemoryBarrier(
(VkAccessFlags)0, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
**m_colorAttachmentImage, m_colorImageSubresourceRange);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, 1u, &colorAttachmentLayoutBarrier);
}
{
const VkRect2D renderArea = makeRect2D(m_colorImageExtent);
const tcu::Vec4 clearColor = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
beginRenderPass(vk, cmdBuffer, *m_renderPass, *m_framebuffer, renderArea, clearColor);
}
vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
{
const VkDeviceSize vertexBufferOffset = 0ull;
const VkBuffer vertexBuffer = m_vertices.getVertexBuffer();
vk.cmdBindVertexBuffers(cmdBuffer, 0u, 1u, &vertexBuffer, &vertexBufferOffset);
}
vk.cmdDraw(cmdBuffer, m_vertices.getNumVertices(), 1u, 0u, 0u);
endRenderPass(vk, cmdBuffer);
}
private:
const VertexGrid m_vertices;
VkFormat m_colorFormat;
de::MovePtr<Image> m_colorAttachmentImage;
Move<VkImageView> m_colorAttachmentView;
VkExtent3D m_colorImageExtent;
VkImageSubresourceRange m_colorImageSubresourceRange;
Move<VkRenderPass> m_renderPass;
Move<VkFramebuffer> m_framebuffer;
Move<VkPipelineLayout> m_pipelineLayout;
Move<VkPipeline> m_pipeline;
};
//! Compute pipeline wrapper that launches a single workgroup, either directly
//! (vkCmdDispatch) or via an indirect buffer pre-filled with (1, 1, 1).
class ComputePipeline : public Pipeline
{
public:
ComputePipeline (OperationContext& context, const DispatchCall dispatchCall, const std::string& shaderPrefix, const VkDescriptorSetLayout descriptorSetLayout)
: m_dispatchCall (dispatchCall)
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkDevice device = context.getDevice();
Allocator& allocator = context.getAllocator();
if (m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT)
{
// Host-visible indirect buffer holding the dispatch dimensions (1, 1, 1).
m_indirectBuffer = de::MovePtr<Buffer>(new Buffer(vk, device, allocator,
makeBufferCreateInfo(sizeof(VkDispatchIndirectCommand), VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT), MemoryRequirement::HostVisible));
const Allocation& alloc = m_indirectBuffer->getAllocation();
VkDispatchIndirectCommand* const pIndirectCommand = static_cast<VkDispatchIndirectCommand*>(alloc.getHostPtr());
pIndirectCommand->x = 1u;
pIndirectCommand->y = 1u;
pIndirectCommand->z = 1u;
flushAlloc(vk, device, alloc);
}
// The compute shader is looked up by "<shaderPrefix>comp" in the binary collection.
const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, context.getBinaryCollection().get(shaderPrefix + "comp"), (VkShaderModuleCreateFlags)0));
m_pipelineLayout = makePipelineLayout(vk, device, descriptorSetLayout);
m_pipeline = makeComputePipeline(vk, device, *m_pipelineLayout, *shaderModule, DE_NULL, context.getPipelineCacheData());
}
//! Bind pipeline and descriptor set, then issue one dispatch (direct or indirect).
void recordCommands (OperationContext& context, const VkCommandBuffer cmdBuffer, const VkDescriptorSet descriptorSet)
{
const DeviceInterface& vk = context.getDeviceInterface();
vk.cmdBindPipeline(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipeline);
vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *m_pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
if (m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT)
vk.cmdDispatchIndirect(cmdBuffer, **m_indirectBuffer, 0u);
else
vk.cmdDispatch(cmdBuffer, 1u, 1u, 1u);
}
private:
const DispatchCall m_dispatchCall;
de::MovePtr<Buffer> m_indirectBuffer; // only created for DISPATCH_CALL_DISPATCH_INDIRECT
Move<VkPipelineLayout> m_pipelineLayout;
Move<VkPipeline> m_pipeline;
};
//! Read/write operation on a UBO/SSBO in graphics/compute pipeline.
//! The shader copies between the resource buffer and a host-visible staging buffer:
//! READ mode copies resource -> host buffer, WRITE mode copies host buffer -> resource.
class BufferImplementation : public Operation
{
public:
BufferImplementation (OperationContext& context,
Resource& resource,
const VkShaderStageFlagBits stage,
const BufferType bufferType,
const std::string& shaderPrefix,
const AccessMode mode,
const PipelineType pipelineType,
const DispatchCall dispatchCall)
: m_context (context)
, m_resource (resource)
, m_stage (stage)
, m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
, m_bufferType (bufferType)
, m_mode (mode)
, m_dispatchCall (dispatchCall)
{
requireFeaturesForSSBOAccess (m_context, m_stage);
const DeviceInterface& vk = m_context.getDeviceInterface();
const VkDevice device = m_context.getDevice();
Allocator& allocator = m_context.getAllocator();
m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
vk, device, allocator, makeBufferCreateInfo(m_resource.getBuffer().size, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible));
// Init host buffer data
{
const Allocation& alloc = m_hostBuffer->getAllocation();
// READ: zero the destination so stale data can't pass verification;
// WRITE: fill the source with the reference pattern.
if (m_mode == ACCESS_MODE_READ)
deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_resource.getBuffer().size));
else
fillPattern(alloc.getHostPtr(), m_resource.getBuffer().size);
flushAlloc(vk, device, alloc);
}
// Prepare descriptors
{
const VkDescriptorType bufferDescriptorType = (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
m_descriptorSetLayout = DescriptorSetLayoutBuilder()
.addSingleBinding(bufferDescriptorType, m_stage)
.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_stage)
.build(vk, device);
m_descriptorPool = DescriptorPoolBuilder()
.addType(bufferDescriptorType)
.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
const VkDescriptorBufferInfo bufferInfo = makeDescriptorBufferInfo(m_resource.getBuffer().handle, m_resource.getBuffer().offset, m_resource.getBuffer().size);
const VkDescriptorBufferInfo hostBufferInfo = makeDescriptorBufferInfo(**m_hostBuffer, 0u, m_resource.getBuffer().size);
// Binding 0 is the shader's input, binding 1 its output; which buffer goes
// where depends on the access mode.
if (m_mode == ACCESS_MODE_READ)
{
DescriptorSetUpdateBuilder()
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), bufferDescriptorType, &bufferInfo)
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
.update(vk, device);
}
else
{
DescriptorSetUpdateBuilder()
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &hostBufferInfo)
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferInfo)
.update(vk, device);
}
}
// Create pipeline
m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
: de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
}
void recordCommands (const VkCommandBuffer cmdBuffer)
{
m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
// Post draw/dispatch commands
if (m_mode == ACCESS_MODE_READ)
{
const DeviceInterface& vk = m_context.getDeviceInterface();
// Insert a barrier so data written by the shader is available to the host
const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_resource.getBuffer().size);
vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
}
}
// In READ mode the resource is read by the shader (UNIFORM_READ for UBOs,
// SHADER_READ for SSBOs); in WRITE mode it is written by the shader.
SyncInfo getInSyncInfo (void) const
{
const VkAccessFlags accessFlags = (m_mode == ACCESS_MODE_READ ? (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_ACCESS_UNIFORM_READ_BIT
: VK_ACCESS_SHADER_READ_BIT)
: VK_ACCESS_SHADER_WRITE_BIT);
const SyncInfo syncInfo =
{
m_pipelineStage, // VkPipelineStageFlags stageMask;
accessFlags, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
};
return syncInfo;
}
SyncInfo getOutSyncInfo (void) const
{
const VkAccessFlags accessFlags = m_mode == ACCESS_MODE_WRITE ? VK_ACCESS_SHADER_WRITE_BIT : 0;
const SyncInfo syncInfo =
{
m_pipelineStage, // VkPipelineStageFlags stageMask;
accessFlags, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout imageLayout;
};
return syncInfo;
}
//! Return the contents of the host staging buffer.
Data getData (void) const
{
return getHostBufferData(m_context, *m_hostBuffer, m_resource.getBuffer().size);
}
//! Replace the host staging buffer contents (only valid in WRITE mode).
void setData (const Data& data)
{
DE_ASSERT(m_mode == ACCESS_MODE_WRITE);
setHostBufferData(m_context, *m_hostBuffer, data);
}
private:
OperationContext& m_context;
Resource& m_resource;
const VkShaderStageFlagBits m_stage;
const VkPipelineStageFlags m_pipelineStage;
const BufferType m_bufferType;
const AccessMode m_mode;
const DispatchCall m_dispatchCall;
de::MovePtr<Buffer> m_hostBuffer;
Move<VkDescriptorPool> m_descriptorPool;
Move<VkDescriptorSetLayout> m_descriptorSetLayout;
Move<VkDescriptorSet> m_descriptorSet;
de::MovePtr<Pipeline> m_pipeline;
};
//! Read/write operation on a storage image in a graphics/compute pipeline.
//! The shader copies between the resource image and an internal staging image
//! (binding 0 = source, binding 1 = destination); a host-visible buffer is used
//! to upload the reference pattern (WRITE) or download the result (READ).
class ImageImplementation : public Operation
{
public:
ImageImplementation (OperationContext& context,
Resource& resource,
const VkShaderStageFlagBits stage,
const std::string& shaderPrefix,
const AccessMode mode,
const PipelineType pipelineType,
const DispatchCall dispatchCall)
: m_context (context)
, m_resource (resource)
, m_stage (stage)
, m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
, m_mode (mode)
, m_dispatchCall (dispatchCall)
, m_hostBufferSizeBytes (getPixelBufferSize(m_resource.getImage().format, m_resource.getImage().extent))
{
const DeviceInterface& vk = m_context.getDeviceInterface();
const InstanceInterface& vki = m_context.getInstanceInterface();
const VkDevice device = m_context.getDevice();
const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
Allocator& allocator = m_context.getAllocator();
// Image stores are always required, in either access mode.
requireFeaturesForSSBOAccess(m_context, m_stage);
// Some storage image formats require additional capability.
if (isStorageImageExtendedFormat(m_resource.getImage().format))
requireFeatures(vki, physDevice, FEATURE_SHADER_STORAGE_IMAGE_EXTENDED_FORMATS);
m_hostBuffer = de::MovePtr<Buffer>(new Buffer(
vk, device, allocator, makeBufferCreateInfo(m_hostBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
MemoryRequirement::HostVisible));
// Init host buffer data
{
const Allocation& alloc = m_hostBuffer->getAllocation();
// READ: zero the download buffer; WRITE: fill the upload buffer with the reference pattern.
if (m_mode == ACCESS_MODE_READ)
deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(m_hostBufferSizeBytes));
else
fillPattern(alloc.getHostPtr(), m_hostBufferSizeBytes);
flushAlloc(vk, device, alloc);
}
// Image resources
{
m_image = de::MovePtr<Image>(new Image(vk, device, allocator,
makeImageCreateInfo(m_resource.getImage().imageType, m_resource.getImage().extent, m_resource.getImage().format,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_STORAGE_BIT),
MemoryRequirement::Any));
// Decide which image the shader reads from and which it writes to:
// READ copies resource -> staging, WRITE copies staging -> resource.
if (m_mode == ACCESS_MODE_READ)
{
m_srcImage = &m_resource.getImage().handle;
m_dstImage = &(**m_image);
}
else
{
m_srcImage = &(**m_image);
m_dstImage = &m_resource.getImage().handle;
}
const VkImageViewType viewType = getImageViewType(m_resource.getImage().imageType);
m_srcImageView = makeImageView(vk, device, *m_srcImage, viewType, m_resource.getImage().format, m_resource.getImage().subresourceRange);
m_dstImageView = makeImageView(vk, device, *m_dstImage, viewType, m_resource.getImage().format, m_resource.getImage().subresourceRange);
}
// Prepare descriptors
{
m_descriptorSetLayout = DescriptorSetLayoutBuilder()
.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
.build(vk, device);
m_descriptorPool = DescriptorPoolBuilder()
.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
// Storage images are accessed in GENERAL layout from the shader.
const VkDescriptorImageInfo srcImageInfo = makeDescriptorImageInfo(DE_NULL, *m_srcImageView, VK_IMAGE_LAYOUT_GENERAL);
const VkDescriptorImageInfo dstImageInfo = makeDescriptorImageInfo(DE_NULL, *m_dstImageView, VK_IMAGE_LAYOUT_GENERAL);
DescriptorSetUpdateBuilder()
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &srcImageInfo)
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &dstImageInfo)
.update(vk, device);
}
// Create pipeline
m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
: de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
}
void recordCommands (const VkCommandBuffer cmdBuffer)
{
const DeviceInterface& vk = m_context.getDeviceInterface();
const VkBufferImageCopy bufferCopyRegion = makeBufferImageCopy(m_resource.getImage().subresourceLayers, m_resource.getImage().extent);
// Destination image layout
{
const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
(VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
*m_dstImage, m_resource.getImage().subresourceRange);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, m_pipelineStage, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
}
// In write mode, source image must be filled with data.
if (m_mode == ACCESS_MODE_WRITE)
{
// Layout for transfer
{
const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
(VkAccessFlags)0, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
*m_srcImage, m_resource.getImage().subresourceRange);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
}
// Host buffer -> Src image
vk.cmdCopyBufferToImage(cmdBuffer, **m_hostBuffer, *m_srcImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &bufferCopyRegion);
// Layout for shader reading
{
const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
*m_srcImage, m_resource.getImage().subresourceRange);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_pipelineStage, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
}
}
// Execute shaders
m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
// Post draw/dispatch commands
if (m_mode == ACCESS_MODE_READ)
{
// Layout for transfer
{
const VkImageMemoryBarrier barrier = makeImageMemoryBarrier(
VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
*m_dstImage, m_resource.getImage().subresourceRange);
vk.cmdPipelineBarrier(cmdBuffer, m_pipelineStage, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, 1u, &barrier);
}
// Dst image -> Host buffer
vk.cmdCopyImageToBuffer(cmdBuffer, *m_dstImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **m_hostBuffer, 1u, &bufferCopyRegion);
// Insert a barrier so data written by the shader is available to the host
{
const VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **m_hostBuffer, 0u, m_hostBufferSizeBytes);
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0u, DE_NULL, 1u, &barrier, 0u, DE_NULL);
}
}
}
// The resource is accessed by the shader in GENERAL layout, read or write depending on mode.
SyncInfo getInSyncInfo (void) const
{
const VkAccessFlags accessFlags = (m_mode == ACCESS_MODE_READ ? VK_ACCESS_SHADER_READ_BIT : 0);
const SyncInfo syncInfo =
{
m_pipelineStage, // VkPipelineStageFlags stageMask;
accessFlags, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout imageLayout;
};
return syncInfo;
}
SyncInfo getOutSyncInfo (void) const
{
const VkAccessFlags accessFlags = (m_mode == ACCESS_MODE_WRITE ? VK_ACCESS_SHADER_WRITE_BIT : 0);
const SyncInfo syncInfo =
{
m_pipelineStage, // VkPipelineStageFlags stageMask;
accessFlags, // VkAccessFlags accessMask;
VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout imageLayout;
};
return syncInfo;
}
//! Return the contents of the host staging buffer.
Data getData (void) const
{
return getHostBufferData(m_context, *m_hostBuffer, m_hostBufferSizeBytes);
}
//! Replace the host staging buffer contents (only valid in WRITE mode).
void setData (const Data& data)
{
DE_ASSERT(m_mode == ACCESS_MODE_WRITE);
setHostBufferData(m_context, *m_hostBuffer, data);
}
private:
OperationContext& m_context;
Resource& m_resource;
const VkShaderStageFlagBits m_stage;
const VkPipelineStageFlags m_pipelineStage;
const AccessMode m_mode;
const DispatchCall m_dispatchCall;
const VkDeviceSize m_hostBufferSizeBytes;
de::MovePtr<Buffer> m_hostBuffer;
de::MovePtr<Image> m_image; //! Additional image used as src or dst depending on operation mode.
const VkImage* m_srcImage;
const VkImage* m_dstImage;
Move<VkImageView> m_srcImageView;
Move<VkImageView> m_dstImageView;
Move<VkDescriptorPool> m_descriptorPool;
Move<VkDescriptorSetLayout> m_descriptorSetLayout;
Move<VkDescriptorSet> m_descriptorSet;
de::MovePtr<Pipeline> m_pipeline;
};
//! Create generic passthrough shaders with bits of custom code inserted in a specific shader stage.
//! For every stage implied by 'stage' (via getRequiredStages), a pass-through shader is generated
//! and registered under "<shaderPrefix><stage-suffix>"; declCode/mainCode are spliced only into the
//! stage that matches 'stage'. Already-registered sources are left untouched.
void initPassthroughPrograms (SourceCollections& programCollection,
const std::string& shaderPrefix,
const std::string& declCode,
const std::string& mainCode,
const VkShaderStageFlagBits stage)
{
const VkShaderStageFlags requiredStages = getRequiredStages(stage);
// Vertex shader: passes the input position through unchanged.
if (requiredStages & VK_SHADER_STAGE_VERTEX_BIT)
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "\n"
<< "layout(location = 0) in vec4 v_in_position;\n"
<< "\n"
<< "out " << s_perVertexBlock << ";\n"
<< "\n"
<< (stage & VK_SHADER_STAGE_VERTEX_BIT ? declCode + "\n" : "")
<< "void main (void)\n"
<< "{\n"
<< "    gl_Position = v_in_position;\n"
<< (stage & VK_SHADER_STAGE_VERTEX_BIT ? mainCode : "")
<< "}\n";
if (!programCollection.glslSources.contains(shaderPrefix + "vert"))
programCollection.glslSources.add(shaderPrefix + "vert") << glu::VertexSource(src.str());
}
// Tessellation control shader: 3-vertex patches, all tess levels set to 1.
if (requiredStages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "\n"
<< "layout(vertices = 3) out;\n"
<< "\n"
<< "in " << s_perVertexBlock << " gl_in[gl_MaxPatchVertices];\n"
<< "\n"
<< "out " << s_perVertexBlock << " gl_out[];\n"
<< "\n"
<< (stage & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? declCode + "\n" : "")
<< "void main (void)\n"
<< "{\n"
<< "    gl_TessLevelInner[0] = 1.0;\n"
<< "    gl_TessLevelInner[1] = 1.0;\n"
<< "\n"
<< "    gl_TessLevelOuter[0] = 1.0;\n"
<< "    gl_TessLevelOuter[1] = 1.0;\n"
<< "    gl_TessLevelOuter[2] = 1.0;\n"
<< "    gl_TessLevelOuter[3] = 1.0;\n"
<< "\n"
<< "    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
<< (stage & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT ? "\n" + mainCode : "")
<< "}\n";
if (!programCollection.glslSources.contains(shaderPrefix + "tesc"))
programCollection.glslSources.add(shaderPrefix + "tesc") << glu::TessellationControlSource(src.str());
}
// Tessellation evaluation shader: barycentric interpolation of patch vertices.
if (requiredStages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "\n"
<< "layout(triangles, equal_spacing, ccw) in;\n"
<< "\n"
<< "in " << s_perVertexBlock << " gl_in[gl_MaxPatchVertices];\n"
<< "\n"
<< "out " << s_perVertexBlock << ";\n"
<< "\n"
<< (stage & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT ? declCode + "\n" : "")
<< "void main (void)\n"
<< "{\n"
<< "    vec3 px = gl_TessCoord.x * gl_in[0].gl_Position.xyz;\n"
<< "    vec3 py = gl_TessCoord.y * gl_in[1].gl_Position.xyz;\n"
<< "    vec3 pz = gl_TessCoord.z * gl_in[2].gl_Position.xyz;\n"
<< "    gl_Position = vec4(px + py + pz, 1.0);\n"
<< (stage & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT ? mainCode : "")
<< "}\n";
if (!programCollection.glslSources.contains(shaderPrefix + "tese"))
programCollection.glslSources.add(shaderPrefix + "tese") << glu::TessellationEvaluationSource(src.str());
}
// Geometry shader: re-emits the incoming triangle unchanged.
if (requiredStages & VK_SHADER_STAGE_GEOMETRY_BIT)
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "\n"
<< "layout(triangles) in;\n"
<< "layout(triangle_strip, max_vertices = 3) out;\n"
<< "\n"
<< "in " << s_perVertexBlock << " gl_in[];\n"
<< "\n"
<< "out " << s_perVertexBlock << ";\n"
<< "\n"
<< (stage & VK_SHADER_STAGE_GEOMETRY_BIT ? declCode + "\n" : "")
<< "void main (void)\n"
<< "{\n"
<< "    gl_Position = gl_in[0].gl_Position;\n"
<< "    EmitVertex();\n"
<< "\n"
<< "    gl_Position = gl_in[1].gl_Position;\n"
<< "    EmitVertex();\n"
<< "\n"
<< "    gl_Position = gl_in[2].gl_Position;\n"
<< "    EmitVertex();\n"
<< (stage & VK_SHADER_STAGE_GEOMETRY_BIT ? "\n" + mainCode : "")
<< "}\n";
if (!programCollection.glslSources.contains(shaderPrefix + "geom"))
programCollection.glslSources.add(shaderPrefix + "geom") << glu::GeometrySource(src.str());
}
// Fragment shader: writes constant white.
if (requiredStages & VK_SHADER_STAGE_FRAGMENT_BIT)
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "\n"
<< "layout(location = 0) out vec4 o_color;\n"
<< "\n"
<< (stage & VK_SHADER_STAGE_FRAGMENT_BIT ? declCode + "\n" : "")
<< "void main (void)\n"
<< "{\n"
<< "    o_color = vec4(1.0);\n"
<< (stage & VK_SHADER_STAGE_FRAGMENT_BIT ? "\n" + mainCode : "")
<< "}\n";
if (!programCollection.glslSources.contains(shaderPrefix + "frag"))
programCollection.glslSources.add(shaderPrefix + "frag") << glu::FragmentSource(src.str());
}
// Compute shader: single-invocation workgroup, body is entirely the custom code.
if (requiredStages & VK_SHADER_STAGE_COMPUTE_BIT)
{
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "\n"
<< "layout(local_size_x = 1) in;\n"
<< "\n"
<< (stage & VK_SHADER_STAGE_COMPUTE_BIT ? declCode + "\n" : "")
<< "void main (void)\n"
<< "{\n"
<< (stage & VK_SHADER_STAGE_COMPUTE_BIT ? mainCode : "")
<< "}\n";
if (!programCollection.glslSources.contains(shaderPrefix + "comp"))
programCollection.glslSources.add(shaderPrefix + "comp") << glu::ComputeSource(src.str());
}
}
//! Support for a read-or-write shader access to a single UBO/SSBO resource.
class BufferSupport : public OperationSupport
{
public:
    BufferSupport (const ResourceDescription& resourceDesc,
                   const BufferType bufferType,
                   const AccessMode mode,
                   const VkShaderStageFlagBits stage,
                   const DispatchCall dispatchCall = DISPATCH_CALL_DISPATCH)
        : m_resourceDesc (resourceDesc)
        , m_bufferType   (bufferType)
        , m_mode         (mode)
        , m_stage        (stage)
        , m_shaderPrefix (std::string(m_mode == ACCESS_MODE_READ ? "read_" : "write_") + (m_bufferType == BUFFER_TYPE_UNIFORM ? "ubo_" : "ssbo_"))
        , m_dispatchCall (dispatchCall)
    {
        DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
        DE_ASSERT(m_bufferType == BUFFER_TYPE_UNIFORM || m_bufferType == BUFFER_TYPE_STORAGE);
        DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
        // Uniform buffers can only be read from shaders, never written.
        DE_ASSERT(m_mode == ACCESS_MODE_READ || m_bufferType == BUFFER_TYPE_STORAGE);
        DE_ASSERT(m_bufferType != BUFFER_TYPE_UNIFORM || m_resourceDesc.size.x() <= MAX_UBO_RANGE);
        DE_ASSERT(m_dispatchCall == DISPATCH_CALL_DISPATCH || m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT);
        assertValidShaderStage(m_stage);
    }

    //! Generate passthrough shaders that copy b_in (binding 0) to b_out (binding 1).
    void initPrograms (SourceCollections& programCollection) const
    {
        DE_ASSERT((m_resourceDesc.size.x() % sizeof(tcu::UVec4)) == 0);

        // std140 array elements must be aligned to a multiple of 16, hence uvec4.
        const int         vecCount     = static_cast<int>(m_resourceDesc.size.x() / sizeof(tcu::UVec4));
        const std::string blockKeyword = (m_bufferType == BUFFER_TYPE_UNIFORM ? "uniform" : "buffer");

        std::ostringstream decl;
        decl << "layout(set = 0, binding = 0, std140) readonly " << blockKeyword << " Input {\n";
        decl << " uvec4 data[" << vecCount << "];\n";
        decl << "} b_in;\n";
        decl << "\n";
        decl << "layout(set = 0, binding = 1, std140) writeonly buffer Output {\n";
        decl << " uvec4 data[" << vecCount << "];\n";
        decl << "} b_out;\n";

        std::ostringstream body;
        body << " for (int i = 0; i < " << vecCount << "; ++i) {\n";
        body << " b_out.data[i] = b_in.data[i];\n";
        body << " }\n";

        initPassthroughPrograms(programCollection, m_shaderPrefix, decl.str(), body.str(), m_stage);
    }

    deUint32 getInResourceUsageFlags (void) const
    {
        // Usage applies only when this operation reads the resource.
        if (m_mode != ACCESS_MODE_READ)
            return 0;
        return (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
    }

    deUint32 getOutResourceUsageFlags (void) const
    {
        // Usage applies only when this operation writes the resource.
        if (m_mode != ACCESS_MODE_WRITE)
            return 0;
        return (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
    }

    VkQueueFlags getQueueFlags (const OperationContext& context) const
    {
        DE_UNREF(context);
        if (m_stage == VK_SHADER_STAGE_COMPUTE_BIT)
            return VK_QUEUE_COMPUTE_BIT;
        return VK_QUEUE_GRAPHICS_BIT;
    }

    de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
    {
        const PipelineType pipelineType = ((m_stage & VK_SHADER_STAGE_COMPUTE_BIT) != 0 ? PIPELINE_TYPE_COMPUTE : PIPELINE_TYPE_GRAPHICS);
        return de::MovePtr<Operation>(new BufferImplementation(context, resource, m_stage, m_bufferType, m_shaderPrefix, m_mode, pipelineType, m_dispatchCall));
    }

    de::MovePtr<Operation> build (OperationContext&, Resource&, Resource&) const
    {
        // Single-resource operation; the two-resource overload must never be used.
        DE_ASSERT(0);
        return de::MovePtr<Operation>();
    }

private:
    const ResourceDescription   m_resourceDesc;
    const BufferType            m_bufferType;
    const AccessMode            m_mode;
    const VkShaderStageFlagBits m_stage;
    const std::string           m_shaderPrefix;
    const DispatchCall          m_dispatchCall;
};
//! Support for a read-or-write shader access to a single storage image resource.
class ImageSupport : public OperationSupport
{
public:
    ImageSupport (const ResourceDescription& resourceDesc,
                  const AccessMode mode,
                  const VkShaderStageFlagBits stage,
                  const DispatchCall dispatchCall = DISPATCH_CALL_DISPATCH)
        : m_resourceDesc (resourceDesc)
        , m_mode         (mode)
        , m_stage        (stage)
        , m_shaderPrefix (m_mode == ACCESS_MODE_READ ? "read_image_" : "write_image_")
        , m_dispatchCall (dispatchCall)
    {
        DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_IMAGE);
        DE_ASSERT(m_mode == ACCESS_MODE_READ || m_mode == ACCESS_MODE_WRITE);
        DE_ASSERT(m_dispatchCall == DISPATCH_CALL_DISPATCH || m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT);
        assertValidShaderStage(m_stage);
    }

    //! Generate passthrough shaders that copy srcImg (binding 0) to dstImg (binding 1) texel by texel.
    void initPrograms (SourceCollections& programCollection) const
    {
        const std::string formatQualifier = getShaderImageFormatQualifier(m_resourceDesc.imageFormat);
        const std::string glslImageType   = getShaderImageType(m_resourceDesc.imageFormat, m_resourceDesc.imageType);

        std::ostringstream decl;
        decl << "layout(set = 0, binding = 0, " << formatQualifier << ") readonly uniform " << glslImageType << " srcImg;\n";
        decl << "layout(set = 0, binding = 1, " << formatQualifier << ") writeonly uniform " << glslImageType << " dstImg;\n";

        // Loop nesting depends on the image dimensionality.
        std::ostringstream body;
        switch (m_resourceDesc.imageType)
        {
            case VK_IMAGE_TYPE_1D:
                body << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
                     << " imageStore(dstImg, x, imageLoad(srcImg, x));\n";
                break;
            case VK_IMAGE_TYPE_2D:
                body << " for (int y = 0; y < " << m_resourceDesc.size.y() << "; ++y)\n"
                     << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
                     << " imageStore(dstImg, ivec2(x, y), imageLoad(srcImg, ivec2(x, y)));\n";
                break;
            case VK_IMAGE_TYPE_3D:
                body << " for (int z = 0; z < " << m_resourceDesc.size.z() << "; ++z)\n"
                     << " for (int y = 0; y < " << m_resourceDesc.size.y() << "; ++y)\n"
                     << " for (int x = 0; x < " << m_resourceDesc.size.x() << "; ++x)\n"
                     << " imageStore(dstImg, ivec3(x, y, z), imageLoad(srcImg, ivec3(x, y, z)));\n";
                break;
            default:
                DE_ASSERT(0);
                break;
        }

        initPassthroughPrograms(programCollection, m_shaderPrefix, decl.str(), body.str(), m_stage);
    }

    deUint32 getInResourceUsageFlags (void) const
    {
        // Both access modes go through imageLoad/imageStore, so storage usage is always required.
        return VK_IMAGE_USAGE_STORAGE_BIT;
    }

    deUint32 getOutResourceUsageFlags (void) const
    {
        return VK_IMAGE_USAGE_STORAGE_BIT;
    }

    VkQueueFlags getQueueFlags (const OperationContext& context) const
    {
        DE_UNREF(context);
        if (m_stage == VK_SHADER_STAGE_COMPUTE_BIT)
            return VK_QUEUE_COMPUTE_BIT;
        return VK_QUEUE_GRAPHICS_BIT;
    }

    de::MovePtr<Operation> build (OperationContext& context, Resource& resource) const
    {
        const PipelineType pipelineType = ((m_stage & VK_SHADER_STAGE_COMPUTE_BIT) != 0 ? PIPELINE_TYPE_COMPUTE : PIPELINE_TYPE_GRAPHICS);
        return de::MovePtr<Operation>(new ImageImplementation(context, resource, m_stage, m_shaderPrefix, m_mode, pipelineType, m_dispatchCall));
    }

    de::MovePtr<Operation> build (OperationContext&, Resource&, Resource&) const
    {
        // Single-resource operation; the two-resource overload must never be used.
        DE_ASSERT(0);
        return de::MovePtr<Operation>();
    }

private:
    const ResourceDescription   m_resourceDesc;
    const AccessMode            m_mode;
    const VkShaderStageFlagBits m_stage;
    const std::string           m_shaderPrefix;
    const DispatchCall          m_dispatchCall;
};
//! Copy operation on a UBO/SSBO in graphics/compute pipeline.
class BufferCopyImplementation : public Operation
{
public:
    //! Builds the descriptor set (source buffer at binding 0, destination SSBO at
    //! binding 1) and a graphics or compute pipeline running the copy shader.
    BufferCopyImplementation (OperationContext& context,
                              Resource& inResource,
                              Resource& outResource,
                              const VkShaderStageFlagBits stage,
                              const BufferType bufferType,
                              const std::string& shaderPrefix,
                              const PipelineType pipelineType,
                              const DispatchCall dispatchCall)
        : m_context (context)
        , m_inResource (inResource)
        , m_outResource (outResource)
        , m_stage (stage)
        , m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
        , m_bufferType (bufferType)
        , m_dispatchCall (dispatchCall)
    {
        // The destination is always written as an SSBO, so SSBO access is required in all cases.
        requireFeaturesForSSBOAccess (m_context, m_stage);
        const DeviceInterface& vk = m_context.getDeviceInterface();
        const VkDevice device = m_context.getDevice();
        // Prepare descriptors
        {
            const VkDescriptorType bufferDescriptorType = (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
            m_descriptorSetLayout = DescriptorSetLayoutBuilder()
                .addSingleBinding(bufferDescriptorType, m_stage)
                .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, m_stage)
                .build(vk, device);
            m_descriptorPool = DescriptorPoolBuilder()
                .addType(bufferDescriptorType)
                .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
                .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
            m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
            const VkDescriptorBufferInfo inBufferInfo = makeDescriptorBufferInfo(m_inResource.getBuffer().handle, m_inResource.getBuffer().offset, m_inResource.getBuffer().size);
            const VkDescriptorBufferInfo outBufferInfo = makeDescriptorBufferInfo(m_outResource.getBuffer().handle, m_outResource.getBuffer().offset, m_outResource.getBuffer().size);
            // Binding 0 must be written with the same descriptor type the layout declared
            // (uniform or storage depending on m_bufferType). Hard-coding
            // VK_DESCRIPTOR_TYPE_STORAGE_BUFFER here would mismatch the layout for UBO
            // sources (VUID-VkWriteDescriptorSet-descriptorType-00319).
            DescriptorSetUpdateBuilder()
                .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), bufferDescriptorType, &inBufferInfo)
                .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &outBufferInfo)
                .update(vk, device);
        }
        // Create pipeline
        m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
                                                             : de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
    }

    //! Record the draw/dispatch that performs the copy.
    void recordCommands (const VkCommandBuffer cmdBuffer)
    {
        m_pipeline->recordCommands(m_context, cmdBuffer, *m_descriptorSet);
    }

    //! Synchronization requirements for the source buffer (shader read at this stage).
    SyncInfo getInSyncInfo (void) const
    {
        const SyncInfo syncInfo =
        {
            m_pipelineStage,           // VkPipelineStageFlags stageMask;
            VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags        accessMask;
            VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout        imageLayout;
        };
        return syncInfo;
    }

    //! Synchronization requirements for the destination buffer (shader write at this stage).
    SyncInfo getOutSyncInfo (void) const
    {
        const SyncInfo syncInfo =
        {
            m_pipelineStage,            // VkPipelineStageFlags stageMask;
            VK_ACCESS_SHADER_WRITE_BIT, // VkAccessFlags        accessMask;
            VK_IMAGE_LAYOUT_UNDEFINED,  // VkImageLayout        imageLayout;
        };
        return syncInfo;
    }

    //! This operation exposes no host-visible data for verification.
    Data getData (void) const
    {
        Data data = { 0, DE_NULL };
        return data;
    }

    void setData (const Data&)
    {
        // Copy operations take no external input data.
        DE_ASSERT(0);
    }

private:
    OperationContext&           m_context;
    Resource&                   m_inResource;
    Resource&                   m_outResource;
    const VkShaderStageFlagBits m_stage;
    const VkPipelineStageFlags  m_pipelineStage;   // derived from m_stage
    const BufferType            m_bufferType;      // UBO or SSBO source
    const DispatchCall          m_dispatchCall;
    Move<VkDescriptorPool>      m_descriptorPool;
    Move<VkDescriptorSetLayout> m_descriptorSetLayout;
    Move<VkDescriptorSet>       m_descriptorSet;
    de::MovePtr<Pipeline>       m_pipeline;
};
class CopyBufferSupport : public OperationSupport
{
public:
CopyBufferSupport (const ResourceDescription& resourceDesc,
const BufferType bufferType,
const VkShaderStageFlagBits stage,
const DispatchCall dispatchCall = DISPATCH_CALL_DISPATCH)
: m_resourceDesc (resourceDesc)
, m_bufferType (bufferType)
, m_stage (stage)
, m_shaderPrefix (std::string("copy_") + getShaderStageName(stage) + (m_bufferType == BUFFER_TYPE_UNIFORM ? "_ubo_" : "_ssbo_"))
, m_dispatchCall (dispatchCall)
{
DE_ASSERT(m_resourceDesc.type == RESOURCE_TYPE_BUFFER);
DE_ASSERT(m_bufferType == BUFFER_TYPE_UNIFORM || m_bufferType == BUFFER_TYPE_STORAGE);
DE_ASSERT(m_bufferType != BUFFER_TYPE_UNIFORM || m_resourceDesc.size.x() <= MAX_UBO_RANGE);
DE_ASSERT(m_dispatchCall == DISPATCH_CALL_DISPATCH || m_dispatchCall == DISPATCH_CALL_DISPATCH_INDIRECT);
assertValidShaderStage(m_stage);
}
void initPrograms (SourceCollections& programCollection) const
{
DE_ASSERT((m_resourceDesc.size.x() % sizeof(tcu::UVec4)) == 0);
const std::string bufferTypeStr = (m_bufferType == BUFFER_TYPE_UNIFORM ? "uniform" : "buffer");
const int numVecElements = static_cast<int>(m_resourceDesc.size.x() / sizeof(tcu::UVec4)); // std140 must be aligned to a multiple of 16
std::ostringstream declSrc;
declSrc << "layout(set = 0, binding = 0, std140) readonly " << bufferTypeStr << " Input {\n"
<< " uvec4 data[" << numVecElements << "];\n"
<< "} b_in;\n"
<< "\n"
<< "layout(set = 0, binding = 1, std140) writeonly buffer Output {\n"
<< " uvec4 data[" << numVecElements << "];\n"
<< "} b_out;\n";
std::ostringstream copySrc;
copySrc << " for (int i = 0; i < " << numVecElements << "; ++i) {\n"
<< " b_out.data[i] = b_in.data[i];\n"
<< " }\n";
initPassthroughPrograms(programCollection, m_shaderPrefix, declSrc.str(), copySrc.str(), m_stage);
}
deUint32 getInResourceUsageFlags (void) const
{
return (m_bufferType == BUFFER_TYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
}
deUint32 getOutResourceUsageFlags (void) const
{
return VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
}
VkQueueFlags getQueueFlags (const OperationContext& context) const
{
DE_UNREF(context);
return (m_stage == VK_SHADER_STAGE_COMPUTE_BIT ? VK_QUEUE_COMPUTE_BIT : VK_QUEUE_GRAPHICS_BIT);
}
de::MovePtr<Operation> build (OperationContext&, Resource&) const
{
DE_ASSERT(0);
return de::MovePtr<Operation>();
}
de::MovePtr<Operation> build (OperationContext& context, Resource& inResource, Resource& outResource) const
{
if (m_stage & VK_SHADER_STAGE_COMPUTE_BIT)
return de::MovePtr<Operation>(new BufferCopyImplementation(context, inResource, outResource, m_stage, m_bufferType, m_shaderPrefix, PIPELINE_TYPE_COMPUTE, m_dispatchCall));
else
return de::MovePtr<Operation>(new BufferCopyImplementation(context, inResource, outResource, m_stage, m_bufferType, m_shaderPrefix, PIPELINE_TYPE_GRAPHICS, m_dispatchCall));
}
private:
const ResourceDescription m_resourceDesc;
const BufferType m_bufferType;
const VkShaderStageFlagBits m_stage;
const std::string m_shaderPrefix;
const DispatchCall m_dispatchCall;
};
class CopyImageImplementation : public Operation
{
public:
CopyImageImplementation (OperationContext& context,
Resource& inResource,
Resource& outResource,
const VkShaderStageFlagBits stage,
const std::string& shaderPrefix,
const PipelineType pipelineType,
const DispatchCall dispatchCall)
: m_context (context)
, m_inResource (inResource)
, m_outResource (outResource)
, m_stage (stage)
, m_pipelineStage (pipelineStageFlagsFromShaderStageFlagBits(m_stage))
, m_dispatchCall (dispatchCall)
{
const DeviceInterface& vk = m_context.getDeviceInterface();
const InstanceInterface& vki = m_context.getInstanceInterface();
const VkDevice device = m_context.getDevice();
const VkPhysicalDevice physDevice = m_context.getPhysicalDevice();
// Image stores are always required, in either access mode.
requireFeaturesForSSBOAccess(m_context, m_stage);
// Some storage image formats require additional capability.
if (isStorageImageExtendedFormat(m_inResource.getImage().format))
requireFeatures(vki, physDevice, FEATURE_SHADER_STORAGE_IMAGE_EXTENDED_FORMATS);
// Image resources
{
const VkImageViewType viewType = getImageViewType(m_inResource.getImage().imageType);
m_srcImageView = makeImageView(vk, device, m_inResource.getImage().handle, viewType, m_inResource.getImage().format, m_inResource.getImage().subresourceRange);
m_dstImageView = makeImageView(vk, device, m_outResource.getImage().handle, viewType, m_outResource.getImage().format, m_outResource.getImage().subresourceRange);
}
// Prepare descriptors
{
m_descriptorSetLayout = DescriptorSetLayoutBuilder()
.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, m_stage)
.build(vk, device);
m_descriptorPool = DescriptorPoolBuilder()
.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
const VkDescriptorImageInfo srcImageInfo = makeDescriptorImageInfo(DE_NULL, *m_srcImageView, VK_IMAGE_LAYOUT_GENERAL);
const VkDescriptorImageInfo dstImageInfo = makeDescriptorImageInfo(DE_NULL, *m_dstImageView, VK_IMAGE_LAYOUT_GENERAL);
DescriptorSetUpdateBuilder()
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &srcImageInfo)
.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &dstImageInfo)
.update(vk, device);
}
// Create pipeline
m_pipeline = (pipelineType == PIPELINE_TYPE_GRAPHICS ? de::MovePtr<Pipeline>(new GraphicsPipeline(context, stage, shaderPrefix, *m_descriptorSetLayout))
: de::MovePtr<Pipeline>(new ComputePipeline(context, m_dispatchCall, shaderPrefix, *m_descriptorSetLayout)));
}
void recordCommands (const VkCommandBuffer cmdBuffer)
{
{
const DeviceInterface& vk = m_context.getDeviceInterface();
const VkImageMemoryBarrier barriers[] = {
makeImageMemoryBarrier(
(VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
m_outResource.getImage().handle, m_outResource.getImage().subresourceRange),
makeImageMemoryBarrier(
(VkAccessFlags) 0, VK_ACCESS_SHADER_READ_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
m_inResource.getImage().handle, m_inResource.getImage().subresourceRange),
};
vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, m_pipelineStage, (VkDependencyFlags)0,
0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
}
// Execute shaders
m_pipeline->recordCommands(m_context, cmdBuffer,