blob: 1634dfa7d42d681511eeee740e9e2510fabd0a0c [file] [log] [blame]
/*-------------------------------------------------------------------------
* Vulkan Conformance Tests
* ------------------------
*
* Copyright (c) 2015 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Binding shader access tests
*//*--------------------------------------------------------------------*/
#include "vktBindingShaderAccessTests.hpp"
#include "vktTestCase.hpp"
#include "vkDefs.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkMemUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "tcuVector.hpp"
#include "tcuVectorUtil.hpp"
#include "tcuTexture.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuResultCollector.hpp"
#include "tcuTestLog.hpp"
#include "tcuRGBA.hpp"
#include "tcuSurface.hpp"
#include "tcuImageCompare.hpp"
#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"
#include "deStringUtil.hpp"
#include "deArrayUtil.hpp"
#include "qpInfo.h"
#include <cstring>
#include <iostream>
namespace vkt
{
namespace BindingModel
{
namespace
{
// Bitmask flags modifying how a test resource is created.
// RESOURCE_FLAG_LAST marks the first unused bit (used for iteration bounds).
enum ResourceFlag
{
	RESOURCE_FLAG_IMMUTABLE_SAMPLER = (1u << 0u),	// presumably: bake the sampler into the set layout as immutable — confirm at use site
	RESOURCE_FLAG_LAST = (1u << 1u)
};
// Selects which Vulkan API path is used to write the descriptor sets under test.
enum DescriptorUpdateMethod
{
	DESCRIPTOR_UPDATE_METHOD_NORMAL = 0,			//!< use vkUpdateDescriptorSets
	DESCRIPTOR_UPDATE_METHOD_WITH_TEMPLATE,			//!< use descriptor update templates
	DESCRIPTOR_UPDATE_METHOD_WITH_PUSH,				//!< use push descriptor updates
	DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE,	//!< use push descriptor update templates
	DESCRIPTOR_UPDATE_METHOD_LAST
};
// Returns the test-name suffix for the given descriptor update method:
// an empty string for the plain vkUpdateDescriptorSets path, and "N/A"
// for an out-of-range value.
std::string stringifyDescriptorUpdateMethod(DescriptorUpdateMethod method)
{
	switch (method)
	{
		// \note break statements after return are unreachable and have been removed.
		case DESCRIPTOR_UPDATE_METHOD_NORMAL:				return "";
		case DESCRIPTOR_UPDATE_METHOD_WITH_TEMPLATE:		return "with_template";
		case DESCRIPTOR_UPDATE_METHOD_WITH_PUSH:			return "with_push";
		case DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE:	return "with_push_template";
		default:											return "N/A";
	}
}
// GLSL vertex-shader snippet that synthesizes geometry purely from
// gl_VertexIndex (no vertex buffers): every 6 consecutive indices form one
// quad (two triangles), quads are placed on a 2x2 grid covering NDC, and the
// quad index is exported as quadrant_id for per-quadrant shading.
static const char* const s_quadrantGenVertexPosSource = " highp int quadPhase = gl_VertexIndex % 6;\n"
														" highp int quadXcoord = int(quadPhase == 1 || quadPhase == 4 || quadPhase == 5);\n"
														" highp int quadYcoord = int(quadPhase == 2 || quadPhase == 3 || quadPhase == 5);\n"
														" highp int quadOriginX = (gl_VertexIndex / 6) % 2;\n"
														" highp int quadOriginY = (gl_VertexIndex / 6) / 2;\n"
														" quadrant_id = gl_VertexIndex / 6;\n"
														" result_position = vec4(float(quadOriginX + quadXcoord - 1), float(quadOriginY + quadYcoord - 1), 0.0, 1.0);\n";
// Returns the gl_PerVertex built-in block (re)declaration needed by the given
// shader stage in desktop GLSL. For ES versions an empty string is returned,
// since ES declares the block implicitly.
std::string genPerVertexBlock (const vk::VkShaderStageFlagBits stage, const glu::GLSLVersion version)
{
	static const char* const block = "gl_PerVertex {\n"
									 " vec4 gl_Position;\n"
									 " float gl_PointSize;\n" // not used, but for compatibility with how implicit block is declared in ES
									 "}";
	std::ostringstream str;
	if (!glu::glslVersionIsES(version))
		switch (stage)
		{
			case vk::VK_SHADER_STAGE_VERTEX_BIT:
				str << "out " << block << ";\n";
				break;
			case vk::VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
				// tess control reads the whole input patch and writes per-output-vertex
				str << "in " << block << " gl_in[gl_MaxPatchVertices];\n"
					<< "out " << block << " gl_out[];\n";
				break;
			case vk::VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
				str << "in " << block << " gl_in[gl_MaxPatchVertices];\n"
					<< "out " << block << ";\n";
				break;
			case vk::VK_SHADER_STAGE_GEOMETRY_BIT:
				str << "in " << block << " gl_in[];\n"
					<< "out " << block << ";\n";
				break;
			default:
				// fragment/compute stages need no gl_PerVertex declaration
				break;
		}
	return str.str();
}
// True for the three descriptor types that are accessed through uniform
// (read-only) bindings in the shader.
bool isUniformDescriptorType (vk::VkDescriptorType type)
{
	switch (type)
	{
		case vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			return true;
		default:
			return false;
	}
}
// True for descriptor types whose buffer offset is supplied at bind time
// (dynamic uniform/storage buffers).
bool isDynamicDescriptorType (vk::VkDescriptorType type)
{
	switch (type)
	{
		case vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			return true;
		default:
			return false;
	}
}
// Verifies that the driver supports the requested descriptor update method,
// descriptor type and active shader stages; throws tcu::NotSupportedError
// (via TCU_THROW) when any requirement is missing.
//
// \param apiVersion       instance/device API version (used to detect promoted extensions)
// \param deviceFeatures   physical device features (store/atomic support checks)
// \param deviceExtensions extensions reported by the device
// \param updateMethod     descriptor update path the test will use
// \param descType         descriptor type under test
// \param activeStages     shader stages that access the descriptor
void verifyDriverSupport(const deUint32 apiVersion,
						 const vk::VkPhysicalDeviceFeatures& deviceFeatures,
						 const std::vector<std::string>& deviceExtensions,
						 DescriptorUpdateMethod updateMethod,
						 vk::VkDescriptorType descType,
						 vk::VkShaderStageFlags activeStages)
{
	// Collect the extensions required by the chosen update method.
	std::vector<std::string> extensionNames;
	switch (updateMethod)
	{
		case DESCRIPTOR_UPDATE_METHOD_WITH_PUSH:
			extensionNames.push_back("VK_KHR_push_descriptor");
			break;
		case DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE:
			extensionNames.push_back("VK_KHR_push_descriptor");
			// Fallthrough
		case DESCRIPTOR_UPDATE_METHOD_WITH_TEMPLATE:
			// descriptor update templates are core from Vulkan 1.1 on
			if (!vk::isCoreDeviceExtension(apiVersion, "VK_KHR_descriptor_update_template"))
				extensionNames.push_back("VK_KHR_descriptor_update_template");
			break;
		case DESCRIPTOR_UPDATE_METHOD_NORMAL:
			// no extensions needed
			break;
		default:
			DE_FATAL("Impossible");
	}

	// Check each required extension independently against the device's list.
	// (Unlike a shared counter, this also behaves correctly if the device
	// happens to report the same extension string more than once.)
	for (size_t requiredExtNdx = 0; requiredExtNdx < extensionNames.size(); requiredExtNdx++)
	{
		bool found = false;
		for (size_t deviceExtNdx = 0; deviceExtNdx < deviceExtensions.size(); deviceExtNdx++)
		{
			if (deStringEqual(deviceExtensions[deviceExtNdx].c_str(), extensionNames[requiredExtNdx].c_str()))
			{
				found = true;
				break;
			}
		}
		if (!found)
			TCU_THROW(NotSupportedError, (stringifyDescriptorUpdateMethod(updateMethod) + " tests are not supported").c_str());
	}

	// Stage-dependent feature requirements for the descriptor type itself.
	switch (descType)
	{
		case vk::VK_DESCRIPTOR_TYPE_SAMPLER:
		case vk::VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		case vk::VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
		case vk::VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		case vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
			// These are supported in all stages
			return;

		case vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case vk::VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
			// Storage access from pre-rasterization stages requires an opt-in feature...
			if (activeStages & (vk::VK_SHADER_STAGE_VERTEX_BIT |
								vk::VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
								vk::VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
								vk::VK_SHADER_STAGE_GEOMETRY_BIT))
			{
				if (!deviceFeatures.vertexPipelineStoresAndAtomics)
					TCU_THROW(NotSupportedError, (de::toString(descType) + " is not supported in the vertex pipeline").c_str());
			}
			// ...and likewise from the fragment stage.
			if (activeStages & vk::VK_SHADER_STAGE_FRAGMENT_BIT)
			{
				if (!deviceFeatures.fragmentStoresAndAtomics)
					TCU_THROW(NotSupportedError, (de::toString(descType) + " is not supported in fragment shaders").c_str());
			}
			return;

		default:
			DE_FATAL("Impossible");
	}
}
// Maps an image view type to the type of image it must be created from.
// Cube and cube-array views are backed by 2D images (with 6*N layers).
vk::VkImageType viewTypeToImageType (vk::VkImageViewType type)
{
	switch (type)
	{
		case vk::VK_IMAGE_VIEW_TYPE_1D:
		case vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY:
			return vk::VK_IMAGE_TYPE_1D;

		case vk::VK_IMAGE_VIEW_TYPE_2D:
		case vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY:
		case vk::VK_IMAGE_VIEW_TYPE_CUBE:
		case vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:
			return vk::VK_IMAGE_TYPE_2D;

		case vk::VK_IMAGE_VIEW_TYPE_3D:
			return vk::VK_IMAGE_TYPE_3D;

		default:
			DE_FATAL("Impossible");
			return (vk::VkImageType)0;
	}
}
// Layout an image must be in when accessed through the given descriptor type:
// storage images are written, so they need GENERAL; everything else is
// sampled/read-only.
vk::VkImageLayout getImageLayoutForDescriptorType (vk::VkDescriptorType descType)
{
	const bool isStorageImage = (descType == vk::VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
	return isStorageImage ? vk::VK_IMAGE_LAYOUT_GENERAL
						  : vk::VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
// Total number of bytes required to store all mip levels of the image,
// tightly packed one after another.
deUint32 getTextureLevelPyramidDataSize (const tcu::TextureLevelPyramid& srcImage)
{
	deUint32 totalBytes = 0;

	for (int levelNdx = 0; levelNdx < srcImage.getNumLevels(); ++levelNdx)
	{
		const tcu::ConstPixelBufferAccess level = srcImage.getLevel(levelNdx);

		// tightly packed
		DE_ASSERT(level.getFormat().getPixelSize() == level.getPixelPitch());

		totalBytes += level.getWidth() * level.getHeight() * level.getDepth() * level.getFormat().getPixelSize();
	}

	return totalBytes;
}
// Serializes all mip levels of srcImage tightly packed into dst (dstLen bytes,
// see getTextureLevelPyramidDataSize) and appends one VkBufferImageCopy per
// mip level and array slice to copySlices for uploading the data with a
// buffer-to-image copy.
void writeTextureLevelPyramidData (void* dst, deUint32 dstLen, const tcu::TextureLevelPyramid& srcImage, vk::VkImageViewType viewType, std::vector<vk::VkBufferImageCopy>* copySlices)
{
	// \note cube is copied face-by-face
	// Number of array slices encoded in the source image: 1D arrays store the
	// slices in the height dimension, 2D arrays and cubes in the depth
	// dimension, and 3D images are copied as a single slice.
	const deUint32 arraySize = (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (srcImage.getLevel(0).getHeight()) :
							   (viewType == vk::VK_IMAGE_VIEW_TYPE_2D || viewType == vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY) ? (srcImage.getLevel(0).getDepth()) :
							   (viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (1) :
							   (viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE || viewType == vk::VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? (srcImage.getLevel(0).getDepth()) :
							   ((deUint32)0);

	deUint32 levelOffset = 0;	// running byte offset of the current level in dst

	DE_ASSERT(arraySize != 0);

	for (int level = 0; level < srcImage.getNumLevels(); ++level)
	{
		const tcu::ConstPixelBufferAccess	srcAccess		= srcImage.getLevel(level);
		const tcu::PixelBufferAccess		dstAccess		(srcAccess.getFormat(), srcAccess.getSize(), srcAccess.getPitch(), (deUint8*)dst + levelOffset);
		const deUint32						dataSize		= srcAccess.getWidth() * srcAccess.getHeight() * srcAccess.getDepth() * srcAccess.getFormat().getPixelSize();
		const deUint32						sliceDataSize	= dataSize / arraySize;
		// Extent of a single slice within this level (see arraySize comment above).
		const deInt32						sliceHeight		= (viewType == vk::VK_IMAGE_VIEW_TYPE_1D || viewType == vk::VK_IMAGE_VIEW_TYPE_1D_ARRAY) ? (1) : (srcAccess.getHeight());
		const deInt32						sliceDepth		= (viewType == vk::VK_IMAGE_VIEW_TYPE_3D) ? (srcAccess.getDepth()) : (1);
		const tcu::IVec3					sliceSize		(srcAccess.getWidth(), sliceHeight, sliceDepth);

		// tightly packed
		DE_ASSERT(srcAccess.getFormat().getPixelSize() == srcAccess.getPixelPitch());

		// One copy region per array slice of this mip level.
		for (int sliceNdx = 0; sliceNdx < (int)arraySize; ++sliceNdx)
		{
			const vk::VkBufferImageCopy copySlice =
			{
				(vk::VkDeviceSize)levelOffset + sliceNdx * sliceDataSize,	// bufferOffset
				(deUint32)sliceSize.x(),									// bufferRowLength
				(deUint32)sliceSize.y(),									// bufferImageHeight
				{
					vk::VK_IMAGE_ASPECT_COLOR_BIT,		// aspectMask
					(deUint32)level,					// mipLevel
					(deUint32)sliceNdx,					// arrayLayer
					1u,									// arraySize
				},															// imageSubresource
				{
					0,
					0,
					0,
				},															// imageOffset
				{
					(deUint32)sliceSize.x(),
					(deUint32)sliceSize.y(),
					(deUint32)sliceSize.z(),
				}															// imageExtent
			};
			copySlices->push_back(copySlice);
		}

		DE_ASSERT(arraySize * sliceDataSize == dataSize);

		tcu::copy(dstAccess, srcAccess);
		levelOffset += dataSize;
	}

	// All of dst must have been written exactly.
	DE_ASSERT(dstLen == levelOffset);
	DE_UNREF(dstLen);
}
// Allocates device memory satisfying the buffer's requirements and binds it
// to the buffer; returns the owning allocation.
de::MovePtr<vk::Allocation> allocateAndBindObjectMemory (const vk::DeviceInterface& vki, vk::VkDevice device, vk::Allocator& allocator, vk::VkBuffer buffer, vk::MemoryRequirement requirement)
{
	const vk::VkMemoryRequirements	bufferRequirements	= vk::getBufferMemoryRequirements(vki, device, buffer);
	de::MovePtr<vk::Allocation>		memory				= allocator.allocate(bufferRequirements, requirement);

	VK_CHECK(vki.bindBufferMemory(device, buffer, memory->getMemory(), memory->getOffset()));

	return memory;
}
// Allocates device memory satisfying the image's requirements and binds it
// to the image; returns the owning allocation.
de::MovePtr<vk::Allocation> allocateAndBindObjectMemory (const vk::DeviceInterface& vki, vk::VkDevice device, vk::Allocator& allocator, vk::VkImage image, vk::MemoryRequirement requirement)
{
	const vk::VkMemoryRequirements	imageRequirements	= vk::getImageMemoryRequirements(vki, device, image);
	de::MovePtr<vk::Allocation>		memory				= allocator.allocate(imageRequirements, requirement);

	VK_CHECK(vki.bindImageMemory(device, image, memory->getMemory(), memory->getOffset()));

	return memory;
}
// Builds a sampler-only descriptor image info; image view and layout are
// left null/zero since a VK_DESCRIPTOR_TYPE_SAMPLER descriptor ignores them.
vk::VkDescriptorImageInfo makeDescriptorImageInfo (vk::VkSampler sampler)
{
	const vk::VkImageView	nullView	= (vk::VkImageView)0;
	const vk::VkImageLayout	nullLayout	= (vk::VkImageLayout)0;

	return vk::makeDescriptorImageInfo(sampler, nullView, nullLayout);
}
// Builds an image-only descriptor image info; the sampler handle is left
// null since sampled/storage image descriptors ignore it.
vk::VkDescriptorImageInfo makeDescriptorImageInfo (vk::VkImageView imageView, vk::VkImageLayout layout)
{
	const vk::VkSampler nullSampler = (vk::VkSampler)0;

	return vk::makeDescriptorImageInfo(nullSampler, imageView, layout);
}
// Fills the four quadrants of dst with c1..c4 (top-left, top-right,
// bottom-left, bottom-right). The right/bottom halves take the remainder so
// odd-sized targets are covered completely.
void drawQuadrantReferenceResult (const tcu::PixelBufferAccess& dst, const tcu::Vec4& c1, const tcu::Vec4& c2, const tcu::Vec4& c3, const tcu::Vec4& c4)
{
	const int halfW	= dst.getWidth()  / 2;
	const int halfH	= dst.getHeight() / 2;
	const int restW	= dst.getWidth()  - halfW;
	const int restH	= dst.getHeight() - halfH;

	tcu::clear(tcu::getSubregion(dst, 0,     0,     halfW, halfH), c1);
	tcu::clear(tcu::getSubregion(dst, halfW, 0,     restW, halfH), c2);
	tcu::clear(tcu::getSubregion(dst, 0,     halfH, halfW, restH), c3);
	tcu::clear(tcu::getSubregion(dst, halfW, halfH, restW, restH), c4);
}
// Convenience wrapper constructing a single descriptor-update-template entry.
static const vk::VkDescriptorUpdateTemplateEntry createTemplateBinding (deUint32 binding, deUint32 arrayElement, deUint32 descriptorCount, vk::VkDescriptorType descriptorType, size_t offset, size_t stride)
{
	vk::VkDescriptorUpdateTemplateEntry entry;

	entry.dstBinding		= binding;
	entry.dstArrayElement	= arrayElement;
	entry.descriptorCount	= descriptorCount;
	entry.descriptorType	= descriptorType;
	entry.offset			= offset;		// byte offset of the descriptor data in the raw update buffer
	entry.stride			= stride;		// byte stride between consecutive array elements

	return entry;
}
// Collects descriptor update data (image/buffer info structs etc.) into one
// contiguous raw byte buffer, suitable for use as the pData argument of
// descriptor update templates. Objects are appended with addWriteObject()
// and later referenced by their registration order via getWriteObjectOffset().
class RawUpdateRegistry
{
public:
							RawUpdateRegistry		(void);

	// Appends a copy of updateObject at the end of the raw buffer.
	template<typename Type>
	void					addWriteObject			(const Type& updateObject);
	// Byte offset of the objectId'th registered object.
	size_t					getWriteObjectOffset	(const deUint32 objectId);
	// Base address of the packed storage.
	const deUint8*			getRawPointer			() const;

private:
	std::vector<deUint8>	m_updateEntries;		// packed bytes of all registered objects
	std::vector<size_t>		m_updateEntryOffsets;	// offset of each object, indexed by registration order
	size_t					m_nextOffset;			// offset one past the last registered object
};
// Starts out empty; the vectors default-construct, only the running offset
// needs explicit initialization.
RawUpdateRegistry::RawUpdateRegistry (void)
	: m_nextOffset(0)
{
}
// Appends a byte-wise copy of updateObject to the raw buffer and records its
// offset for later lookup with getWriteObjectOffset().
// \note Type must be trivially copyable (the Vulkan descriptor info structs
//       stored here are).
template<typename Type>
void RawUpdateRegistry::addWriteObject (const Type& updateObject)
{
	m_updateEntryOffsets.push_back(m_nextOffset);

	// in this case, elements <=> bytes
	m_updateEntries.resize(m_nextOffset + sizeof(updateObject));

	// Copy with memcpy instead of a reinterpret_cast'ed assignment: the byte
	// buffer gives no alignment guarantee for Type, so a direct store would be
	// undefined behavior on alignment-sensitive platforms.
	std::memcpy(m_updateEntries.data() + m_nextOffset, &updateObject, sizeof(updateObject));

	m_nextOffset += sizeof(updateObject);
}
// Byte offset (within getRawPointer()) of the objectId'th object registered
// with addWriteObject(). objectId must be a valid registration index.
size_t RawUpdateRegistry::getWriteObjectOffset (const deUint32 objectId)
{
	const size_t offset = m_updateEntryOffsets[objectId];
	return offset;
}
// Base address of the packed update-entry storage; offsets from
// getWriteObjectOffset() are relative to this pointer.
const deUint8* RawUpdateRegistry::getRawPointer () const
{
	const deUint8* const basePtr = m_updateEntries.data();
	return basePtr;
}
// Base class for test instances that render into a single RGBA8 2D color
// attachment. Owns the render target image/view, render pass, framebuffer and
// command pool; iterate() transitions the target, calls the subclass's
// renderToTarget(), reads the result back and passes it to verifyResultImage().
class SingleTargetRenderInstance : public vkt::TestInstance
{
public:
											SingleTargetRenderInstance	(Context& context,
																		 const tcu::UVec2& size);

private:
	// Creates the color attachment image and binds freshly allocated memory
	// (returned through outAllocation).
	static vk::Move<vk::VkImage>			createColorAttachment		(const vk::DeviceInterface& vki,
																		 vk::VkDevice device,
																		 vk::Allocator& allocator,
																		 const tcu::TextureFormat& format,
																		 const tcu::UVec2& size,
																		 de::MovePtr<vk::Allocation>* outAllocation);
	static vk::Move<vk::VkImageView>		createColorAttachmentView	(const vk::DeviceInterface& vki,
																		 vk::VkDevice device,
																		 const tcu::TextureFormat& format,
																		 vk::VkImage image);
	static vk::Move<vk::VkFramebuffer>		createFramebuffer			(const vk::DeviceInterface& vki,
																		 vk::VkDevice device,
																		 vk::VkRenderPass renderpass,
																		 vk::VkImageView colorAttachmentView,
																		 const tcu::UVec2& size);
	static vk::Move<vk::VkCommandPool>		createCommandPool			(const vk::DeviceInterface& vki,
																		 vk::VkDevice device,
																		 deUint32 queueFamilyIndex);

	// Subclass hooks: describe the test (once), record/submit the draw, and
	// check the read-back image.
	virtual void							logTestPlan					(void) const = 0;
	virtual void							renderToTarget				(void) = 0;
	virtual tcu::TestStatus					verifyResultImage			(const tcu::ConstPixelBufferAccess& result) const = 0;

	void									readRenderTarget			(tcu::TextureLevel& dst);
	tcu::TestStatus							iterate						(void);

protected:
	const tcu::TextureFormat				m_targetFormat;
	const tcu::UVec2						m_targetSize;

	const vk::DeviceInterface&				m_vki;
	const vk::VkDevice						m_device;
	const vk::VkQueue						m_queue;
	const deUint32							m_queueFamilyIndex;
	vk::Allocator&							m_allocator;
	de::MovePtr<vk::Allocation>				m_colorAttachmentMemory;
	const vk::Unique<vk::VkImage>			m_colorAttachmentImage;
	const vk::Unique<vk::VkImageView>		m_colorAttachmentView;
	const vk::Unique<vk::VkRenderPass>		m_renderPass;
	const vk::Unique<vk::VkFramebuffer>		m_framebuffer;
	const vk::Unique<vk::VkCommandPool>		m_cmdPool;

	bool									m_firstIteration;	// true until logTestPlan() has run once
};
// \note Member initialization order matters here: m_vki/m_device/m_allocator
//       must be set up before the image, view, render pass and framebuffer
//       that are created from them in the initializer list.
SingleTargetRenderInstance::SingleTargetRenderInstance (Context& context,
														const tcu::UVec2& size)
	: vkt::TestInstance			(context)
	, m_targetFormat			(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8)
	, m_targetSize				(size)
	, m_vki						(context.getDeviceInterface())
	, m_device					(context.getDevice())
	, m_queue					(context.getUniversalQueue())
	, m_queueFamilyIndex		(context.getUniversalQueueFamilyIndex())
	, m_allocator				(context.getDefaultAllocator())
	, m_colorAttachmentMemory	(DE_NULL)
	, m_colorAttachmentImage	(createColorAttachment(m_vki, m_device, m_allocator, m_targetFormat, m_targetSize, &m_colorAttachmentMemory))
	, m_colorAttachmentView		(createColorAttachmentView(m_vki, m_device, m_targetFormat, *m_colorAttachmentImage))
	, m_renderPass				(makeRenderPass(m_vki, m_device, vk::mapTextureFormat(m_targetFormat)))
	, m_framebuffer				(createFramebuffer(m_vki, m_device, *m_renderPass, *m_colorAttachmentView, m_targetSize))
	, m_cmdPool					(createCommandPool(m_vki, m_device, context.getUniversalQueueFamilyIndex()))
	, m_firstIteration			(true)
{
}
// Creates the single-sampled 2D color attachment image (color attachment +
// transfer source usage, for rendering and read-back) and binds newly
// allocated device memory to it. The allocation is returned via outAllocation
// so its lifetime can be tied to the instance.
vk::Move<vk::VkImage> SingleTargetRenderInstance::createColorAttachment (const vk::DeviceInterface& vki,
																		 vk::VkDevice device,
																		 vk::Allocator& allocator,
																		 const tcu::TextureFormat& format,
																		 const tcu::UVec2& size,
																		 de::MovePtr<vk::Allocation>* outAllocation)
{
	const vk::VkImageCreateInfo imageInfo =
	{
		vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
		DE_NULL,
		(vk::VkImageCreateFlags)0,
		vk::VK_IMAGE_TYPE_2D,							// imageType
		vk::mapTextureFormat(format),					// format
		{ size.x(), size.y(), 1u },						// extent
		1,												// mipLevels
		1,												// arraySize
		vk::VK_SAMPLE_COUNT_1_BIT,						// samples
		vk::VK_IMAGE_TILING_OPTIMAL,					// tiling
		vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT,	// usage
		vk::VK_SHARING_MODE_EXCLUSIVE,					// sharingMode
		0u,												// queueFamilyCount
		DE_NULL,										// pQueueFamilyIndices
		vk::VK_IMAGE_LAYOUT_UNDEFINED,					// initialLayout
	};

	vk::Move<vk::VkImage>		image		(vk::createImage(vki, device, &imageInfo));
	de::MovePtr<vk::Allocation>	allocation	(allocateAndBindObjectMemory(vki, device, allocator, *image, vk::MemoryRequirement::Any));

	*outAllocation = allocation;
	return image;
}
// Creates a 2D color view covering the single mip level and array layer of
// the render target image, with identity component mapping.
vk::Move<vk::VkImageView> SingleTargetRenderInstance::createColorAttachmentView (const vk::DeviceInterface& vki,
																				 vk::VkDevice device,
																				 const tcu::TextureFormat& format,
																				 vk::VkImage image)
{
	const vk::VkImageViewCreateInfo createInfo =
	{
		vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
		DE_NULL,
		(vk::VkImageViewCreateFlags)0,
		image,							// image
		vk::VK_IMAGE_VIEW_TYPE_2D,		// viewType
		vk::mapTextureFormat(format),	// format
		vk::makeComponentMappingRGBA(),
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,	// aspectMask
			0u,								// baseMipLevel
			1u,								// mipLevels
			0u,								// baseArrayLayer
			1u,								// arraySize
		},
	};

	return vk::createImageView(vki, device, &createInfo);
}
// Creates a single-layer framebuffer with the color attachment view as its
// only attachment, matching the render target size.
vk::Move<vk::VkFramebuffer> SingleTargetRenderInstance::createFramebuffer (const vk::DeviceInterface& vki,
																		   vk::VkDevice device,
																		   vk::VkRenderPass renderpass,
																		   vk::VkImageView colorAttachmentView,
																		   const tcu::UVec2& size)
{
	const vk::VkFramebufferCreateInfo framebufferCreateInfo =
	{
		vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
		DE_NULL,
		(vk::VkFramebufferCreateFlags)0,
		renderpass,				// renderPass
		1u,						// attachmentCount
		&colorAttachmentView,	// pAttachments
		size.x(),				// width
		size.y(),				// height
		1,						// layers
	};

	return vk::createFramebuffer(vki, device, &framebufferCreateInfo);
}
// Command buffers allocated from this pool are recorded once and thrown
// away, so the pool is created with the TRANSIENT usage hint.
vk::Move<vk::VkCommandPool> SingleTargetRenderInstance::createCommandPool (const vk::DeviceInterface& vki,
																		   vk::VkDevice device,
																		   deUint32 queueFamilyIndex)
{
	const vk::VkCommandPoolCreateFlags poolFlags = vk::VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;

	return vk::createCommandPool(vki, device, poolFlags, queueFamilyIndex);
}
// Copies the rendered color attachment into a host-visible staging buffer,
// waits for the copy to finish, and stores the pixels into dst (which is
// (re)allocated to the target size/format).
void SingleTargetRenderInstance::readRenderTarget (tcu::TextureLevel& dst)
{
	const deUint64							pixelDataSize				= (deUint64)(m_targetSize.x() * m_targetSize.y() * m_targetFormat.getPixelSize());
	const vk::VkBufferCreateInfo			bufferCreateInfo			=
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		DE_NULL,
		0u,											// flags
		pixelDataSize,								// size
		vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT,		// usage
		vk::VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
		0u,											// queueFamilyCount
		DE_NULL,									// pQueueFamilyIndices
	};
	const vk::Unique<vk::VkBuffer>			buffer						(vk::createBuffer(m_vki, m_device, &bufferCreateInfo));
	const de::MovePtr<vk::Allocation>		bufferMemory				= allocateAndBindObjectMemory(m_vki, m_device, m_allocator, *buffer, vk::MemoryRequirement::HostVisible);

	const vk::Unique<vk::VkCommandBuffer>	cmd							(vk::allocateCommandBuffer(m_vki, m_device, *m_cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	// copy content to buffer
	beginCommandBuffer(m_vki, *cmd);
	copyImageToBuffer(m_vki, *cmd, *m_colorAttachmentImage, *buffer, tcu::IVec2(m_targetSize.x(), m_targetSize.y()));
	endCommandBuffer(m_vki, *cmd);

	submitCommandsAndWait(m_vki, m_device, m_queue, cmd.get());

	dst.setStorage(m_targetFormat, m_targetSize.x(), m_targetSize.y());

	// copy data
	// invalidate host caches before reading the GPU-written memory
	invalidateAlloc(m_vki, m_device, *bufferMemory);
	tcu::copy(dst, tcu::ConstPixelBufferAccess(dst.getFormat(), dst.getSize(), bufferMemory->getHostPtr()));
}
// One test iteration: log the plan (first time only), transition the render
// target to COLOR_ATTACHMENT_OPTIMAL, let the subclass render, then read the
// result back and hand it to verifyResultImage().
tcu::TestStatus SingleTargetRenderInstance::iterate (void)
{
	tcu::TextureLevel resultImage;

	// log
	if (m_firstIteration)
	{
		logTestPlan();
		m_firstIteration = false;
	}

	// render
	{
		// transition to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
		const vk::VkImageSubresourceRange fullSubrange =
		{
			vk::VK_IMAGE_ASPECT_COLOR_BIT,	// aspectMask
			0u,								// baseMipLevel
			1u,								// mipLevels
			0u,								// baseArraySlice
			1u,								// arraySize
		};
		const vk::VkImageMemoryBarrier imageBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
			DE_NULL,
			0u,												// srcAccessMask
			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// dstAccessMask
			vk::VK_IMAGE_LAYOUT_UNDEFINED,					// oldLayout; previous contents are discarded
			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// newLayout
			VK_QUEUE_FAMILY_IGNORED,						// srcQueueFamilyIndex
			VK_QUEUE_FAMILY_IGNORED,						// destQueueFamilyIndex
			*m_colorAttachmentImage,						// image
			fullSubrange,									// subresourceRange
		};

		const vk::Unique<vk::VkCommandBuffer> cmd (vk::allocateCommandBuffer(m_vki, m_device, *m_cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

		beginCommandBuffer(m_vki, *cmd);
		m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (vk::VkDependencyFlags)0,
								 0, (const vk::VkMemoryBarrier*)DE_NULL,
								 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
								 1, &imageBarrier);
		endCommandBuffer(m_vki, *cmd);
		submitCommandsAndWait(m_vki, m_device, m_queue, cmd.get());

		renderToTarget();
	}

	// read and verify
	readRenderTarget(resultImage);
	return verifyResultImage(resultImage.getAccess());
}
// Loads all shader stages present in the program collection, creates their
// shader modules, and exposes the corresponding
// VkPipelineShaderStageCreateInfo array for pipeline creation.
class RenderInstanceShaders
{
public:
														RenderInstanceShaders		(const vk::DeviceInterface& vki,
																					 vk::VkDevice device,
																					 const vk::VkPhysicalDeviceFeatures& deviceFeatures,
																					 const vk::BinaryCollection& programCollection);

	// True if either tessellation module was created (used to pick patch topology).
	inline bool											hasTessellationStage		(void) const { return *m_tessCtrlShaderModule != 0 || *m_tessEvalShaderModule != 0; }
	inline deUint32										getNumStages				(void) const { return (deUint32)m_stageInfos.size(); }
	inline const vk::VkPipelineShaderStageCreateInfo*	getStages					(void) const { return &m_stageInfos[0]; }

private:
	// Creates the module for one stage if its binary exists in the collection;
	// throws NotSupportedError if the stage exists but the device lacks it.
	void												addStage					(const vk::DeviceInterface& vki,
																					 vk::VkDevice device,
																					 const vk::VkPhysicalDeviceFeatures& deviceFeatures,
																					 const vk::BinaryCollection& programCollection,
																					 const char* name,
																					 vk::VkShaderStageFlagBits stage,
																					 vk::Move<vk::VkShaderModule>* outModule);

	vk::VkPipelineShaderStageCreateInfo					getShaderStageCreateInfo	(vk::VkShaderStageFlagBits stage, vk::VkShaderModule shader) const;

	vk::Move<vk::VkShaderModule>						m_vertexShaderModule;
	vk::Move<vk::VkShaderModule>						m_tessCtrlShaderModule;
	vk::Move<vk::VkShaderModule>						m_tessEvalShaderModule;
	vk::Move<vk::VkShaderModule>						m_geometryShaderModule;
	vk::Move<vk::VkShaderModule>						m_fragmentShaderModule;
	std::vector<vk::VkPipelineShaderStageCreateInfo>	m_stageInfos;	// one entry per successfully created stage
};
// Attempts to add every pipeline stage in order; a stage is only added if
// the program collection contains a binary for it (see addStage).
RenderInstanceShaders::RenderInstanceShaders (const vk::DeviceInterface& vki,
											  vk::VkDevice device,
											  const vk::VkPhysicalDeviceFeatures& deviceFeatures,
											  const vk::BinaryCollection& programCollection)
{
	const struct
	{
		const char*						name;
		vk::VkShaderStageFlagBits		stage;
		vk::Move<vk::VkShaderModule>*	module;
	} stageTable[] =
	{
		{ "vertex",		vk::VK_SHADER_STAGE_VERTEX_BIT,						&m_vertexShaderModule	},
		{ "tess_ctrl",	vk::VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,		&m_tessCtrlShaderModule	},
		{ "tess_eval",	vk::VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,	&m_tessEvalShaderModule	},
		{ "geometry",	vk::VK_SHADER_STAGE_GEOMETRY_BIT,					&m_geometryShaderModule	},
		{ "fragment",	vk::VK_SHADER_STAGE_FRAGMENT_BIT,					&m_fragmentShaderModule	},
	};
	const int numEntries = (int)(sizeof(stageTable) / sizeof(stageTable[0]));

	for (int ndx = 0; ndx < numEntries; ++ndx)
		addStage(vki, device, deviceFeatures, programCollection, stageTable[ndx].name, stageTable[ndx].stage, stageTable[ndx].module);

	DE_ASSERT(!m_stageInfos.empty());
}
// If the program collection contains a binary called `name`, creates its
// shader module, stores it in *outModule and appends a stage-create-info
// entry. Throws NotSupportedError when the binary exists but the device
// does not support the stage.
void RenderInstanceShaders::addStage (const vk::DeviceInterface& vki,
									  vk::VkDevice device,
									  const vk::VkPhysicalDeviceFeatures& deviceFeatures,
									  const vk::BinaryCollection& programCollection,
									  const char* name,
									  vk::VkShaderStageFlagBits stage,
									  vk::Move<vk::VkShaderModule>* outModule)
{
	// No binary for this stage: nothing to add.
	if (!programCollection.contains(name))
		return;

	if (!vk::isShaderStageSupported(deviceFeatures, stage))
	{
		// Wait for the GPU to idle so that throwing the exception
		// below doesn't free in-use GPU resource.
		vki.deviceWaitIdle(device);
		TCU_THROW(NotSupportedError, (de::toString(stage) + " is not supported").c_str());
	}

	vk::Move<vk::VkShaderModule> module = createShaderModule(vki, device, programCollection.get(name), (vk::VkShaderModuleCreateFlags)0);
	m_stageInfos.push_back(getShaderStageCreateInfo(stage, *module));
	*outModule = module;
}
// Builds a stage-create-info entry for the given stage/module pair; the entry
// point is always "main" and no specialization constants are used.
vk::VkPipelineShaderStageCreateInfo RenderInstanceShaders::getShaderStageCreateInfo (vk::VkShaderStageFlagBits stage, vk::VkShaderModule shader) const
{
	const vk::VkPipelineShaderStageCreateInfo stageCreateInfo =
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineShaderStageCreateFlags)0,
		stage,		// stage
		shader,		// shader
		"main",
		DE_NULL,	// pSpecializationInfo
	};
	return stageCreateInfo;
}
// SingleTargetRenderInstance specialization that renders with one graphics
// pipeline and a single draw command buffer, recorded either directly into
// the primary command buffer or into a secondary one executed from it.
class SingleCmdRenderInstance : public SingleTargetRenderInstance
{
public:
									SingleCmdRenderInstance	(Context& context,
															 bool isPrimaryCmdBuf,
															 const tcu::UVec2& renderSize);

private:
	vk::Move<vk::VkPipeline>		createPipeline				(vk::VkPipelineLayout pipelineLayout);

	// Subclass hooks: layout used by the pipeline, and the draw commands.
	virtual vk::VkPipelineLayout	getPipelineLayout			(void) const = 0;
	virtual void					writeDrawCmdBuffer			(vk::VkCommandBuffer cmd) const = 0;

	void							renderToTarget				(void);

	const bool						m_isPrimaryCmdBuf;	// record draws inline vs. in a secondary buffer
};
// \param isPrimaryCmdBuf when false, draw commands go into a secondary
//        command buffer executed from the primary one (see renderToTarget).
SingleCmdRenderInstance::SingleCmdRenderInstance (Context& context,
												  bool isPrimaryCmdBuf,
												  const tcu::UVec2& renderSize)
	: SingleTargetRenderInstance	(context, renderSize)
	, m_isPrimaryCmdBuf				(isPrimaryCmdBuf)
{
}
// Builds the graphics pipeline for the quadrant rendering: no vertex inputs
// (geometry is generated from gl_VertexIndex), single-sampled, no depth/
// stencil test, no blending, full-target static viewport/scissor. Topology
// switches to patch lists when tessellation stages are present.
vk::Move<vk::VkPipeline> SingleCmdRenderInstance::createPipeline (vk::VkPipelineLayout pipelineLayout)
{
	const RenderInstanceShaders							shaderStages		(m_vki, m_device, m_context.getDeviceFeatures(), m_context.getBinaryCollection());
	const vk::VkPrimitiveTopology						topology			= shaderStages.hasTessellationStage() ? vk::VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
	// No vertex buffers: all attributes are derived in the shader.
	const vk::VkPipelineVertexInputStateCreateInfo		vertexInputState	=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineVertexInputStateCreateFlags)0,
		0u,			// bindingCount
		DE_NULL,	// pVertexBindingDescriptions
		0u,			// attributeCount
		DE_NULL,	// pVertexAttributeDescriptions
	};
	const vk::VkPipelineInputAssemblyStateCreateInfo	iaState				=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineInputAssemblyStateCreateFlags)0,
		topology,	// topology
		VK_FALSE,	// primitiveRestartEnable
	};
	// Only used when tessellation stages are active (triangle patches).
	const vk::VkPipelineTessellationStateCreateInfo		tessState			=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineTessellationStateCreateFlags)0,
		3u,			// patchControlPoints
	};
	const vk::VkViewport								viewport			= vk::makeViewport(m_targetSize);
	const vk::VkRect2D									renderArea			= vk::makeRect2D(m_targetSize);
	const vk::VkPipelineViewportStateCreateInfo			vpState				=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineViewportStateCreateFlags)0,
		1u,			// viewportCount
		&viewport,
		1u,
		&renderArea,
	};
	const vk::VkPipelineRasterizationStateCreateInfo	rsState				=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineRasterizationStateCreateFlags)0,
		VK_FALSE,							// depthClipEnable
		VK_FALSE,							// rasterizerDiscardEnable
		vk::VK_POLYGON_MODE_FILL,			// fillMode
		vk::VK_CULL_MODE_NONE,				// cullMode
		vk::VK_FRONT_FACE_COUNTER_CLOCKWISE,	// frontFace
		VK_FALSE,							// depthBiasEnable
		0.0f,								// depthBias
		0.0f,								// depthBiasClamp
		0.0f,								// slopeScaledDepthBias
		1.0f,								// lineWidth
	};
	const vk::VkSampleMask								sampleMask			= 0x01u;
	const vk::VkPipelineMultisampleStateCreateInfo		msState				=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineMultisampleStateCreateFlags)0,
		vk::VK_SAMPLE_COUNT_1_BIT,	// rasterSamples
		VK_FALSE,					// sampleShadingEnable
		0.0f,						// minSampleShading
		&sampleMask,				// sampleMask
		VK_FALSE,					// alphaToCoverageEnable
		VK_FALSE,					// alphaToOneEnable
	};
	// Depth/stencil tests are fully disabled; values below are inert defaults.
	const vk::VkPipelineDepthStencilStateCreateInfo		dsState				=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineDepthStencilStateCreateFlags)0,
		VK_FALSE,					// depthTestEnable
		VK_FALSE,					// depthWriteEnable
		vk::VK_COMPARE_OP_ALWAYS,	// depthCompareOp
		VK_FALSE,					// depthBoundsTestEnable
		VK_FALSE,					// stencilTestEnable
		{ vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_COMPARE_OP_ALWAYS, 0u, 0u, 0u },	// front
		{ vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_STENCIL_OP_KEEP, vk::VK_COMPARE_OP_ALWAYS, 0u, 0u, 0u },	// back
		-1.0f,						// minDepthBounds
		+1.0f,						// maxDepthBounds
	};
	// Blending disabled; all color channels written.
	const vk::VkPipelineColorBlendAttachmentState		cbAttachment		=
	{
		VK_FALSE,					// blendEnable
		vk::VK_BLEND_FACTOR_ZERO,	// srcBlendColor
		vk::VK_BLEND_FACTOR_ZERO,	// destBlendColor
		vk::VK_BLEND_OP_ADD,		// blendOpColor
		vk::VK_BLEND_FACTOR_ZERO,	// srcBlendAlpha
		vk::VK_BLEND_FACTOR_ZERO,	// destBlendAlpha
		vk::VK_BLEND_OP_ADD,		// blendOpAlpha
		(vk::VK_COLOR_COMPONENT_R_BIT |
		 vk::VK_COLOR_COMPONENT_G_BIT |
		 vk::VK_COLOR_COMPONENT_B_BIT |
		 vk::VK_COLOR_COMPONENT_A_BIT),	// channelWriteMask
	};
	const vk::VkPipelineColorBlendStateCreateInfo		cbState				=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineColorBlendStateCreateFlags)0,
		VK_FALSE,					// logicOpEnable
		vk::VK_LOGIC_OP_CLEAR,		// logicOp
		1u,							// attachmentCount
		&cbAttachment,				// pAttachments
		{ 0.0f, 0.0f, 0.0f, 0.0f },	// blendConst
	};
	const vk::VkGraphicsPipelineCreateInfo				createInfo			=
	{
		vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineCreateFlags)0,
		shaderStages.getNumStages(),									// stageCount
		shaderStages.getStages(),										// pStages
		&vertexInputState,												// pVertexInputState
		&iaState,														// pInputAssemblyState
		(shaderStages.hasTessellationStage() ? &tessState : DE_NULL),	// pTessellationState
		&vpState,														// pViewportState
		&rsState,														// pRasterState
		&msState,														// pMultisampleState
		&dsState,														// pDepthStencilState
		&cbState,														// pColorBlendState
		(const vk::VkPipelineDynamicStateCreateInfo*)DE_NULL,			// pDynamicState
		pipelineLayout,													// layout
		*m_renderPass,													// renderPass
		0u,																// subpass
		(vk::VkPipeline)0,												// basePipelineHandle
		0u,																// basePipelineIndex
	};
	return createGraphicsPipeline(m_vki, m_device, (vk::VkPipelineCache)0u, &createInfo);
}
// Records and submits the draw: begins the render pass on the primary command
// buffer and either records the pipeline bind + draws inline
// (m_isPrimaryCmdBuf) or into a secondary command buffer that is executed
// from the primary one. Waits for completion before returning so that all
// locally-created objects can be destroyed safely.
void SingleCmdRenderInstance::renderToTarget (void)
{
	const vk::VkRect2D									renderArea				=
	{
		{ 0, 0 },								// offset
		{ m_targetSize.x(), m_targetSize.y() },	// extent
	};
	// Inheritance info lets the secondary buffer record inside our render pass.
	const vk::VkCommandBufferInheritanceInfo			passCmdBufInheritInfo	=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
		DE_NULL,
		(vk::VkRenderPass)*m_renderPass,		// renderPass
		0u,										// subpass
		(vk::VkFramebuffer)*m_framebuffer,		// framebuffer
		VK_FALSE,								// occlusionQueryEnable
		(vk::VkQueryControlFlags)0,
		(vk::VkQueryPipelineStatisticFlags)0,
	};
	const vk::VkCommandBufferBeginInfo					passCmdBufBeginInfo		=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
		DE_NULL,
		vk::VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT |
		vk::VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,	// flags
		&passCmdBufInheritInfo,
	};

	const vk::VkPipelineLayout							pipelineLayout			(getPipelineLayout());
	const vk::Unique<vk::VkPipeline>					pipeline				(createPipeline(pipelineLayout));
	const vk::Unique<vk::VkCommandBuffer>				mainCmd					(vk::allocateCommandBuffer(m_vki, m_device, *m_cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
	// Secondary buffer is only allocated when it will actually be used.
	const vk::Unique<vk::VkCommandBuffer>				passCmd					((m_isPrimaryCmdBuf) ? (vk::Move<vk::VkCommandBuffer>()) : (vk::allocateCommandBuffer(m_vki, m_device, *m_cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_SECONDARY)));
	const vk::Unique<vk::VkFence>						fence					(vk::createFence(m_vki, m_device));
	const vk::VkSubpassContents							passContents			= (m_isPrimaryCmdBuf) ? (vk::VK_SUBPASS_CONTENTS_INLINE) : (vk::VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);

	beginCommandBuffer(m_vki, *mainCmd);
	beginRenderPass(m_vki, *mainCmd, *m_renderPass, *m_framebuffer, renderArea, tcu::Vec4(0.0f), passContents);

	if (m_isPrimaryCmdBuf)
	{
		m_vki.cmdBindPipeline(*mainCmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
		writeDrawCmdBuffer(*mainCmd);
	}
	else
	{
		VK_CHECK(m_vki.beginCommandBuffer(*passCmd, &passCmdBufBeginInfo));
		m_vki.cmdBindPipeline(*passCmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
		writeDrawCmdBuffer(*passCmd);
		endCommandBuffer(m_vki, *passCmd);

		m_vki.cmdExecuteCommands(*mainCmd, 1, &passCmd.get());
	}

	endRenderPass(m_vki, *mainCmd);
	endCommandBuffer(m_vki, *mainCmd);

	// submit and wait for them to finish before exiting scope. (Killing in-flight objects is a no-no).
	submitCommandsAndWait(m_vki, m_device, m_queue, mainCmd.get());
}
// How many descriptor sets a test instance uses and how their set numbers are arranged.
enum DescriptorSetCount
{
DESCRIPTOR_SET_COUNT_SINGLE = 0, //!< single descriptor set
DESCRIPTOR_SET_COUNT_MULTIPLE, //!< multiple descriptor sets
DESCRIPTOR_SET_COUNT_MULTIPLE_DISCONTIGUOUS, //!< multiple discontiguous descriptor sets
DESCRIPTOR_SET_COUNT_LAST
};
// Map a DescriptorSetCount enum value to the concrete number of descriptor sets.
deUint32 getDescriptorSetCount (DescriptorSetCount count)
{
	if (count == DESCRIPTOR_SET_COUNT_SINGLE)
		return 1u;

	if (count == DESCRIPTOR_SET_COUNT_MULTIPLE || count == DESCRIPTOR_SET_COUNT_MULTIPLE_DISCONTIGUOUS)
		return 2u;

	DE_FATAL("Impossible");
	return 0u;
}
// Translate a logical set index into the actual descriptor set number used by the
// pipeline layout. For the discontiguous arrangement the sets are 0 and 2 (set 1 is
// intentionally skipped).
deUint32 getDescriptorSetNdx (DescriptorSetCount count, deUint32 setNdx)
{
	DE_ASSERT(setNdx < getDescriptorSetCount(count));

	switch (count)
	{
		case DESCRIPTOR_SET_COUNT_SINGLE:
			return 0u;

		case DESCRIPTOR_SET_COUNT_MULTIPLE:
			// contiguous set numbers: 0, 1
			return setNdx;

		case DESCRIPTOR_SET_COUNT_MULTIPLE_DISCONTIGUOUS:
			// discontiguous set numbers: 0, 2
			return setNdx * 2u;

		default:
			DE_FATAL("Impossible");
			return 0u;
	}
}
// How the descriptors appear in the shader interface: binding count, binding
// numbering, and whether they form an array.
enum ShaderInputInterface
{
SHADER_INPUT_SINGLE_DESCRIPTOR = 0, //!< one descriptor
SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS, //!< multiple descriptors with contiguous binding id's
SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS, //!< multiple descriptors with discontiguous binding id's
SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS, //!< multiple descriptors with large gaps between binding id's
SHADER_INPUT_DESCRIPTOR_ARRAY, //!< descriptor array
SHADER_INPUT_LAST
};
// Number of resources (descriptors) per descriptor set for a given shader interface.
// Every multi-descriptor arrangement, including the array case, uses exactly two.
deUint32 getInterfaceNumResources (ShaderInputInterface shaderInterface)
{
	switch (shaderInterface)
	{
		case SHADER_INPUT_SINGLE_DESCRIPTOR:
			return 1u;

		case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:
		case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:
		case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:
		case SHADER_INPUT_DESCRIPTOR_ARRAY:
			return 2u;

		default:
			DE_FATAL("Impossible");
			return 0u;
	}
}
// Return one of two deliberately large binding numbers used by the "arbitrary
// binding index" tests.
deUint32 getArbitraryBindingIndex (deUint32 ndx)
{
	DE_ASSERT(ndx < 2);

	// Binding decoration value can be any 32-bit unsigned integer value.
	// 0xFFFE is the largest binding value accepted by glslang
	return (ndx == 0u) ? (0x7FFEu) : (0xFFFEu);
}
// Shorthand aliases for Vulkan object handles and allocations.
// "Up" = uniquely-owned handle (vk::Unique / de::MovePtr), "Sp" = shared pointer
// wrapper around the unique handle so the objects can be stored in std::vector.
typedef de::MovePtr<vk::Allocation> AllocationMp;
typedef de::SharedPtr<vk::Allocation> AllocationSp;
typedef vk::Unique<vk::VkBuffer> BufferHandleUp;
typedef de::SharedPtr<BufferHandleUp> BufferHandleSp;
typedef vk::Unique<vk::VkBufferView> BufferViewHandleUp;
typedef de::SharedPtr<BufferViewHandleUp> BufferViewHandleSp;
typedef vk::Unique<vk::VkSampler> SamplerHandleUp;
typedef de::SharedPtr<SamplerHandleUp> SamplerHandleSp;
typedef vk::Unique<vk::VkImage> ImageHandleUp;
typedef de::SharedPtr<ImageHandleUp> ImageHandleSp;
typedef vk::Unique<vk::VkImageView> ImageViewHandleUp;
typedef de::SharedPtr<ImageViewHandleUp> ImageViewHandleSp;
typedef vk::Unique<vk::VkDescriptorSet> DescriptorSetHandleUp;
typedef de::SharedPtr<DescriptorSetHandleUp> DescriptorSetHandleSp;
typedef vk::Unique<vk::VkDescriptorSetLayout> DescriptorSetLayoutHandleUp;
typedef de::SharedPtr<DescriptorSetLayoutHandleUp> DescriptorSetLayoutHandleSp;
typedef vk::Unique<vk::VkDescriptorUpdateTemplate> UpdateTemplateHandleUp;
typedef de::SharedPtr<UpdateTemplateHandleUp> UpdateTemplateHandleSp;
// Render-based test instance that reads colors from buffer descriptors (uniform or
// storage, optionally dynamic). Owns the source buffers, descriptor pool, layouts,
// sets and pipeline layout for one combination of update method, descriptor type,
// set count, shader interface, stage flags and offset configuration.
class BufferRenderInstance : public SingleCmdRenderInstance
{
public:
BufferRenderInstance (Context& context,
DescriptorUpdateMethod updateMethod,
bool isPrimaryCmdBuf,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
vk::VkShaderStageFlags stageFlags,
ShaderInputInterface shaderInterface,
bool viewOffset,
bool dynamicOffset,
bool dynamicOffsetNonZero);
// The helpers below are static because they are invoked from the constructor's
// member-initializer list, before the instance itself is fully constructed.
static std::vector<deUint32> getViewOffsets (DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
bool setViewOffset);
static std::vector<deUint32> getDynamicOffsets (DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
bool dynamicOffsetNonZero);
static std::vector<BufferHandleSp> createSourceBuffers (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::Allocator& allocator,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
const std::vector<deUint32>& viewOffset,
const std::vector<deUint32>& dynamicOffset,
std::vector<AllocationSp>& bufferMemory);
static vk::Move<vk::VkBuffer> createSourceBuffer (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::Allocator& allocator,
vk::VkDescriptorType descriptorType,
deUint32 setNdx,
deUint32 offset,
deUint32 bufferSize,
de::MovePtr<vk::Allocation>* outMemory);
static vk::Move<vk::VkDescriptorPool> createDescriptorPool (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface);
static std::vector<DescriptorSetLayoutHandleSp> createDescriptorSetLayouts (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
vk::VkShaderStageFlags stageFlags,
DescriptorUpdateMethod updateMethod);
static vk::Move<vk::VkPipelineLayout> createPipelineLayout (const vk::DeviceInterface& vki,
vk::VkDevice device,
const std::vector<DescriptorSetLayoutHandleSp>& descriptorSetLayout);
static std::vector<DescriptorSetHandleSp> createDescriptorSets (const vk::DeviceInterface& vki,
DescriptorUpdateMethod updateMethod,
vk::VkDevice device,
const std::vector<DescriptorSetLayoutHandleSp>& descriptorSetLayouts,
vk::VkDescriptorPool descriptorPool,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
const std::vector<BufferHandleSp>& buffers,
const std::vector<deUint32>& offsets,
vk::DescriptorSetUpdateBuilder& updateBuilder,
std::vector<deUint32>& descriptorsPerSet,
std::vector<UpdateTemplateHandleSp>& updateTemplates,
std::vector<RawUpdateRegistry>& updateRegistry,
vk::VkPipelineLayout pipelineLayout = DE_NULL);
static void writeDescriptorSet (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::VkDescriptorType descriptorType,
ShaderInputInterface shaderInterface,
vk::VkBuffer sourceBufferA,
const deUint32 viewOffsetA,
vk::VkBuffer sourceBufferB,
const deUint32 viewOffsetB,
vk::VkDescriptorSet descriptorSet,
vk::DescriptorSetUpdateBuilder& updateBuilder,
std::vector<deUint32>& descriptorsPerSet,
DescriptorUpdateMethod updateMethod = DESCRIPTOR_UPDATE_METHOD_NORMAL);
static void writeDescriptorSetWithTemplate (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::VkDescriptorSetLayout descriptorSetLayout,
deUint32 setNdx,
vk::VkDescriptorPool descriptorPool,
vk::VkDescriptorType descriptorType,
ShaderInputInterface shaderInterface,
vk::VkBuffer sourceBufferA,
const deUint32 viewOffsetA,
vk::VkBuffer sourceBufferB,
const deUint32 viewOffsetB,
vk::VkDescriptorSet descriptorSet,
std::vector<UpdateTemplateHandleSp>& updateTemplates,
std::vector<RawUpdateRegistry>& registry,
bool withPush = false,
vk::VkPipelineLayout pipelineLayout = 0);
// SingleCmdRenderInstance interface.
void logTestPlan (void) const;
vk::VkPipelineLayout getPipelineLayout (void) const;
void writeDrawCmdBuffer (vk::VkCommandBuffer cmd) const;
tcu::TestStatus verifyResultImage (const tcu::ConstPixelBufferAccess& result) const;
// Buffer byte sizes and byte offsets used by the test. Buffers are intentionally
// larger than the accessed data so that guard regions around the payload can be
// filled and later checked for accidental accesses.
enum
{
RENDER_SIZE = 128,
BUFFER_DATA_SIZE = 8 * sizeof(float),
BUFFER_SIZE_A = 2048, //!< a lot more than required
BUFFER_SIZE_B = 2560, //!< a lot more than required
BUFFER_SIZE_C = 2128, //!< a lot more than required
BUFFER_SIZE_D = 2136, //!< a lot more than required
STATIC_OFFSET_VALUE_A = 256,
DYNAMIC_OFFSET_VALUE_A = 512,
STATIC_OFFSET_VALUE_B = 1024,
DYNAMIC_OFFSET_VALUE_B = 768,
STATIC_OFFSET_VALUE_C = 512,
DYNAMIC_OFFSET_VALUE_C = 512,
STATIC_OFFSET_VALUE_D = 768,
DYNAMIC_OFFSET_VALUE_D = 1024,
};
// NOTE: member declaration order is load-bearing — the constructor's initializer
// list builds later members from earlier ones (e.g. m_descriptorSets is created
// from the layouts, pool and buffers above it). Do not reorder.
const DescriptorUpdateMethod m_updateMethod;
const vk::VkDescriptorType m_descriptorType;
const DescriptorSetCount m_descriptorSetCount;
const ShaderInputInterface m_shaderInterface;
const bool m_setViewOffset;
const bool m_setDynamicOffset;
const bool m_dynamicOffsetNonZero;
const vk::VkShaderStageFlags m_stageFlags;
const std::vector<deUint32> m_viewOffset;
const std::vector<deUint32> m_dynamicOffset;
std::vector<AllocationSp> m_bufferMemory;
const std::vector<BufferHandleSp> m_sourceBuffer;
const vk::Unique<vk::VkDescriptorPool> m_descriptorPool;
std::vector<UpdateTemplateHandleSp> m_updateTemplates;
std::vector<RawUpdateRegistry> m_updateRegistry;
vk::DescriptorSetUpdateBuilder m_updateBuilder;
const std::vector<DescriptorSetLayoutHandleSp> m_descriptorSetLayouts;
const vk::Unique<vk::VkPipelineLayout> m_pipelineLayout;
std::vector<deUint32> m_descriptorsPerSet;
const std::vector<DescriptorSetHandleSp> m_descriptorSets;
};
// Constructor: all Vulkan resources are created in the member-initializer list, in
// member declaration order. Each initializer may consume members initialized above
// it (e.g. the descriptor sets are built from the layouts, pool, buffers and
// offsets), so the ordering here must match the declaration order in the class.
BufferRenderInstance::BufferRenderInstance (Context& context,
DescriptorUpdateMethod updateMethod,
bool isPrimaryCmdBuf,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
vk::VkShaderStageFlags stageFlags,
ShaderInputInterface shaderInterface,
bool viewOffset,
bool dynamicOffset,
bool dynamicOffsetNonZero)
: SingleCmdRenderInstance (context, isPrimaryCmdBuf, tcu::UVec2(RENDER_SIZE, RENDER_SIZE))
, m_updateMethod (updateMethod)
, m_descriptorType (descriptorType)
, m_descriptorSetCount (descriptorSetCount)
, m_shaderInterface (shaderInterface)
, m_setViewOffset (viewOffset)
, m_setDynamicOffset (dynamicOffset)
, m_dynamicOffsetNonZero (dynamicOffsetNonZero)
, m_stageFlags (stageFlags)
, m_viewOffset (getViewOffsets(m_descriptorSetCount, m_shaderInterface, m_setViewOffset))
, m_dynamicOffset (getDynamicOffsets(m_descriptorSetCount, m_shaderInterface, m_dynamicOffsetNonZero))
, m_bufferMemory ()
, m_sourceBuffer (createSourceBuffers(m_vki, m_device, m_allocator, m_descriptorType, m_descriptorSetCount, m_shaderInterface, m_viewOffset, m_dynamicOffset, m_bufferMemory))
, m_descriptorPool (createDescriptorPool(m_vki, m_device, m_descriptorType, m_descriptorSetCount, m_shaderInterface))
, m_updateTemplates ()
, m_updateRegistry ()
, m_updateBuilder ()
, m_descriptorSetLayouts (createDescriptorSetLayouts(m_vki, m_device, m_descriptorType, m_descriptorSetCount, m_shaderInterface, m_stageFlags, m_updateMethod))
, m_pipelineLayout (createPipelineLayout(m_vki, m_device, m_descriptorSetLayouts))
, m_descriptorsPerSet ()
, m_descriptorSets (createDescriptorSets(m_vki, m_updateMethod, m_device, m_descriptorSetLayouts, *m_descriptorPool, m_descriptorType, m_descriptorSetCount, m_shaderInterface, m_sourceBuffer, m_viewOffset, m_updateBuilder, m_descriptorsPerSet, m_updateTemplates, m_updateRegistry, *m_pipelineLayout))
{
// Dynamic offsets only make sense for *_DYNAMIC descriptor types, and a non-zero
// dynamic offset only makes sense when a dynamic offset is supplied at all.
if (m_setDynamicOffset)
DE_ASSERT(isDynamicDescriptorType(m_descriptorType));
if (m_dynamicOffsetNonZero)
DE_ASSERT(m_setDynamicOffset);
}
// Build the per-buffer static (view) byte offsets for all descriptor sets.
// One entry is produced per buffer (numSets * numResourcesPerSet entries).
// When setViewOffset is false, all offsets are zero. Offsets cycle through the
// STATIC_OFFSET_VALUE_* table with period numResources, so every set sees the
// same offset pattern.
std::vector<deUint32> BufferRenderInstance::getViewOffsets (DescriptorSetCount descriptorSetCount,
                                                            ShaderInputInterface shaderInterface,
                                                            bool setViewOffset)
{
	// Table is loop-invariant; hoisted out of the per-buffer loop.
	static const deUint32 staticOffsetValues[] =
	{
		STATIC_OFFSET_VALUE_A,
		STATIC_OFFSET_VALUE_B,
		STATIC_OFFSET_VALUE_C,
		STATIC_OFFSET_VALUE_D
	};

	const deUint32				numResources	= getInterfaceNumResources(shaderInterface);
	const deUint32				numBuffers		= getDescriptorSetCount(descriptorSetCount) * numResources;
	std::vector<deUint32>		viewOffset;

	viewOffset.reserve(numBuffers);
	for (deUint32 bufferNdx = 0; bufferNdx < numBuffers; bufferNdx++)
		viewOffset.push_back(setViewOffset ? (staticOffsetValues[bufferNdx % numResources]) : (0u));

	return viewOffset;
}
// Build the per-buffer dynamic byte offsets for all descriptor sets.
// One entry is produced per buffer (numSets * numResourcesPerSet entries).
// When dynamicOffsetNonZero is false, all offsets are zero. Offsets cycle through
// the DYNAMIC_OFFSET_VALUE_* table with period numResources, so every set sees the
// same offset pattern.
std::vector<deUint32> BufferRenderInstance::getDynamicOffsets (DescriptorSetCount descriptorSetCount,
                                                               ShaderInputInterface shaderInterface,
                                                               bool dynamicOffsetNonZero)
{
	// Table is loop-invariant; hoisted out of the per-buffer loop.
	static const deUint32 dynamicOffsetValues[] =
	{
		DYNAMIC_OFFSET_VALUE_A,
		DYNAMIC_OFFSET_VALUE_B,
		DYNAMIC_OFFSET_VALUE_C,
		DYNAMIC_OFFSET_VALUE_D
	};

	const deUint32				numResources	= getInterfaceNumResources(shaderInterface);
	const deUint32				numBuffers		= getDescriptorSetCount(descriptorSetCount) * numResources;
	std::vector<deUint32>		dynamicOffset;

	dynamicOffset.reserve(numBuffers);
	for (deUint32 bufferNdx = 0; bufferNdx < numBuffers; bufferNdx++)
		dynamicOffset.push_back(dynamicOffsetNonZero ? (dynamicOffsetValues[bufferNdx % numResources]) : (0u));

	return dynamicOffset;
}
// Create all source buffers (one per set per shader-interface resource), allocate
// and fill their memory, and return the handles. Allocations are appended to
// bufferMemory so they outlive the returned buffer handles.
std::vector<BufferHandleSp> BufferRenderInstance::createSourceBuffers (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::Allocator& allocator,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
const std::vector<deUint32>& viewOffset,
const std::vector<deUint32>& dynamicOffset,
std::vector<AllocationSp>& bufferMemory)
{
const int numBuffers = getDescriptorSetCount(descriptorSetCount) * getInterfaceNumResources(shaderInterface);
std::vector<deUint32> effectiveOffset;
std::vector<deUint32> bufferSize;
std::vector<BufferHandleSp> sourceBuffers;
// Compute where the payload actually lands in each buffer: for dynamic descriptor
// types the data is placed at viewOffset + dynamicOffset since the dynamic offset
// is applied on top of the descriptor's base offset at bind time.
for (int bufferNdx = 0; bufferNdx < numBuffers; bufferNdx++)
{
const deUint32 bufferSizeValues[] =
{
BUFFER_SIZE_A,
BUFFER_SIZE_B,
BUFFER_SIZE_C,
BUFFER_SIZE_D
};
effectiveOffset.push_back(isDynamicDescriptorType(descriptorType) ? (viewOffset[bufferNdx] + dynamicOffset[bufferNdx]) : (viewOffset[bufferNdx]));
bufferSize.push_back(bufferSizeValues[bufferNdx % getInterfaceNumResources(shaderInterface)]);
}
// Create source buffers
for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(descriptorSetCount); setNdx++)
{
for (deUint32 bufferNdx = 0; bufferNdx < getInterfaceNumResources(shaderInterface); bufferNdx++)
{
// \note bufferNdx here is the index *within* the set, not the flat buffer index.
// This is safe because getViewOffsets()/getDynamicOffsets() generate offsets that
// repeat with period numResources, and bufferSize likewise cycles with the same
// period, so entry [bufferNdx] equals entry [setNdx * numResources + bufferNdx].
de::MovePtr<vk::Allocation> memory;
vk::Move<vk::VkBuffer> buffer = createSourceBuffer(vki, device, allocator, descriptorType, setNdx, effectiveOffset[bufferNdx], bufferSize[bufferNdx], &memory);
bufferMemory.push_back(AllocationSp(memory.release()));
sourceBuffers.push_back(BufferHandleSp(new BufferHandleUp(buffer)));
}
}
return sourceBuffers;
}
// Create a single host-visible source buffer, write the two result colors at the
// given byte offset, and surround the payload with guard values so out-of-bounds
// reads are detectable. Ownership of the backing allocation is returned through
// outMemory.
vk::Move<vk::VkBuffer> BufferRenderInstance::createSourceBuffer (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::Allocator& allocator,
vk::VkDescriptorType descriptorType,
deUint32 setNdx,
deUint32 offset,
deUint32 bufferSize,
de::MovePtr<vk::Allocation>* outMemory)
{
// Two color pairs; even-numbered sets get green/yellow, odd-numbered sets get
// blue/red (selected below via 8 * (setNdx % 2)).
static const float s_colors[] =
{
0.0f, 1.0f, 0.0f, 1.0f, // green
1.0f, 1.0f, 0.0f, 1.0f, // yellow
0.0f, 0.0f, 1.0f, 1.0f, // blue
1.0f, 0.0f, 0.0f, 1.0f // red
};
// The payload written per buffer is half of s_colors (one color pair).
DE_STATIC_ASSERT(sizeof(s_colors) / 2 == BUFFER_DATA_SIZE);
DE_ASSERT(offset + BUFFER_DATA_SIZE <= bufferSize);
DE_ASSERT(offset % sizeof(float) == 0);
DE_ASSERT(bufferSize % sizeof(float) == 0);
const bool isUniformBuffer = isUniformDescriptorType(descriptorType);
const vk::VkBufferUsageFlags usageFlags = (isUniformBuffer) ? (vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : (vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
const float preGuardValue = 0.5f;
const float postGuardValue = 0.75f;
const vk::VkBufferCreateInfo bufferCreateInfo =
{
vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
DE_NULL,
0u, // flags
bufferSize, // size
usageFlags, // usage
vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
0u, // queueFamilyCount
DE_NULL, // pQueueFamilyIndices
};
vk::Move<vk::VkBuffer> buffer (vk::createBuffer(vki, device, &bufferCreateInfo));
de::MovePtr<vk::Allocation> bufferMemory = allocateAndBindObjectMemory(vki, device, allocator, *buffer, vk::MemoryRequirement::HostVisible);
void* const mapPtr = bufferMemory->getHostPtr();
// guard with interesting values
for (size_t preGuardOffset = 0; preGuardOffset + sizeof(float) <= (size_t)offset; preGuardOffset += sizeof(float))
deMemcpy((deUint8*)mapPtr + preGuardOffset, &preGuardValue, sizeof(float));
// Write the payload (one color pair) at the requested offset.
deMemcpy((deUint8*)mapPtr + offset, &s_colors[8 * (setNdx % 2)], sizeof(s_colors) / 2);
for (size_t postGuardOffset = (size_t)offset + sizeof(s_colors) / 2; postGuardOffset + sizeof(float) <= (size_t)bufferSize; postGuardOffset += sizeof(float))
deMemcpy((deUint8*)mapPtr + postGuardOffset, &postGuardValue, sizeof(float));
// NOTE(review): this memset covers the exact same region as the postGuardValue
// loop above, overwriting it entirely — the float post-guard writes appear to be
// dead. Kept as-is to preserve existing behavior; verify intent upstream.
deMemset((deUint8*)mapPtr + offset + sizeof(s_colors) / 2, 0x5A, (size_t)bufferSize - (size_t)offset - sizeof(s_colors) / 2); // fill with interesting pattern that produces valid floats
flushAlloc(vki, device, *bufferMemory);
// Flushed host-visible memory is automatically made available to the GPU, no barrier is needed.
*outMemory = bufferMemory;
return buffer;
}
// Create a descriptor pool sized for every set and every descriptor the test uses.
// The pool allows freeing individual sets (FREE_DESCRIPTOR_SET_BIT) since the sets
// are destroyed one by one through their unique handles.
vk::Move<vk::VkDescriptorPool> BufferRenderInstance::createDescriptorPool (const vk::DeviceInterface& vki,
                                                                           vk::VkDevice device,
                                                                           vk::VkDescriptorType descriptorType,
                                                                           DescriptorSetCount descriptorSetCount,
                                                                           ShaderInputInterface shaderInterface)
{
	const deUint32				numSets			= getDescriptorSetCount(descriptorSetCount);
	const deUint32				numDescriptors	= numSets * getInterfaceNumResources(shaderInterface);
	vk::DescriptorPoolBuilder	poolBuilder;

	poolBuilder.addType(descriptorType, numDescriptors);
	return poolBuilder.build(vki, device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numSets);
}
// Create one descriptor set layout per used set, with bindings arranged according
// to the shader interface. For push-descriptor update methods the layouts are
// created with the PUSH_DESCRIPTOR flag. For the discontiguous set arrangement an
// extra empty layout is inserted as set 1, so returned layouts are indexed by
// actual set number (via getDescriptorSetNdx), not by logical set index.
std::vector<DescriptorSetLayoutHandleSp> BufferRenderInstance::createDescriptorSetLayouts (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
vk::VkShaderStageFlags stageFlags,
DescriptorUpdateMethod updateMethod)
{
vk::VkDescriptorSetLayoutCreateFlags extraFlags = 0;
// Push-descriptor updates require layouts created with the push-descriptor flag.
if (updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE ||
updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH)
{
extraFlags |= vk::VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
}
std::vector<DescriptorSetLayoutHandleSp> descriptorSetLayouts;
for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(descriptorSetCount); setNdx++)
{
vk::DescriptorSetLayoutBuilder builder;
// Binding numbers mirror the shader interface variants: contiguous (0,1),
// discontiguous (0,2), arbitrary large indices, or a single array binding.
switch (shaderInterface)
{
case SHADER_INPUT_SINGLE_DESCRIPTOR:
builder.addSingleBinding(descriptorType, stageFlags);
break;
case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:
builder.addSingleBinding(descriptorType, stageFlags);
builder.addSingleBinding(descriptorType, stageFlags);
break;
case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:
builder.addSingleIndexedBinding(descriptorType, stageFlags, 0u);
builder.addSingleIndexedBinding(descriptorType, stageFlags, 2u);
break;
case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:
builder.addSingleIndexedBinding(descriptorType, stageFlags, getArbitraryBindingIndex(0));
builder.addSingleIndexedBinding(descriptorType, stageFlags, getArbitraryBindingIndex(1));
break;
case SHADER_INPUT_DESCRIPTOR_ARRAY:
builder.addArrayBinding(descriptorType, 2u, stageFlags);
break;
default:
DE_FATAL("Impossible");
}
vk::Move<vk::VkDescriptorSetLayout> layout = builder.build(vki, device, extraFlags);
descriptorSetLayouts.push_back(DescriptorSetLayoutHandleSp(new DescriptorSetLayoutHandleUp(layout)));
// Add an empty descriptor set layout between sets 0 and 2
if (setNdx == 0 && descriptorSetCount == DESCRIPTOR_SET_COUNT_MULTIPLE_DISCONTIGUOUS)
{
vk::DescriptorSetLayoutBuilder emptyBuilder;
vk::Move<vk::VkDescriptorSetLayout> emptyLayout = emptyBuilder.build(vki, device, (vk::VkDescriptorSetLayoutCreateFlags)0);
descriptorSetLayouts.push_back(DescriptorSetLayoutHandleSp(new DescriptorSetLayoutHandleUp(emptyLayout)));
}
}
return descriptorSetLayouts;
}
// Create a pipeline layout that references every descriptor set layout in order
// (including the empty filler layout used for the discontiguous arrangement).
vk::Move<vk::VkPipelineLayout> BufferRenderInstance::createPipelineLayout (const vk::DeviceInterface& vki,
                                                                           vk::VkDevice device,
                                                                           const std::vector<DescriptorSetLayoutHandleSp>& descriptorSetLayout)
{
	std::vector<vk::VkDescriptorSetLayout> layoutHandles;
	layoutHandles.reserve(descriptorSetLayout.size());
	for (size_t setNdx = 0; setNdx < descriptorSetLayout.size(); setNdx++)
		layoutHandles.push_back(**descriptorSetLayout[setNdx]);

	const vk::VkPipelineLayoutCreateInfo createInfo =
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineLayoutCreateFlags)0,
		(deUint32)layoutHandles.size(),		// descriptorSetCount
		layoutHandles.data(),				// pSetLayouts; data() is well-defined even for an empty vector, unlike &front()
		0u,									// pushConstantRangeCount
		DE_NULL,							// pPushConstantRanges
	};
	return vk::createPipelineLayout(vki, device, &createInfo);
}
// Allocate (where applicable) and fill one descriptor set per logical set.
// For push-descriptor update methods no set is allocated — a null handle is stored
// and the actual descriptor data is recorded later at draw time; the write helpers
// are still invoked here to populate the update templates / update builder.
std::vector<DescriptorSetHandleSp> BufferRenderInstance::createDescriptorSets (const vk::DeviceInterface& vki,
DescriptorUpdateMethod updateMethod,
vk::VkDevice device,
const std::vector<DescriptorSetLayoutHandleSp>& descriptorSetLayouts,
vk::VkDescriptorPool descriptorPool,
vk::VkDescriptorType descriptorType,
DescriptorSetCount descriptorSetCount,
ShaderInputInterface shaderInterface,
const std::vector<BufferHandleSp>& buffers,
const std::vector<deUint32>& offsets,
vk::DescriptorSetUpdateBuilder& updateBuilder,
std::vector<deUint32>& descriptorsPerSet,
std::vector<UpdateTemplateHandleSp>& updateTemplates,
std::vector<RawUpdateRegistry>& updateRegistry,
vk::VkPipelineLayout pipelineLayout)
{
std::vector<DescriptorSetHandleSp> descriptorSets;
for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(descriptorSetCount); setNdx++)
{
// Map the logical set index to the actual set number (skips the empty filler
// layout in the discontiguous arrangement).
vk::VkDescriptorSetLayout layout = **descriptorSetLayouts[getDescriptorSetNdx(descriptorSetCount, setNdx)];
const vk::VkDescriptorSetAllocateInfo allocInfo =
{
vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
DE_NULL,
descriptorPool,
1u,
&layout
};
// Pick this set's two source buffers and offsets; the modulo wraps for the
// single-descriptor interface where only one buffer per set exists.
vk::VkBuffer bufferA = **buffers[(setNdx * getInterfaceNumResources(shaderInterface)) % buffers.size()];
vk::VkBuffer bufferB = **buffers[(setNdx * getInterfaceNumResources(shaderInterface) + 1) % buffers.size()];
deUint32 offsetA = offsets[(setNdx * getInterfaceNumResources(shaderInterface)) % offsets.size()];
deUint32 offsetB = offsets[(setNdx * getInterfaceNumResources(shaderInterface) + 1) % offsets.size()];
vk::Move<vk::VkDescriptorSet> descriptorSet;
if (updateMethod != DESCRIPTOR_UPDATE_METHOD_WITH_PUSH && updateMethod != DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE)
{
descriptorSet = allocateDescriptorSet(vki, device, &allocInfo);
}
else
{
// Push-descriptor paths record descriptor data at draw time; keep a null handle.
descriptorSet = vk::Move<vk::VkDescriptorSet>();
}
if (updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_TEMPLATE)
{
writeDescriptorSetWithTemplate(vki, device, layout, setNdx, descriptorPool, descriptorType, shaderInterface, bufferA, offsetA, bufferB, offsetB, *descriptorSet, updateTemplates, updateRegistry);
}
else if (updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE)
{
writeDescriptorSetWithTemplate(vki, device, layout, setNdx, descriptorPool, descriptorType, shaderInterface, bufferA, offsetA, bufferB, offsetB, *descriptorSet, updateTemplates, updateRegistry, true, pipelineLayout);
}
else if (updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH)
{
writeDescriptorSet(vki, device, descriptorType, shaderInterface, bufferA, offsetA, bufferB, offsetB, *descriptorSet, updateBuilder, descriptorsPerSet, updateMethod);
}
else if (updateMethod == DESCRIPTOR_UPDATE_METHOD_NORMAL)
{
writeDescriptorSet(vki, device, descriptorType, shaderInterface, bufferA, offsetA, bufferB, offsetB, *descriptorSet, updateBuilder, descriptorsPerSet);
}
descriptorSets.push_back(DescriptorSetHandleSp(new DescriptorSetHandleUp(descriptorSet)));
}
return descriptorSets;
}
// Record the descriptor writes for one set into updateBuilder, choosing binding
// numbers to match the shader interface. For the NORMAL method the builder is
// flushed (vkUpdateDescriptorSets) and cleared immediately; for the PUSH method
// the writes stay queued in the builder and are pushed at draw time, with the
// per-set write count recorded in descriptorsPerSet.
void BufferRenderInstance::writeDescriptorSet (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::VkDescriptorType descriptorType,
ShaderInputInterface shaderInterface,
vk::VkBuffer bufferA,
const deUint32 offsetA,
vk::VkBuffer bufferB,
const deUint32 offsetB,
vk::VkDescriptorSet descriptorSet,
vk::DescriptorSetUpdateBuilder& updateBuilder,
std::vector<deUint32>& descriptorsPerSet,
DescriptorUpdateMethod updateMethod)
{
// Each descriptor views BUFFER_DATA_SIZE bytes starting at its static offset.
const vk::VkDescriptorBufferInfo bufferInfos[2] =
{
vk::makeDescriptorBufferInfo(bufferA, (vk::VkDeviceSize)offsetA, (vk::VkDeviceSize)BUFFER_DATA_SIZE),
vk::makeDescriptorBufferInfo(bufferB, (vk::VkDeviceSize)offsetB, (vk::VkDeviceSize)BUFFER_DATA_SIZE),
};
// Count of write operations (not descriptors): the array case is one write of two descriptors.
deUint32 numDescriptors = 0u;
switch (shaderInterface)
{
case SHADER_INPUT_SINGLE_DESCRIPTOR:
updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &bufferInfos[0]);
numDescriptors++;
break;
case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:
updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &bufferInfos[0]);
updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1u), descriptorType, &bufferInfos[1]);
numDescriptors += 2;
break;
case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:
updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, &bufferInfos[0]);
updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2u), descriptorType, &bufferInfos[1]);
numDescriptors += 2;
break;
case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:
updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(getArbitraryBindingIndex(0)), descriptorType, &bufferInfos[0]);
updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(getArbitraryBindingIndex(1)), descriptorType, &bufferInfos[1]);
numDescriptors += 2;
break;
case SHADER_INPUT_DESCRIPTOR_ARRAY:
updateBuilder.writeArray(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0u), descriptorType, 2u, bufferInfos);
numDescriptors++;
break;
default:
DE_FATAL("Impossible");
}
descriptorsPerSet.push_back(numDescriptors);
if (updateMethod == DESCRIPTOR_UPDATE_METHOD_NORMAL)
{
updateBuilder.update(vki, device);
updateBuilder.clear();
}
}
// Build a descriptor update template matching the shader interface and either apply
// it immediately (vkUpdateDescriptorSetWithTemplate) or, when withPush is set, just
// store it for use with vkCmdPushDescriptorSetWithTemplateKHR at draw time.
// The template and the raw data registry are appended to the caller's vectors so
// their storage outlives this call.
void BufferRenderInstance::writeDescriptorSetWithTemplate (const vk::DeviceInterface& vki,
vk::VkDevice device,
vk::VkDescriptorSetLayout layout,
deUint32 setNdx,
vk::VkDescriptorPool descriptorPool,
vk::VkDescriptorType descriptorType,
ShaderInputInterface shaderInterface,
vk::VkBuffer bufferA,
const deUint32 offsetA,
vk::VkBuffer bufferB,
const deUint32 offsetB,
vk::VkDescriptorSet descriptorSet,
std::vector<UpdateTemplateHandleSp>& updateTemplates,
std::vector<RawUpdateRegistry>& registry,
bool withPush,
vk::VkPipelineLayout pipelineLayout)
{
DE_UNREF(descriptorPool);
// Each descriptor views BUFFER_DATA_SIZE bytes starting at its static offset.
const vk::VkDescriptorBufferInfo bufferInfos[2] =
{
vk::makeDescriptorBufferInfo(bufferA, (vk::VkDeviceSize)offsetA, (vk::VkDeviceSize)BUFFER_DATA_SIZE),
vk::makeDescriptorBufferInfo(bufferB, (vk::VkDeviceSize)offsetB, (vk::VkDeviceSize)BUFFER_DATA_SIZE),
};
std::vector<vk::VkDescriptorUpdateTemplateEntry> updateEntries;
// Entry count/pointer are filled in after the entries vector is populated below.
vk::VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
{
vk::VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
DE_NULL,
0,
0, // descriptorUpdateEntryCount
DE_NULL, // pDescriptorUpdateEntries
withPush ? vk::VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR : vk::VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
layout,
vk::VK_PIPELINE_BIND_POINT_GRAPHICS,
pipelineLayout,
setNdx
};
// The registry owns a raw copy of the buffer infos; template entries reference
// them by byte offset into the registry's storage.
RawUpdateRegistry updateRegistry;
updateRegistry.addWriteObject(bufferInfos[0]);
updateRegistry.addWriteObject(bufferInfos[1]);
// Binding numbers mirror writeDescriptorSet(); the array case uses one entry
// with descriptorCount 2 and a stride of sizeof(VkDescriptorBufferInfo).
switch (shaderInterface)
{
case SHADER_INPUT_SINGLE_DESCRIPTOR:
updateEntries.push_back(createTemplateBinding(0u, 0, 1, descriptorType, updateRegistry.getWriteObjectOffset(0), 0));
break;
case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:
updateEntries.push_back(createTemplateBinding(0u, 0, 1, descriptorType, updateRegistry.getWriteObjectOffset(0), 0));
updateEntries.push_back(createTemplateBinding(1u, 0, 1, descriptorType, updateRegistry.getWriteObjectOffset(1), 0));
break;
case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:
updateEntries.push_back(createTemplateBinding(0u, 0, 1, descriptorType, updateRegistry.getWriteObjectOffset(0), 0));
updateEntries.push_back(createTemplateBinding(2u, 0, 1, descriptorType, updateRegistry.getWriteObjectOffset(1), 0));
break;
case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:
updateEntries.push_back(createTemplateBinding(getArbitraryBindingIndex(0), 0, 1, descriptorType, updateRegistry.getWriteObjectOffset(0), 0));
updateEntries.push_back(createTemplateBinding(getArbitraryBindingIndex(1), 0, 1, descriptorType, updateRegistry.getWriteObjectOffset(1), 0));
break;
case SHADER_INPUT_DESCRIPTOR_ARRAY:
updateEntries.push_back(createTemplateBinding(0u, 0, 2, descriptorType, updateRegistry.getWriteObjectOffset(0), sizeof(bufferInfos[0])));
break;
default:
DE_FATAL("Impossible");
}
templateCreateInfo.pDescriptorUpdateEntries = &updateEntries[0];
templateCreateInfo.descriptorUpdateEntryCount = (deUint32)updateEntries.size();
vk::Move<vk::VkDescriptorUpdateTemplate> updateTemplate = vk::createDescriptorUpdateTemplate(vki, device, &templateCreateInfo);
updateTemplates.push_back(UpdateTemplateHandleSp(new UpdateTemplateHandleUp(updateTemplate)));
registry.push_back(updateRegistry);
if (!withPush)
{
vki.updateDescriptorSetWithTemplate(device, descriptorSet, **updateTemplates.back(), registry.back().getRawPointer());
}
}
// Write a human-readable description of the test configuration to the test log.
void BufferRenderInstance::logTestPlan (void) const
{
	// Describe how many descriptors each set contains, based on the shader interface.
	const char* descriptorCountDesc = (const char*)DE_NULL;
	switch (m_shaderInterface)
	{
		case SHADER_INPUT_SINGLE_DESCRIPTOR:					descriptorCountDesc = "single";					break;
		case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:		descriptorCountDesc = "two";					break;
		case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:	descriptorCountDesc = "two";					break;
		case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:		descriptorCountDesc = "two";					break;
		case SHADER_INPUT_DESCRIPTOR_ARRAY:						descriptorCountDesc = "an array (size 2) of";	break;
		default:												break;
	}

	std::ostringstream msg;
	msg << "Rendering 2x2 yellow-green grid.\n";
	msg << ((m_descriptorSetCount == DESCRIPTOR_SET_COUNT_SINGLE) ? "Single descriptor set. " : "Multiple descriptor sets. ");
	msg << "Each descriptor set contains " << descriptorCountDesc
	    << " descriptor(s) of type " << vk::getDescriptorTypeName(m_descriptorType) << "\n";
	msg << "Buffer view(s) have " << ((m_setViewOffset) ? ("non-") : ("")) << "zero offset.\n";

	if (isDynamicDescriptorType(m_descriptorType))
	{
		if (m_setDynamicOffset)
		{
			msg << "Source buffer(s) are given a dynamic offset at bind time.\n"
			    << "The supplied dynamic offset is " << ((m_dynamicOffsetNonZero) ? ("non-") : ("")) << "zero.\n";
		}
		else
		{
			msg << "Dynamic offset is not supplied at bind time. Expecting bind to offset 0.\n";
		}
	}

	if (m_stageFlags == 0u)
	{
		msg << "Descriptors are not accessed in any shader stage.\n";
	}
	else
	{
		msg << "Descriptors are accessed in {";
		if ((m_stageFlags & vk::VK_SHADER_STAGE_VERTEX_BIT) != 0)					msg << " vertex";
		if ((m_stageFlags & vk::VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0)		msg << " tess_control";
		if ((m_stageFlags & vk::VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0)	msg << " tess_evaluation";
		if ((m_stageFlags & vk::VK_SHADER_STAGE_GEOMETRY_BIT) != 0)					msg << " geometry";
		if ((m_stageFlags & vk::VK_SHADER_STAGE_FRAGMENT_BIT) != 0)					msg << " fragment";
		msg << " } stages.\n";
	}

	m_context.getTestContext().getLog()
		<< tcu::TestLog::Message
		<< msg.str()
		<< tcu::TestLog::EndMessage;
}
// Accessor used by the base class when binding descriptor sets and creating the pipeline.
vk::VkPipelineLayout BufferRenderInstance::getPipelineLayout (void) const
{
return *m_pipelineLayout;
}
// Records descriptor binding (or push-descriptor updates) plus the draw call into
// the given command buffer. The binding path is selected by m_updateMethod:
// pre-allocated sets are bound with vkCmdBindDescriptorSets, while the push
// variants write descriptors directly into the command buffer.
void BufferRenderInstance::writeDrawCmdBuffer (vk::VkCommandBuffer cmd) const
{
	if (m_updateMethod != DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE && m_updateMethod != DESCRIPTOR_UPDATE_METHOD_WITH_PUSH)
	{
		// Collect raw handles of the pre-allocated descriptor sets.
		std::vector<vk::VkDescriptorSet> sets;
		for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(m_descriptorSetCount); setNdx++)
			sets.push_back(**m_descriptorSets[setNdx]);

		switch (m_descriptorSetCount)
		{
			case DESCRIPTOR_SET_COUNT_SINGLE:
			case DESCRIPTOR_SET_COUNT_MULTIPLE:
			{
				// \note dynamic offset replaces the view offset, i.e. it is not offset relative to the view offset
				const deUint32			numOffsets			= (!m_setDynamicOffset) ? (0u) : ((deUint32)m_dynamicOffset.size());
				const deUint32* const	dynamicOffsetPtr	= (!m_setDynamicOffset) ? (DE_NULL) : (&m_dynamicOffset.front());

				// Sets occupy contiguous set numbers starting at 0, so one bind call suffices.
				m_vki.cmdBindDescriptorSets(cmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, getPipelineLayout(), 0, (int)sets.size(), &sets.front(), numOffsets, dynamicOffsetPtr);
				break;
			}
			case DESCRIPTOR_SET_COUNT_MULTIPLE_DISCONTIGUOUS:
			{
				// Set numbers are non-contiguous; bind each set individually with its slice of the dynamic offsets.
				deUint32 dynamicOffsetNdx = 0u;

				for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(m_descriptorSetCount); setNdx++)
				{
					// \note dynamic offset replaces the view offset, i.e. it is not offset relative to the view offset
					const deUint32			numOffsets			= (!m_setDynamicOffset) ? (0u) : (getInterfaceNumResources(m_shaderInterface));
					const deUint32* const	dynamicOffsetPtr	= (!m_setDynamicOffset) ? (DE_NULL) : (&m_dynamicOffset[dynamicOffsetNdx]);
					const deUint32			descriptorSetNdx	= getDescriptorSetNdx(m_descriptorSetCount, setNdx);

					m_vki.cmdBindDescriptorSets(cmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, getPipelineLayout(), descriptorSetNdx, 1, &sets[setNdx], numOffsets, dynamicOffsetPtr);

					dynamicOffsetNdx += getInterfaceNumResources(m_shaderInterface);
				}
				break;
			}
			default:
				DE_FATAL("Impossible");
		}
	}
	else if (m_updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE)
	{
		// Push each set's descriptors using the update templates recorded earlier.
		for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(m_descriptorSetCount); setNdx++)
			m_vki.cmdPushDescriptorSetWithTemplateKHR(cmd, **m_updateTemplates[setNdx], getPipelineLayout(), setNdx, (const void*)m_updateRegistry[setNdx].getRawPointer());
	}
	else if (m_updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH)
	{
		// Push each set's descriptors via the deferred update builder; descriptorNdx
		// tracks the running offset into the builder's accumulated writes.
		deUint32 descriptorNdx = 0u;
		for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(m_descriptorSetCount); setNdx++)
		{
			const deUint32 numDescriptors = m_descriptorsPerSet[setNdx];
			m_updateBuilder.updateWithPush(m_vki, cmd, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, setNdx, descriptorNdx, numDescriptors);
			descriptorNdx += numDescriptors;
		}
	}
	m_vki.cmdDraw(cmd, 6 * 4, 1, 0, 0); // render four quads (two separate triangles)
}
// Builds the expected quadrant image and compares it against the rendered result.
// When descriptors are accessed by some shader stage, the expected colors are the
// per-set colors accumulated and averaged over all descriptor sets; with no stages
// accessing the descriptors, plain green/yellow quadrants are expected.
tcu::TestStatus BufferRenderInstance::verifyResultImage (const tcu::ConstPixelBufferAccess& result) const
{
	const deUint32		numDescriptorSets	= getDescriptorSetCount(m_descriptorSetCount);
	const tcu::Vec4		green				(0.0f, 1.0f, 0.0f, 1.0f);
	const tcu::Vec4		yellow				(1.0f, 1.0f, 0.0f, 1.0f);
	tcu::Surface		reference			(m_targetSize.x(), m_targetSize.y());

	tcu::Vec4			sample0				= tcu::Vec4(0.0f);
	tcu::Vec4			sample1				= tcu::Vec4(0.0f);

	if (m_stageFlags)
	{
		// Per-set source colors; even-numbered sets use green/yellow, odd-numbered blue/red.
		const tcu::Vec4 colors[] =
		{
			tcu::Vec4(0.0f, 1.0f, 0.0f, 1.0f),		// green
			tcu::Vec4(1.0f, 1.0f, 0.0f, 1.0f),		// yellow
			tcu::Vec4(0.0f, 0.0f, 1.0f, 1.0f),		// blue
			tcu::Vec4(1.0f, 0.0f, 0.0f, 1.0f),		// red
		};

		// Accumulate each set's contribution to the two reference samples.
		for (deUint32 setNdx = 0; setNdx < numDescriptorSets; setNdx++)
		{
			sample0 += colors[2 * (setNdx % 2)];
			sample1 += colors[2 * (setNdx % 2) + 1];
		}

		// Average over the number of sets (single set needs no normalization).
		if (numDescriptorSets > 1)
		{
			sample0 = sample0 / tcu::Vec4(float(numDescriptorSets));
			sample1 = sample1 / tcu::Vec4(float(numDescriptorSets));
		}
	}
	else
	{
		// No stage reads the descriptors; shader falls back to fixed colors.
		sample0 = green;
		sample1 = yellow;
	}

	// Quadrant layout: sample1 in top-left/bottom-right, sample0 in the other two.
	drawQuadrantReferenceResult(reference.getAccess(), sample1, sample0, sample0, sample1);

	// Bilinear compare with 1-ULP-per-channel RGBA threshold.
	if (!bilinearCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", reference.getAccess(), result, tcu::RGBA(1, 1, 1, 1), tcu::COMPARE_LOG_RESULT))
		return tcu::TestStatus::fail("Image verification failed");
	else
		return tcu::TestStatus::pass("Pass");
}
// Host-visible storage buffer that receives the compute shader's output
// (four vec4 result values) and can be read back for verification.
class ComputeInstanceResultBuffer
{
public:
	enum
	{
		DATA_SIZE = sizeof(tcu::Vec4[4])	// total result payload: four vec4s
	};

											ComputeInstanceResultBuffer	(const vk::DeviceInterface&		vki,
																		 vk::VkDevice					device,
																		 vk::Allocator&					allocator);

	// Invalidates the host mapping and copies the result values to *results.
	void									readResultContentsTo		(tcu::Vec4 (*results)[4]) const;

	inline vk::VkBuffer						getBuffer					(void) const { return *m_buffer;			}
	// Barrier making shader writes to the buffer visible to host reads.
	inline const vk::VkBufferMemoryBarrier*	getResultReadBarrier		(void) const { return &m_bufferBarrier;		}

private:
	// Creates the host-visible storage buffer and pre-fills it with a marker value.
	static vk::Move<vk::VkBuffer>			createResultBuffer			(const vk::DeviceInterface&		vki,
																		 vk::VkDevice					device,
																		 vk::Allocator&					allocator,
																		 de::MovePtr<vk::Allocation>*	outAllocation);

	// Builds the shader-write -> host-read buffer memory barrier for the result buffer.
	static vk::VkBufferMemoryBarrier		createResultBufferBarrier	(vk::VkBuffer buffer);

	const vk::DeviceInterface&				m_vki;
	const vk::VkDevice						m_device;

	de::MovePtr<vk::Allocation>				m_bufferMem;		// backing memory, filled in by createResultBuffer()
	const vk::Unique<vk::VkBuffer>			m_buffer;
	const vk::VkBufferMemoryBarrier			m_bufferBarrier;
};
// Creates the result buffer, binds its memory (stored into m_bufferMem) and
// prepares the read-back barrier.
ComputeInstanceResultBuffer::ComputeInstanceResultBuffer (const vk::DeviceInterface&	vki,
														  vk::VkDevice					device,
														  vk::Allocator&				allocator)
	: m_vki				(vki)
	, m_device			(device)
	, m_bufferMem		(DE_NULL)
	, m_buffer			(createResultBuffer(m_vki, m_device, allocator, &m_bufferMem))
	, m_bufferBarrier	(createResultBufferBarrier(*m_buffer))
{
}
// Copies the shader-written result values back to the host. The allocation is
// invalidated first so that device writes are visible to the host mapping.
void ComputeInstanceResultBuffer::readResultContentsTo (tcu::Vec4 (*results)[4]) const
{
	invalidateAlloc(m_vki, m_device, *m_bufferMem);

	const void* const hostDataPtr = m_bufferMem->getHostPtr();
	deMemcpy(*results, hostDataPtr, sizeof(*results));
}
// Creates a host-visible storage buffer for the compute results, binds memory to
// it (returned via *outAllocation) and pre-fills every float with -1.0f so that
// elements the shader never writes are detectable during verification.
vk::Move<vk::VkBuffer> ComputeInstanceResultBuffer::createResultBuffer (const vk::DeviceInterface&		vki,
																		vk::VkDevice					device,
																		vk::Allocator&					allocator,
																		de::MovePtr<vk::Allocation>*	outAllocation)
{
	const vk::VkBufferCreateInfo	createInfo =
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		DE_NULL,
		0u,											// flags
		(vk::VkDeviceSize)DATA_SIZE,				// size
		vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,		// usage
		vk::VK_SHARING_MODE_EXCLUSIVE,				// sharingMode
		0u,											// queueFamilyCount
		DE_NULL,									// pQueueFamilyIndices
	};
	vk::Move<vk::VkBuffer>			resultBuffer	(vk::createBuffer(vki, device, &createInfo));
	de::MovePtr<vk::Allocation>		resultMem		(allocateAndBindObjectMemory(vki, device, allocator, *resultBuffer, vk::MemoryRequirement::HostVisible));

	// Fill the whole buffer with the marker value, one float at a time.
	const float			clearValue	= -1.0f;
	deUint8* const		bytePtr		= (deUint8*)resultMem->getHostPtr();
	const size_t		numFloats	= DATA_SIZE / sizeof(float);

	for (size_t ndx = 0; ndx < numFloats; ++ndx)
		deMemcpy(bytePtr + ndx * sizeof(float), &clearValue, sizeof(float));

	flushAlloc(vki, device, *resultMem);

	*outAllocation = resultMem;
	return resultBuffer;
}
// Builds the memory barrier that makes compute-shader writes to the whole result
// buffer visible to subsequent host reads.
vk::VkBufferMemoryBarrier ComputeInstanceResultBuffer::createResultBufferBarrier (vk::VkBuffer buffer)
{
	vk::VkBufferMemoryBarrier bufferBarrier;

	bufferBarrier.sType					= vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
	bufferBarrier.pNext					= DE_NULL;
	bufferBarrier.srcAccessMask			= vk::VK_ACCESS_SHADER_WRITE_BIT;
	bufferBarrier.dstAccessMask			= vk::VK_ACCESS_HOST_READ_BIT;
	bufferBarrier.srcQueueFamilyIndex	= VK_QUEUE_FAMILY_IGNORED;	// no queue family ownership transfer
	bufferBarrier.dstQueueFamilyIndex	= VK_QUEUE_FAMILY_IGNORED;
	bufferBarrier.buffer				= buffer;
	bufferBarrier.offset				= (vk::VkDeviceSize)0u;
	bufferBarrier.size					= DATA_SIZE;				// covers the entire result payload

	return bufferBarrier;
}
// Owns a compute pipeline plus its pipeline layout, built from the "compute"
// shader in the given program collection and the supplied descriptor set layouts.
class ComputePipeline
{
public:
											ComputePipeline			(const vk::DeviceInterface&			vki,
																	 vk::VkDevice						device,
																	 const vk::BinaryCollection&		programCollection,
																	 deUint32							numDescriptorSets,
																	 const vk::VkDescriptorSetLayout*	descriptorSetLayouts);

	inline vk::VkPipeline					getPipeline				(void) const { return *m_pipeline;			};
	inline vk::VkPipelineLayout				getPipelineLayout		(void) const { return *m_pipelineLayout;	};

private:
	// Creates a pipeline layout referencing the given set layouts (no push constants).
	static vk::Move<vk::VkPipelineLayout>	createPipelineLayout	(const vk::DeviceInterface&			vki,
																	 vk::VkDevice						device,
																	 deUint32							numDescriptorSets,
																	 const vk::VkDescriptorSetLayout*	descriptorSetLayouts);

	// Creates the compute pipeline from the collection's "compute" shader binary.
	static vk::Move<vk::VkPipeline>			createPipeline			(const vk::DeviceInterface&			vki,
																	 vk::VkDevice						device,
																	 const vk::BinaryCollection&		programCollection,
																	 vk::VkPipelineLayout				layout);

	const vk::Unique<vk::VkPipelineLayout>	m_pipelineLayout;	// must be created before m_pipeline
	const vk::Unique<vk::VkPipeline>		m_pipeline;
};
// Builds the layout first, then the pipeline that references it (member order
// guarantees the layout exists when the pipeline is created).
ComputePipeline::ComputePipeline (const vk::DeviceInterface&		vki,
								  vk::VkDevice						device,
								  const vk::BinaryCollection&		programCollection,
								  deUint32							numDescriptorSets,
								  const vk::VkDescriptorSetLayout*	descriptorSetLayouts)
	: m_pipelineLayout	(createPipelineLayout(vki, device, numDescriptorSets, descriptorSetLayouts))
	, m_pipeline		(createPipeline(vki, device, programCollection, *m_pipelineLayout))
{
}
// Creates a pipeline layout holding the given descriptor set layouts and no
// push constant ranges.
vk::Move<vk::VkPipelineLayout> ComputePipeline::createPipelineLayout (const vk::DeviceInterface&		vki,
																	  vk::VkDevice						device,
																	  deUint32							numDescriptorSets,
																	  const vk::VkDescriptorSetLayout*	descriptorSetLayouts)
{
	vk::VkPipelineLayoutCreateInfo createInfo;

	createInfo.sType					= vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
	createInfo.pNext					= DE_NULL;
	createInfo.flags					= (vk::VkPipelineLayoutCreateFlags)0;
	createInfo.setLayoutCount			= numDescriptorSets;
	createInfo.pSetLayouts				= descriptorSetLayouts;
	createInfo.pushConstantRangeCount	= 0u;			// no push constants used by these tests
	createInfo.pPushConstantRanges		= DE_NULL;

	return vk::createPipelineLayout(vki, device, &createInfo);
}
// Compiles the collection's "compute" SPIR-V binary into a shader module and
// builds a compute pipeline around it using the given layout.
vk::Move<vk::VkPipeline> ComputePipeline::createPipeline (const vk::DeviceInterface&	vki,
														  vk::VkDevice					device,
														  const vk::BinaryCollection&	programCollection,
														  vk::VkPipelineLayout			layout)
{
	const vk::Unique<vk::VkShaderModule>		computeModule	(vk::createShaderModule(vki, device, programCollection.get("compute"), (vk::VkShaderModuleCreateFlags)0u));
	const vk::VkPipelineShaderStageCreateInfo	cs				=
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
		DE_NULL,
		(vk::VkPipelineShaderStageCreateFlags)0,
		vk::VK_SHADER_STAGE_COMPUTE_BIT,	// stage
		*computeModule,						// shader
		"main",								// entry point name
		DE_NULL,							// pSpecializationInfo
	};
	const vk::VkComputePipelineCreateInfo		createInfo		=
	{
		vk::VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
		DE_NULL,
		0u,									// flags
		cs,									// cs
		layout,								// layout
		(vk::VkPipeline)0,					// basePipelineHandle; no pipeline derivation
		0u,									// basePipelineIndex
	};
	return createComputePipeline(vki, device, (vk::VkPipelineCache)0u, &createInfo);
}
// Bundles everything needed to record and submit a single compute dispatch:
// pipeline, descriptor sets (or push-descriptor data supplied at submit time),
// dynamic offsets, and pre/post pipeline barriers. All pointers are borrowed;
// the caller keeps them alive for the lifetime of this object.
class ComputeCommand
{
public:
											ComputeCommand	(const vk::DeviceInterface&			vki,
															 vk::VkDevice						device,
															 vk::VkPipeline						pipeline,
															 vk::VkPipelineLayout				pipelineLayout,
															 const tcu::UVec3&					numWorkGroups,
															 ShaderInputInterface				shaderInterface,
															 DescriptorSetCount					descriptorSetCount,
															 const vk::VkDescriptorSet*			descriptorSets,
															 int								numDynamicOffsets,
															 const deUint32*					dynamicOffsets,
															 int								numPreBarriers,
															 const vk::VkBufferMemoryBarrier*	preBarriers,
															 int								numPostBarriers,
															 const vk::VkBufferMemoryBarrier*	postBarriers);

	// Records and submits the dispatch; binds pre-allocated sets, or pushes
	// descriptors with the given templates when updateTemplates is non-null.
	void									submitAndWait	(deUint32 queueFamilyIndex, vk::VkQueue queue, std::vector<UpdateTemplateHandleSp>* updateTemplates = DE_NULL, std::vector<RawUpdateRegistry>* updateRegistry = DE_NULL) const;
	// Variant using vkCmdPushDescriptorSetKHR via the supplied update builder.
	void									submitAndWait	(deUint32 queueFamilyIndex, vk::VkQueue queue, vk::DescriptorSetUpdateBuilder& updateBuilder, std::vector<deUint32>& descriptorsPerSet) const;

private:
	const vk::DeviceInterface&				m_vki;
	const vk::VkDevice						m_device;
	const vk::VkPipeline					m_pipeline;
	const vk::VkPipelineLayout				m_pipelineLayout;
	const tcu::UVec3						m_numWorkGroups;
	const ShaderInputInterface				m_shaderInterface;
	const DescriptorSetCount				m_descriptorSetCount;
	const vk::VkDescriptorSet* const		m_descriptorSets;
	const int								m_numDynamicOffsets;
	const deUint32* const					m_dynamicOffsets;
	const int								m_numPreBarriers;
	const vk::VkBufferMemoryBarrier* const	m_preBarriers;
	const int								m_numPostBarriers;
	const vk::VkBufferMemoryBarrier* const	m_postBarriers;
};
// Stores all handles and borrowed pointers for later submission; performs no
// Vulkan work itself.
ComputeCommand::ComputeCommand (const vk::DeviceInterface&			vki,
								vk::VkDevice						device,
								vk::VkPipeline						pipeline,
								vk::VkPipelineLayout				pipelineLayout,
								const tcu::UVec3&					numWorkGroups,
								ShaderInputInterface				shaderInterface,
								DescriptorSetCount					descriptorSetCount,
								const vk::VkDescriptorSet*			descriptorSets,
								int									numDynamicOffsets,
								const deUint32*						dynamicOffsets,
								int									numPreBarriers,
								const vk::VkBufferMemoryBarrier*	preBarriers,
								int									numPostBarriers,
								const vk::VkBufferMemoryBarrier*	postBarriers)
	: m_vki					(vki)
	, m_device				(device)
	, m_pipeline			(pipeline)
	, m_pipelineLayout		(pipelineLayout)
	, m_numWorkGroups		(numWorkGroups)
	, m_shaderInterface		(shaderInterface)
	, m_descriptorSetCount	(descriptorSetCount)
	, m_descriptorSets		(descriptorSets)
	, m_numDynamicOffsets	(numDynamicOffsets)
	, m_dynamicOffsets		(dynamicOffsets)
	, m_numPreBarriers		(numPreBarriers)
	, m_preBarriers			(preBarriers)
	, m_numPostBarriers		(numPostBarriers)
	, m_postBarriers		(postBarriers)
{
}
// Records the full compute sequence into a one-shot command buffer and submits
// it, blocking until the work completes. With updateTemplates == DE_NULL the
// pre-allocated descriptor sets are bound; otherwise descriptors are pushed
// with vkCmdPushDescriptorSetWithTemplateKHR using the registry's raw data.
void ComputeCommand::submitAndWait (deUint32 queueFamilyIndex, vk::VkQueue queue, std::vector<UpdateTemplateHandleSp>* updateTemplates, std::vector<RawUpdateRegistry>* updateRegistry) const
{
	const vk::VkCommandPoolCreateInfo				cmdPoolCreateInfo	=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
		DE_NULL,
		vk::VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,			// flags; buffer is submitted once
		queueFamilyIndex,									// queueFamilyIndex
	};
	const vk::Unique<vk::VkCommandPool>				cmdPool				(vk::createCommandPool(m_vki, m_device, &cmdPoolCreateInfo));

	const vk::VkCommandBufferAllocateInfo			cmdBufCreateInfo	=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
		DE_NULL,
		*cmdPool,											// cmdPool
		vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY,				// level
		1u,													// count
	};

	const vk::Unique<vk::VkCommandBuffer>			cmd					(vk::allocateCommandBuffer(m_vki, m_device, &cmdBufCreateInfo));

	beginCommandBuffer(m_vki, *cmd);
	m_vki.cmdBindPipeline(*cmd, vk::VK_PIPELINE_BIND_POINT_COMPUTE, m_pipeline);

	// normal update
	if (updateTemplates == DE_NULL)
	{
		switch (m_descriptorSetCount)
		{
			case DESCRIPTOR_SET_COUNT_SINGLE:
			case DESCRIPTOR_SET_COUNT_MULTIPLE:
			{
				// Contiguous set numbers: one bind call with all sets and offsets.
				m_vki.cmdBindDescriptorSets(*cmd, vk::VK_PIPELINE_BIND_POINT_COMPUTE, m_pipelineLayout, 0, getDescriptorSetCount(m_descriptorSetCount), m_descriptorSets, m_numDynamicOffsets, m_dynamicOffsets);
				break;
			}
			case DESCRIPTOR_SET_COUNT_MULTIPLE_DISCONTIGUOUS:
			{
				// Non-contiguous set numbers: bind one set per call with its slice of offsets.
				deUint32 dynamicOffsetNdx = 0u;

				for (deUint32 setNdx = 0; setNdx < getDescriptorSetCount(m_descriptorSetCount); setNdx++)
				{
					// \note dynamic offset replaces the view offset, i.e. it is not offset relative to the view offset
					const deUint32			numOffsets			= (!m_numDynamicOffsets) ? (0u) : (getInterfaceNumResources(m_shaderInterface));
					const deUint32* const	dynamicOffsetPtr	= (!m_numDynamicOffsets) ? (DE_NULL) : (&m_dynamicOffsets[dynamicOffsetNdx]);
					const deUint32			descriptorSetNdx	= getDescriptorSetNdx(m_descriptorSetCount, setNdx);

					m_vki.cmdBindDescriptorSets(*cmd, vk::VK_PIPELINE_BIND_POINT_COMPUTE, m_pipelineLayout, descriptorSetNdx, 1u, &m_descriptorSets[setNdx], numOffsets, dynamicOffsetPtr);

					dynamicOffsetNdx += getInterfaceNumResources(m_shaderInterface);
				}
				break;
			}
			default:
				DE_FATAL("Impossible");
		}
	}
	// update with push template
	else
	{
		for (deUint32 setNdx = 0; setNdx < (deUint32)(*updateTemplates).size(); setNdx++)
			m_vki.cmdPushDescriptorSetWithTemplateKHR(*cmd, **(*updateTemplates)[setNdx], m_pipelineLayout, getDescriptorSetNdx(m_descriptorSetCount, setNdx), (const void*)(*updateRegistry)[setNdx].getRawPointer());
	}

	// Make host writes to the source buffers visible to the compute stage.
	if (m_numPreBarriers)
		m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (vk::VkDependencyFlags)0,
								 0, (const vk::VkMemoryBarrier*)DE_NULL,
								 m_numPreBarriers, m_preBarriers,
								 0, (const vk::VkImageMemoryBarrier*)DE_NULL);

	m_vki.cmdDispatch(*cmd, m_numWorkGroups.x(), m_numWorkGroups.y(), m_numWorkGroups.z());

	// Make compute writes visible to host readback.
	m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
							 0, (const vk::VkMemoryBarrier*)DE_NULL,
							 m_numPostBarriers, m_postBarriers,
							 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	endCommandBuffer(m_vki, *cmd);

	submitCommandsAndWait(m_vki, m_device, queue, cmd.get());
}
// vkCmdPushDescriptorSetKHR variant: descriptors are pushed into the command
// buffer via the caller's update builder instead of binding pre-allocated sets;
// descriptorsPerSet gives the number of accumulated writes belonging to each set.
void ComputeCommand::submitAndWait (deUint32 queueFamilyIndex, vk::VkQueue queue, vk::DescriptorSetUpdateBuilder& updateBuilder, std::vector<deUint32>& descriptorsPerSet) const
{
	const vk::VkCommandPoolCreateInfo				cmdPoolCreateInfo	=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
		DE_NULL,
		vk::VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,			// flags; buffer is submitted once
		queueFamilyIndex,									// queueFamilyIndex
	};
	const vk::Unique<vk::VkCommandPool>				cmdPool				(vk::createCommandPool(m_vki, m_device, &cmdPoolCreateInfo));
	const vk::Unique<vk::VkCommandBuffer>			cmd					(vk::allocateCommandBuffer(m_vki, m_device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	beginCommandBuffer(m_vki, *cmd);
	m_vki.cmdBindPipeline(*cmd, vk::VK_PIPELINE_BIND_POINT_COMPUTE, m_pipeline);

	{
		// Push each set's descriptors; descriptorNdx is the running offset into
		// the builder's accumulated write list.
		deUint32 descriptorNdx = 0u;
		for (deUint32 setNdx = 0; setNdx < (deUint32)descriptorsPerSet.size(); setNdx++)
		{
			const deUint32 numDescriptors = descriptorsPerSet[setNdx];
			updateBuilder.updateWithPush(m_vki, *cmd, vk::VK_PIPELINE_BIND_POINT_COMPUTE, m_pipelineLayout, getDescriptorSetNdx(m_descriptorSetCount, setNdx), descriptorNdx, numDescriptors);
			descriptorNdx += numDescriptors;
		}
	}

	// Make host writes to the source buffers visible to the compute stage.
	if (m_numPreBarriers)
		m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (vk::VkDependencyFlags)0,
								 0, (const vk::VkMemoryBarrier*)DE_NULL,
								 m_numPreBarriers, m_preBarriers,
								 0, (const vk::VkImageMemoryBarrier*)DE_NULL);

	m_vki.cmdDispatch(*cmd, m_numWorkGroups.x(), m_numWorkGroups.y(), m_numWorkGroups.z());

	// Make compute writes visible to host readback.
	m_vki.cmdPipelineBarrier(*cmd, vk::VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
							 0, (const vk::VkMemoryBarrier*)DE_NULL,
							 m_numPostBarriers, m_postBarriers,
							 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	endCommandBuffer(m_vki, *cmd);

	submitCommandsAndWait(m_vki, m_device, queue, cmd.get());
}
// Test instance verifying buffer-descriptor access from a compute shader,
// parameterized over update method, descriptor type, set count, shader input
// interface and view/dynamic offset usage.
class BufferComputeInstance : public vkt::TestInstance
{
public:
											BufferComputeInstance				(Context&					context,
																				 DescriptorUpdateMethod		updateMethod,
																				 vk::VkDescriptorType		descriptorType,
																				 DescriptorSetCount			descriptorSetCount,
																				 ShaderInputInterface		shaderInterface,
																				 bool						viewOffset,
																				 bool						dynamicOffset,
																				 bool						dynamicOffsetNonZero);

private:
	// Creates a host-visible buffer holding value1 and value2 at the given offset,
	// surrounded by 0x5A filler bytes; backing memory is returned via outAllocation.
	vk::Move<vk::VkBuffer>					createColorDataBuffer				(deUint32 offset, deUint32 bufferSize, const tcu::Vec4& value1, const tcu::Vec4& value2, de::MovePtr<vk::Allocation>* outAllocation);
	// Builds the set layout; set 0 additionally carries the result storage buffer binding.
	vk::Move<vk::VkDescriptorSetLayout>		createDescriptorSetLayout			(deUint32 setNdx) const;
	// Builds a pool sized for all sets and their buffer descriptors.
	vk::Move<vk::VkDescriptorPool>			createDescriptorPool				(void) const;
	// Allocates a set (unless push descriptors are used) and fills it per m_updateMethod.
	vk::Move<vk::VkDescriptorSet>			createDescriptorSet					(vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout, deUint32 setNdx, vk::VkBuffer viewA, deUint32 offsetA, vk::VkBuffer viewB, deUint32 offsetB, vk::VkBuffer resBuf);
	// Records descriptor writes via m_updateBuilder (applied immediately for the normal method).
	void									writeDescriptorSet					(vk::VkDescriptorSet descriptorSet, deUint32 setNdx, vk::VkBuffer viewA, deUint32 offsetA, vk::VkBuffer viewB, deUint32 offsetB, vk::VkBuffer resBuf);
	// Builds an update template for the set; applies it immediately unless withPush is set.
	void									writeDescriptorSetWithTemplate		(vk::VkDescriptorSet descriptorSet, vk::VkDescriptorSetLayout layout, deUint32 setNdx, vk::VkBuffer viewA, deUint32 offsetA, vk::VkBuffer viewB, deUint32 offsetB, vk::VkBuffer resBuf, bool withPush = false, vk::VkPipelineLayout pipelineLayout = DE_NULL);

	tcu::TestStatus							iterate								(void);
	void									logTestPlan							(void) const;
	tcu::TestStatus							testResourceAccess					(void);

	// Byte offsets used for the two source buffers' static and dynamic offsets.
	enum
	{
		STATIC_OFFSET_VALUE_A	= 256,
		DYNAMIC_OFFSET_VALUE_A	= 512,
		STATIC_OFFSET_VALUE_B	= 1024,
		DYNAMIC_OFFSET_VALUE_B	= 768,
	};

	const DescriptorUpdateMethod			m_updateMethod;
	const vk::VkDescriptorType				m_descriptorType;
	const DescriptorSetCount				m_descriptorSetCount;
	const ShaderInputInterface				m_shaderInterface;
	const bool								m_setViewOffset;
	const bool								m_setDynamicOffset;
	const bool								m_dynamicOffsetNonZero;
	std::vector<UpdateTemplateHandleSp>		m_updateTemplates;		// one template per set (template-based methods)
	const vk::DeviceInterface&				m_vki;
	const vk::VkDevice						m_device;
	const vk::VkQueue						m_queue;
	const deUint32							m_queueFamilyIndex;
	vk::Allocator&							m_allocator;
	const ComputeInstanceResultBuffer		m_result;				// shader output buffer read back for verification
	std::vector<RawUpdateRegistry>			m_updateRegistry;		// raw data referenced by the update templates
	vk::DescriptorSetUpdateBuilder			m_updateBuilder;
	std::vector<deUint32>					m_descriptorsPerSet;	// write counts per set, consumed by push updates
};
// Captures the test parameters and context handles; creates the result buffer.
// A non-zero dynamic offset only makes sense when dynamic offsets are supplied,
// which the assertion below enforces.
BufferComputeInstance::BufferComputeInstance (Context&					context,
											  DescriptorUpdateMethod	updateMethod,
											  vk::VkDescriptorType		descriptorType,
											  DescriptorSetCount		descriptorSetCount,
											  ShaderInputInterface		shaderInterface,
											  bool						viewOffset,
											  bool						dynamicOffset,
											  bool						dynamicOffsetNonZero)
	: vkt::TestInstance			(context)
	, m_updateMethod			(updateMethod)
	, m_descriptorType			(descriptorType)
	, m_descriptorSetCount		(descriptorSetCount)
	, m_shaderInterface			(shaderInterface)
	, m_setViewOffset			(viewOffset)
	, m_setDynamicOffset		(dynamicOffset)
	, m_dynamicOffsetNonZero	(dynamicOffsetNonZero)
	, m_updateTemplates			()
	, m_vki						(context.getDeviceInterface())
	, m_device					(context.getDevice())
	, m_queue					(context.getUniversalQueue())
	, m_queueFamilyIndex		(context.getUniversalQueueFamilyIndex())
	, m_allocator				(context.getDefaultAllocator())
	, m_result					(m_vki, m_device, m_allocator)
	, m_updateRegistry			()
	, m_updateBuilder			()
	, m_descriptorsPerSet		()
{
	if (m_dynamicOffsetNonZero)
		DE_ASSERT(m_setDynamicOffset);
}
// Creates a host-visible uniform or storage buffer (chosen by m_descriptorType)
// containing value1 followed by value2 at byte offset 'offset'. All bytes outside
// the two vec4s are filled with the 0x5A marker so out-of-bounds reads are
// detectable. The backing allocation is returned via *outAllocation.
vk::Move<vk::VkBuffer> BufferComputeInstance::createColorDataBuffer (deUint32 offset, deUint32 bufferSize, const tcu::Vec4& value1, const tcu::Vec4& value2, de::MovePtr<vk::Allocation>* outAllocation)
{
	DE_ASSERT(offset + sizeof(tcu::Vec4[2]) <= bufferSize);

	const bool						isUniformBuffer		= isUniformDescriptorType(m_descriptorType);
	const vk::VkBufferUsageFlags	usageFlags			= (isUniformBuffer) ? (vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : (vk::VK_BUFFER_USAGE_STORAGE_BUFFER_BIT);
	const vk::VkBufferCreateInfo	createInfo =
	{
		vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
		DE_NULL,
		0u,								// flags
		(vk::VkDeviceSize)bufferSize,	// size
		usageFlags,						// usage
		vk::VK_SHARING_MODE_EXCLUSIVE,	// sharingMode
		0u,								// queueFamilyCount
		DE_NULL,						// pQueueFamilyIndices
	};
	vk::Move<vk::VkBuffer>			buffer				(vk::createBuffer(m_vki, m_device, &createInfo));
	de::MovePtr<vk::Allocation>		allocation			(allocateAndBindObjectMemory(m_vki, m_device, m_allocator, *buffer, vk::MemoryRequirement::HostVisible));
	void*							mapPtr				= allocation->getHostPtr();

	// Marker bytes before the payload (if any), the two values, then marker bytes to the end.
	if (offset)
		deMemset(mapPtr, 0x5A, (size_t)offset);
	deMemcpy((deUint8*)mapPtr + offset, value1.getPtr(), sizeof(tcu::Vec4));
	deMemcpy((deUint8*)mapPtr + offset + sizeof(tcu::Vec4), value2.getPtr(), sizeof(tcu::Vec4));
	deMemset((deUint8*)mapPtr + offset + 2 * sizeof(tcu::Vec4), 0x5A, (size_t)bufferSize - (size_t)offset - 2 * sizeof(tcu::Vec4));

	flushAlloc(m_vki, m_device, *allocation);
	*outAllocation = allocation;
	return buffer;
}
// Builds the descriptor set layout for set 'setNdx'. Set 0 gets an extra
// storage-buffer binding (binding 0) for the result buffer; the source-buffer
// bindings then follow, arranged according to m_shaderInterface. Push-descriptor
// update methods require the PUSH_DESCRIPTOR layout flag.
vk::Move<vk::VkDescriptorSetLayout> BufferComputeInstance::createDescriptorSetLayout (deUint32 setNdx) const
{
	vk::DescriptorSetLayoutBuilder			builder;
	vk::VkDescriptorSetLayoutCreateFlags	extraFlags	= 0;
	deUint32								binding		= 0;

	if (m_updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE ||
		m_updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH)
	{
		extraFlags |= vk::VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
	}

	// Result buffer lives only in the first set.
	if (setNdx == 0)
		builder.addSingleIndexedBinding(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, vk::VK_SHADER_STAGE_COMPUTE_BIT, binding++);

	switch (m_shaderInterface)
	{
		case SHADER_INPUT_SINGLE_DESCRIPTOR:
			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
			break;

		case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:
			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
			builder.addSingleBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT);
			break;

		case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:
			// Leave a gap between the two bindings (binding and binding + 2).
			builder.addSingleIndexedBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT, binding + 0u);
			builder.addSingleIndexedBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT, binding + 2u);
			break;

		case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:
			builder.addSingleIndexedBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT, getArbitraryBindingIndex(0));
			builder.addSingleIndexedBinding(m_descriptorType, vk::VK_SHADER_STAGE_COMPUTE_BIT, getArbitraryBindingIndex(1));
			break;

		case SHADER_INPUT_DESCRIPTOR_ARRAY:
			builder.addArrayBinding(m_descriptorType, 2u, vk::VK_SHADER_STAGE_COMPUTE_BIT);
			break;

		default:
			DE_FATAL("Impossible");
	}	// note: stray ';' after the switch removed

	return builder.build(m_vki, m_device, extraFlags);
}
// Creates a pool large enough for one result storage-buffer descriptor plus the
// source-buffer descriptors of every set, with free-descriptor-set support.
vk::Move<vk::VkDescriptorPool> BufferComputeInstance::createDescriptorPool (void) const
{
	const deUint32				numSets		= getDescriptorSetCount(m_descriptorSetCount);
	vk::DescriptorPoolBuilder	poolBuilder;

	poolBuilder.addType(vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
	poolBuilder.addType(m_descriptorType, numSets * getInterfaceNumResources(m_shaderInterface));

	return poolBuilder.build(m_vki, m_device, vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numSets);
}
// Allocates a descriptor set from the pool (push-descriptor methods get a null
// handle instead) and fills it according to m_updateMethod. Push-based methods
// defer the actual writes to command-buffer recording time.
vk::Move<vk::VkDescriptorSet> BufferComputeInstance::createDescriptorSet (vk::VkDescriptorPool pool, vk::VkDescriptorSetLayout layout, deUint32 setNdx, vk::VkBuffer viewA, deUint32 offsetA, vk::VkBuffer viewB, deUint32 offsetB, vk::VkBuffer resBuf)
{
	const vk::VkDescriptorSetAllocateInfo	allocInfo =
	{
		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
		DE_NULL,
		pool,
		1u,
		&layout
	};

	const bool						usesPushDescriptors	= (m_updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH)
														|| (m_updateMethod == DESCRIPTOR_UPDATE_METHOD_WITH_PUSH_TEMPLATE);
	vk::Move<vk::VkDescriptorSet>	descriptorSet;

	// Push descriptors need no allocated set object.
	if (!usesPushDescriptors)
		descriptorSet = allocateDescriptorSet(m_vki, m_device, &allocInfo);

	switch (m_updateMethod)
	{
		case DESCRIPTOR_UPDATE_METHOD_WITH_TEMPLATE:
			writeDescriptorSetWithTemplate(*descriptorSet, layout, setNdx, viewA, offsetA, viewB, offsetB, resBuf);
			break;

		case DESCRIPTOR_UPDATE_METHOD_NORMAL:
			writeDescriptorSet(*descriptorSet, setNdx, viewA, offsetA, viewB, offsetB, resBuf);
			break;

		default:
			// Push-based methods: nothing to write yet.
			break;
	}

	return descriptorSet;
}
// Records the descriptor writes for one set into m_updateBuilder: the result
// storage buffer (set 0 only) plus the source buffer(s), arranged according to
// m_shaderInterface. The number of writes is appended to m_descriptorsPerSet so
// push-based submission can later slice the builder's write list. For the normal
// update method the writes are applied and the builder cleared immediately.
void BufferComputeInstance::writeDescriptorSet (vk::VkDescriptorSet descriptorSet, deUint32 setNdx, vk::VkBuffer viewA, deUint32 offsetA, vk::VkBuffer viewB, deUint32 offsetB, vk::VkBuffer resBuf)
{
	const vk::VkDescriptorBufferInfo		resultInfo		= vk::makeDescriptorBufferInfo(resBuf, 0u, (vk::VkDeviceSize)ComputeInstanceResultBuffer::DATA_SIZE);
	const vk::VkDescriptorBufferInfo		bufferInfos[2]	=
	{
		vk::makeDescriptorBufferInfo(viewA, (vk::VkDeviceSize)offsetA, (vk::VkDeviceSize)sizeof(tcu::Vec4[2])),
		vk::makeDescriptorBufferInfo(viewB, (vk::VkDeviceSize)offsetB, (vk::VkDeviceSize)sizeof(tcu::Vec4[2])),
	};

	deUint32								numDescriptors	= 0u;
	deUint32								binding			= 0u;

	// result
	if (setNdx == 0)
	{
		m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(binding++), vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultInfo);
		numDescriptors++;
	}

	// buffers
	switch (m_shaderInterface)
	{
		case SHADER_INPUT_SINGLE_DESCRIPTOR:
			m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(binding++), m_descriptorType, &bufferInfos[0]);
			numDescriptors++;
			break;

		case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:
			m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(binding++), m_descriptorType, &bufferInfos[0]);
			m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(binding++), m_descriptorType, &bufferInfos[1]);
			numDescriptors += 2;
			break;

		case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:
			// Binding numbers leave a gap, matching the layout created for this interface.
			m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(binding), m_descriptorType, &bufferInfos[0]);
			m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(binding + 2), m_descriptorType, &bufferInfos[1]);
			numDescriptors += 2;
			break;

		case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:
			m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(getArbitraryBindingIndex(0)), m_descriptorType, &bufferInfos[0]);
			m_updateBuilder.writeSingle(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(getArbitraryBindingIndex(1)), m_descriptorType, &bufferInfos[1]);
			numDescriptors += 2;
			break;

		case SHADER_INPUT_DESCRIPTOR_ARRAY:
			// Both buffers go into one arrayed binding; counts as a single write.
			m_updateBuilder.writeArray(descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(binding++), m_descriptorType, 2u, bufferInfos);
			numDescriptors++;
			break;

		default:
			DE_FATAL("Impossible");
	}

	m_descriptorsPerSet.push_back(numDescriptors);

	if (m_updateMethod == DESCRIPTOR_UPDATE_METHOD_NORMAL)
	{
		m_updateBuilder.update(m_vki, m_device);
		m_updateBuilder.clear();
	}
}
// Builds a descriptor update template for one set, mirroring writeDescriptorSet's
// binding arrangement. The buffer infos are stored in a RawUpdateRegistry whose
// offsets parameterize the template entries; both template and registry are kept
// in member vectors so push-based paths can replay them at record time. Unless
// withPush is set, the set is updated immediately with the template.
void BufferComputeInstance::writeDescriptorSetWithTemplate (vk::VkDescriptorSet descriptorSet, vk::VkDescriptorSetLayout layout, deUint32 setNdx, vk::VkBuffer viewA, deUint32 offsetA, vk::VkBuffer viewB, deUint32 offsetB, vk::VkBuffer resBuf, bool withPush, vk::VkPipelineLayout pipelineLayout)
{
	const vk::VkDescriptorBufferInfo						resultInfo		= vk::makeDescriptorBufferInfo(resBuf, 0u, (vk::VkDeviceSize)ComputeInstanceResultBuffer::DATA_SIZE);
	const vk::VkDescriptorBufferInfo						bufferInfos[2]	=
	{
		vk::makeDescriptorBufferInfo(viewA, (vk::VkDeviceSize)offsetA, (vk::VkDeviceSize)sizeof(tcu::Vec4[2])),
		vk::makeDescriptorBufferInfo(viewB, (vk::VkDeviceSize)offsetB, (vk::VkDeviceSize)sizeof(tcu::Vec4[2])),
	};
	std::vector<vk::VkDescriptorUpdateTemplateEntry>		updateEntries;
	// Entry count/pointer are patched in once all entries are collected.
	vk::VkDescriptorUpdateTemplateCreateInfo				templateCreateInfo	=
	{
		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
		DE_NULL,
		0,
		0,			// descriptorUpdateEntryCount
		DE_NULL,	// pDescriptorUpdateEntries
		withPush ? vk::VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR : vk::VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
		layout,
		vk::VK_PIPELINE_BIND_POINT_COMPUTE,
		pipelineLayout,
		setNdx
	};

	deUint32												binding			= 0u;
	deUint32												offset			= 0u;
	RawUpdateRegistry										updateRegistry;

	// Register the raw buffer infos; their registry offsets feed the template entries below.
	if (setNdx == 0)
		updateRegistry.addWriteObject(resultInfo);
	updateRegistry.addWriteObject(bufferInfos[0]);
	updateRegistry.addWriteObject(bufferInfos[1]);

	// result
	if (setNdx == 0)
		updateEntries.push_back(createTemplateBinding(binding++, 0, 1, vk::VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, updateRegistry.getWriteObjectOffset(offset++), 0));

	// buffers
	switch (m_shaderInterface)
	{
		case SHADER_INPUT_SINGLE_DESCRIPTOR:
			updateEntries.push_back(createTemplateBinding(binding++, 0, 1, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), 0));
			break;

		case SHADER_INPUT_MULTIPLE_CONTIGUOUS_DESCRIPTORS:
			updateEntries.push_back(createTemplateBinding(binding++, 0, 1, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), 0));
			updateEntries.push_back(createTemplateBinding(binding++, 0, 1, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), 0));
			break;

		case SHADER_INPUT_MULTIPLE_DISCONTIGUOUS_DESCRIPTORS:
			// Binding numbers leave a gap, matching the layout created for this interface.
			updateEntries.push_back(createTemplateBinding(binding, 0, 1, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), 0));
			updateEntries.push_back(createTemplateBinding(binding + 2, 0, 1, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), 0));
			break;

		case SHADER_INPUT_MULTIPLE_ARBITRARY_DESCRIPTORS:
			updateEntries.push_back(createTemplateBinding(getArbitraryBindingIndex(0), 0, 1, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), 0));
			updateEntries.push_back(createTemplateBinding(getArbitraryBindingIndex(1), 0, 1, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), 0));
			break;

		case SHADER_INPUT_DESCRIPTOR_ARRAY:
			// One arrayed entry covering both buffer infos; stride is one buffer info.
			updateEntries.push_back(createTemplateBinding(binding++, 0, 2, m_descriptorType, updateRegistry.getWriteObjectOffset(offset++), sizeof(bufferInfos[0])));
			break;

		default:
			DE_FATAL("Impossible");
	}

	templateCreateInfo.pDescriptorUpdateEntries		= &updateEntries[0];
	templateCreateInfo.descriptorUpdateEntryCount	= (deUint32)updateEntries.size();

	vk::Move<vk::VkDescriptorUpdateTemplate>				updateTemplate		= vk::createDescriptorUpdateTemplate(m_vki, m_device, &templateCreateInfo);
	m_updateTemplates.push_back(UpdateTemplateHandleSp(new UpdateTemplateHandleUp(updateTemplate)));
	m_updateRegistry.push_back(updateRegistry);

	if (!withPush)
	{
		m_vki.updateDescriptorSetWithTemplate(m_device, descriptorSet, **m_updateTemplates.back(), m_updateRegistry.back().getRawPointer());
	}
}
// Single-iteration test body: logs the test plan, then runs the resource-access
// check and returns its verdict.
tcu::TestStatus BufferComputeInstance::iterate (void)
{
	logTestPlan();
	return testResourceAccess();
}
void BufferComputeInstance::logTestPlan (void) const
{
std::ostringstream msg;
msg << "Accessing resource in a compute program.\n"
<< ((m_descriptorSetCount == DESCRIPTOR_SET_COUNT_SINGLE) ? "Single descriptor set. " : "Multiple descriptor sets. ")
<< "Each descriptor set contains "
<< ((m_shaderInterface == SHADER_INPUT_SINGLE_DESCRIPTOR) ? "single" :