/*------------------------------------------------------------------------
* Vulkan Conformance Tests
* ------------------------
*
* Copyright (c) 2019 The Khronos Group Inc.
* Copyright (c) 2019 Google Inc.
* Copyright (c) 2017 Codeplay Software Ltd.
* Copyright (c) 2018 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/ /*!
* \file
* \brief Subgroups Tests
*/ /*--------------------------------------------------------------------*/
#include "vktSubgroupsPartitionedTests.hpp"
#include "vktSubgroupsScanHelpers.hpp"
#include "vktSubgroupsTestsUtils.hpp"
#include <string>
#include <vector>
using namespace tcu;
using namespace std;
using namespace vk;
using namespace vkt;
namespace
{
enum OpType
{
OPTYPE_ADD = 0,
OPTYPE_MUL,
OPTYPE_MIN,
OPTYPE_MAX,
OPTYPE_AND,
OPTYPE_OR,
OPTYPE_XOR,
OPTYPE_INCLUSIVE_ADD,
OPTYPE_INCLUSIVE_MUL,
OPTYPE_INCLUSIVE_MIN,
OPTYPE_INCLUSIVE_MAX,
OPTYPE_INCLUSIVE_AND,
OPTYPE_INCLUSIVE_OR,
OPTYPE_INCLUSIVE_XOR,
OPTYPE_EXCLUSIVE_ADD,
OPTYPE_EXCLUSIVE_MUL,
OPTYPE_EXCLUSIVE_MIN,
OPTYPE_EXCLUSIVE_MAX,
OPTYPE_EXCLUSIVE_AND,
OPTYPE_EXCLUSIVE_OR,
OPTYPE_EXCLUSIVE_XOR,
OPTYPE_LAST
};
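// Map the combined OpType (operation + scan kind) to the basic arithmetic or
// bitwise operator used by the scan helpers.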
static Operator getOperator(OpType t)
{
switch (t)
{
case OPTYPE_ADD:
case OPTYPE_INCLUSIVE_ADD:
case OPTYPE_EXCLUSIVE_ADD:
return OPERATOR_ADD;
case OPTYPE_MUL:
case OPTYPE_INCLUSIVE_MUL:
case OPTYPE_EXCLUSIVE_MUL:
return OPERATOR_MUL;
case OPTYPE_MIN:
case OPTYPE_INCLUSIVE_MIN:
case OPTYPE_EXCLUSIVE_MIN:
return OPERATOR_MIN;
case OPTYPE_MAX:
case OPTYPE_INCLUSIVE_MAX:
case OPTYPE_EXCLUSIVE_MAX:
return OPERATOR_MAX;
case OPTYPE_AND:
case OPTYPE_INCLUSIVE_AND:
case OPTYPE_EXCLUSIVE_AND:
return OPERATOR_AND;
case OPTYPE_OR:
case OPTYPE_INCLUSIVE_OR:
case OPTYPE_EXCLUSIVE_OR:
return OPERATOR_OR;
case OPTYPE_XOR:
case OPTYPE_INCLUSIVE_XOR:
case OPTYPE_EXCLUSIVE_XOR:
return OPERATOR_XOR;
default:
DE_FATAL("Unsupported op type");
return OPERATOR_ADD;
}
}
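// Extract the scan kind (reduce, inclusive scan or exclusive scan) encoded in the OpType.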
static ScanType getScanType(OpType t)
{
switch (t)
{
case OPTYPE_ADD:
case OPTYPE_MUL:
case OPTYPE_MIN:
case OPTYPE_MAX:
case OPTYPE_AND:
case OPTYPE_OR:
case OPTYPE_XOR:
return SCAN_REDUCE;
case OPTYPE_INCLUSIVE_ADD:
case OPTYPE_INCLUSIVE_MUL:
case OPTYPE_INCLUSIVE_MIN:
case OPTYPE_INCLUSIVE_MAX:
case OPTYPE_INCLUSIVE_AND:
case OPTYPE_INCLUSIVE_OR:
case OPTYPE_INCLUSIVE_XOR:
return SCAN_INCLUSIVE;
case OPTYPE_EXCLUSIVE_ADD:
case OPTYPE_EXCLUSIVE_MUL:
case OPTYPE_EXCLUSIVE_MIN:
case OPTYPE_EXCLUSIVE_MAX:
case OPTYPE_EXCLUSIVE_AND:
case OPTYPE_EXCLUSIVE_OR:
case OPTYPE_EXCLUSIVE_XOR:
return SCAN_EXCLUSIVE;
default:
DE_FATAL("Unsupported op type");
return SCAN_REDUCE;
}
}
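// Result verification callbacks: each invocation ORs one bit per subtest into tempResult,
// so a fully passing invocation writes the 24-bit mask 0xFFFFFF.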
static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
deUint32 width, deUint32)
{
DE_UNREF(internalData);
return vkt::subgroups::check(datas, width, 0xFFFFFF);
}
static bool checkCompute(const void* internalData, std::vector<const void*> datas,
const deUint32 numWorkgroups[3], const deUint32 localSize[3],
deUint32)
{
DE_UNREF(internalData);
return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0xFFFFFF);
}
std::string getOpTypeName(Operator op, ScanType scanType)
{
return getScanOpName("subgroup", "", op, scanType);
}
std::string getOpTypeNamePartitioned(Operator op, ScanType scanType)
{
return getScanOpName("subgroupPartitioned", "NV", op, scanType);
}
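// Parameters for a single test case: the operation and scan kind under test, the shader
// stage(s), the data format, and whether the compute variant uses a required subgroup size.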
struct CaseDefinition
{
Operator op;
ScanType scanType;
VkShaderStageFlags shaderStage;
VkFormat format;
de::SharedPtr<bool> geometryPointSizeSupported;
deBool requiredSubgroupSize;
};
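// GLSL extension directives needed by every generated shader, plus any format-specific extension.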
std::string getExtHeader(CaseDefinition caseDef)
{
return "#extension GL_NV_shader_subgroup_partitioned: enable\n"
"#extension GL_KHR_shader_subgroup_arithmetic: enable\n"
"#extension GL_KHR_shader_subgroup_ballot: enable\n" +
subgroups::getAdditionalExtensionForFormat(caseDef.format);
}
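// Build the GLSL test body shared by all shader stages. Each subtest sets a bit in
// tempResult; an invocation that passes every subtest ends with tempResult == 0xFFFFFF.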
string getTestString(const CaseDefinition &caseDef)
{
Operator op = caseDef.op;
ScanType st = caseDef.scanType;
// NOTE: tempResult can't have anything in bits 31:24 to avoid int->float
// conversion overflow in framebuffer tests.
string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
string bdy =
" uint tempResult = 0;\n"
" uint id = gl_SubgroupInvocationID;\n";
// Test the case where the partition has a single subset with all invocations in it.
// This should generate the same result as the non-partitioned function.
bdy +=
" uvec4 allBallot = mask;\n"
" " + fmt + " allResult = " + getOpTypeNamePartitioned(op, st) + "(data[gl_SubgroupInvocationID], allBallot);\n"
" " + fmt + " refResult = " + getOpTypeName(op, st) + "(data[gl_SubgroupInvocationID]);\n"
" if (" + getCompare(op, caseDef.format, "allResult", "refResult") + ") {\n"
" tempResult |= 0x1;\n"
" }\n";
// The definition of a partition doesn't forbid bits corresponding to inactive
// invocations being in the subset with active invocations. In other words, test that
// bits corresponding to inactive invocations are ignored.
bdy +=
" if (0 == (gl_SubgroupInvocationID % 2)) {\n"
" " + fmt + " allResult = " + getOpTypeNamePartitioned(op, st) + "(data[gl_SubgroupInvocationID], allBallot);\n"
" " + fmt + " refResult = " + getOpTypeName(op, st) + "(data[gl_SubgroupInvocationID]);\n"
" if (" + getCompare(op, caseDef.format, "allResult", "refResult") + ") {\n"
" tempResult |= 0x2;\n"
" }\n"
" } else {\n"
" tempResult |= 0x2;\n"
" }\n";
// Test the case where the partition has each invocation in a unique subset. For
// exclusive ops, the result is identity. For reduce/inclusive, it's the original value.
string expectedSelfResult = "data[gl_SubgroupInvocationID]";
if (st == SCAN_EXCLUSIVE)
expectedSelfResult = getIdentity(op, caseDef.format);
bdy +=
" uvec4 selfBallot = subgroupPartitionNV(gl_SubgroupInvocationID);\n"
" " + fmt + " selfResult = " + getOpTypeNamePartitioned(op, st) + "(data[gl_SubgroupInvocationID], selfBallot);\n"
" if (" + getCompare(op, caseDef.format, "selfResult", expectedSelfResult) + ") {\n"
" tempResult |= 0x4;\n"
" }\n";
// Test "random" partitions based on a hash of the invocation id.
// This "hash" function produces interesting/randomish partitions.
static const char *idhash = "((id%N)+(id%(N+1))-(id%2)+(id/2))%((N+1)/2)";
bdy +=
" for (uint N = 1; N < 16; ++N) {\n"
" " + fmt + " idhashFmt = " + fmt + "(" + idhash + ");\n"
" uvec4 partitionBallot = subgroupPartitionNV(idhashFmt) & mask;\n"
" " + fmt + " partitionedResult = " + getOpTypeNamePartitioned(op, st) + "(data[gl_SubgroupInvocationID], partitionBallot);\n"
" for (uint i = 0; i < N; ++i) {\n"
" " + fmt + " iFmt = " + fmt + "(i);\n"
" if (" + getCompare(op, caseDef.format, "idhashFmt", "iFmt") + ") {\n"
" " + fmt + " subsetResult = " + getOpTypeName(op, st) + "(data[gl_SubgroupInvocationID]);\n"
" tempResult |= " + getCompare(op, caseDef.format, "partitionedResult", "subsetResult") + " ? (0x4 << N) : 0;\n"
" }\n"
" }\n"
" }\n"
// tests in flow control:
" if (1 == (gl_SubgroupInvocationID % 2)) {\n"
" for (uint N = 1; N < 7; ++N) {\n"
" " + fmt + " idhashFmt = " + fmt + "(" + idhash + ");\n"
" uvec4 partitionBallot = subgroupPartitionNV(idhashFmt) & mask;\n"
" " + fmt + " partitionedResult = " + getOpTypeNamePartitioned(op, st) + "(data[gl_SubgroupInvocationID], partitionBallot);\n"
" for (uint i = 0; i < N; ++i) {\n"
" " + fmt + " iFmt = " + fmt + "(i);\n"
" if (" + getCompare(op, caseDef.format, "idhashFmt", "iFmt") + ") {\n"
" " + fmt + " subsetResult = " + getOpTypeName(op, st) + "(data[gl_SubgroupInvocationID]);\n"
" tempResult |= " + getCompare(op, caseDef.format, "partitionedResult", "subsetResult") + " ? (0x20000 << N) : 0;\n"
" }\n"
" }\n"
" }\n"
" } else {\n"
" tempResult |= 0xFC0000;\n"
" }\n"
;
return bdy;
}
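// Build shaders for the framebuffer variants: the tested stage reads its input from a
// uniform buffer and writes tempResult to a color output instead of an SSBO.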
void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
{
const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
std::ostringstream bdy;
std::string extHeader = getExtHeader(caseDef);
subgroups::setFragmentShaderFrameBuffer(programCollection);
if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
subgroups::setVertexShaderFrameBuffer(programCollection);
bdy << getTestString(caseDef);
if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
{
std::ostringstream vertexSrc;
vertexSrc << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< extHeader.c_str()
<< "layout(location = 0) in highp vec4 in_position;\n"
<< "layout(location = 0) out float out_color;\n"
<< "layout(set = 0, binding = 0) uniform Buffer1\n"
<< "{\n"
<< " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
<< "};\n"
<< "\n"
<< "void main (void)\n"
<< "{\n"
<< " uvec4 mask = subgroupBallot(true);\n"
<< bdy.str()
<< " out_color = float(tempResult);\n"
<< " gl_Position = in_position;\n"
<< " gl_PointSize = 1.0f;\n"
<< "}\n";
programCollection.glslSources.add("vert")
<< glu::VertexSource(vertexSrc.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
{
std::ostringstream geometry;
geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< extHeader.c_str()
<< "layout(points) in;\n"
<< "layout(points, max_vertices = 1) out;\n"
<< "layout(location = 0) out float out_color;\n"
<< "layout(set = 0, binding = 0) uniform Buffer\n"
<< "{\n"
<< " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
<< "};\n"
<< "\n"
<< "void main (void)\n"
<< "{\n"
<< " uvec4 mask = subgroupBallot(true);\n"
<< bdy.str()
<< " out_color = float(tempResult);\n"
<< " gl_Position = gl_in[0].gl_Position;\n"
<< (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
<< " EmitVertex();\n"
<< " EndPrimitive();\n"
<< "}\n";
programCollection.glslSources.add("geometry")
<< glu::GeometrySource(geometry.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
{
std::ostringstream controlSource;
controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< extHeader.c_str()
<< "layout(vertices = 2) out;\n"
<< "layout(location = 0) out float out_color[];\n"
<< "layout(set = 0, binding = 0) uniform Buffer1\n"
<< "{\n"
<< " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
<< "};\n"
<< "\n"
<< "void main (void)\n"
<< "{\n"
<< " if (gl_InvocationID == 0)\n"
<<" {\n"
<< " gl_TessLevelOuter[0] = 1.0f;\n"
<< " gl_TessLevelOuter[1] = 1.0f;\n"
<< " }\n"
<< " uvec4 mask = subgroupBallot(true);\n"
<< bdy.str()
<< " out_color[gl_InvocationID] = float(tempResult);"
<< " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
<< (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
<< "}\n";
programCollection.glslSources.add("tesc")
<< glu::TessellationControlSource(controlSource.str()) << buildOptions;
subgroups::setTesEvalShaderFrameBuffer(programCollection);
}
else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
{
std::ostringstream evaluationSource;
evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< extHeader.c_str()
<< "layout(isolines, equal_spacing, ccw ) in;\n"
<< "layout(location = 0) out float out_color;\n"
<< "layout(set = 0, binding = 0) uniform Buffer1\n"
<< "{\n"
<< " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
<< "};\n"
<< "\n"
<< "void main (void)\n"
<< "{\n"
<< " uvec4 mask = subgroupBallot(true);\n"
<< bdy.str()
<< " out_color = float(tempResult);\n"
<< " gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
<< (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
<< "}\n";
subgroups::setTesCtrlShaderFrameBuffer(programCollection);
programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
}
else
{
DE_FATAL("Unsupported shader stage");
}
}
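// Build shaders for the compute variant and for the all-graphics-stages variant, where
// each stage writes its result into a dedicated SSBO binding.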
void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
{
const string extHeader = getExtHeader(caseDef);
const string bdy = getTestString(caseDef);
if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
{
std::ostringstream src;
src << "#version 450\n"
<< extHeader.c_str()
<< "layout (local_size_x_id = 0, local_size_y_id = 1, "
"local_size_z_id = 2) in;\n"
<< "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
<< "{\n"
<< " uint result[];\n"
<< "};\n"
<< "layout(set = 0, binding = 1, std430) buffer Buffer2\n"
<< "{\n"
<< " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[];\n"
<< "};\n"
<< "\n"
<< "void main (void)\n"
<< "{\n"
<< " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
<< " highp uint offset = globalSize.x * ((globalSize.y * "
"gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
"gl_GlobalInvocationID.x;\n"
<< " uvec4 mask = subgroupBallot(true);\n"
<< bdy
<< " result[offset] = tempResult;\n"
<< "}\n";
programCollection.glslSources.add("comp")
<< glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
}
else
{
{
const std::string vertex =
"#version 450\n"
+ extHeader +
"layout(set = 0, binding = 0, std430) buffer Buffer1\n"
"{\n"
" uint result[];\n"
"};\n"
"layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
"{\n"
" " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
"};\n"
"\n"
"void main (void)\n"
"{\n"
" uvec4 mask = subgroupBallot(true);\n"
+ bdy +
" result[gl_VertexIndex] = tempResult;\n"
" float pixelSize = 2.0f/1024.0f;\n"
" float pixelPosition = pixelSize/2.0f - 1.0f;\n"
" gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
" gl_PointSize = 1.0f;\n"
"}\n";
programCollection.glslSources.add("vert")
<< glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
}
{
const std::string tesc =
"#version 450\n"
+ extHeader +
"layout(vertices=1) out;\n"
"layout(set = 0, binding = 1, std430) buffer Buffer1\n"
"{\n"
" uint result[];\n"
"};\n"
"layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
"{\n"
" " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
"};\n"
"\n"
"void main (void)\n"
"{\n"
" uvec4 mask = subgroupBallot(true);\n"
+ bdy +
" result[gl_PrimitiveID] = tempResult;\n"
" if (gl_InvocationID == 0)\n"
" {\n"
" gl_TessLevelOuter[0] = 1.0f;\n"
" gl_TessLevelOuter[1] = 1.0f;\n"
" }\n"
" gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
"}\n";
programCollection.glslSources.add("tesc")
<< glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
}
{
const std::string tese =
"#version 450\n"
+ extHeader +
"layout(isolines) in;\n"
"layout(set = 0, binding = 2, std430) buffer Buffer1\n"
"{\n"
" uint result[];\n"
"};\n"
"layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
"{\n"
" " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
"};\n"
"\n"
"void main (void)\n"
"{\n"
" uvec4 mask = subgroupBallot(true);\n"
+ bdy +
" result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
" float pixelSize = 2.0f/1024.0f;\n"
" gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
"}\n";
programCollection.glslSources.add("tese")
<< glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
}
{
const std::string geometry =
"#version 450\n"
+ extHeader +
"layout(${TOPOLOGY}) in;\n"
"layout(points, max_vertices = 1) out;\n"
"layout(set = 0, binding = 3, std430) buffer Buffer1\n"
"{\n"
" uint result[];\n"
"};\n"
"layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
"{\n"
" " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
"};\n"
"\n"
"void main (void)\n"
"{\n"
" uvec4 mask = subgroupBallot(true);\n"
+ bdy +
" result[gl_PrimitiveIDIn] = tempResult;\n"
" gl_Position = gl_in[0].gl_Position;\n"
" EmitVertex();\n"
" EndPrimitive();\n"
"}\n";
subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
programCollection.glslSources);
}
{
const std::string fragment =
"#version 450\n"
+ extHeader +
"layout(location = 0) out uint result;\n"
"layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
"{\n"
" " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
"};\n"
"void main (void)\n"
"{\n"
" uvec4 mask = subgroupBallot(true);\n"
+ bdy +
" result = tempResult;\n"
"}\n";
programCollection.glslSources.add("fragment")
<< glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
}
subgroups::addNoSubgroupShader(programCollection);
}
}
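// Reject the case early if the device lacks subgroup support, the partitioned feature bit,
// the tested format, or (when requested) usable subgroup size control.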
void supportedCheck (Context& context, CaseDefinition caseDef)
{
if (!subgroups::isSubgroupSupported(context))
TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV))
TCU_THROW(NotSupportedError, "Device does not support subgroup partitioned operations");
if (!subgroups::isFormatSupportedForDevice(context, caseDef.format))
TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
if (caseDef.requiredSubgroupSize)
{
if (!context.isDeviceFunctionalitySupported("VK_EXT_subgroup_size_control"))
TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
subgroupSizeControlFeatures.pNext = DE_NULL;
VkPhysicalDeviceFeatures2 features;
features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features.pNext = &subgroupSizeControlFeatures;
context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
subgroupSizeControlProperties.pNext = DE_NULL;
VkPhysicalDeviceProperties2 properties;
properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
properties.pNext = &subgroupSizeControlProperties;
context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
}
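// Run a framebuffer test for a single stage and verify the per-invocation results.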
tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
{
if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
{
return tcu::TestStatus::fail(
"Shader stage " +
subgroups::getShaderStageName(caseDef.shaderStage) +
" is required to support subgroup operations!");
}
else
{
TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
}
}
subgroups::SSBOData inputData;
inputData.format = caseDef.format;
inputData.layout = subgroups::SSBOData::LayoutStd140;
inputData.numElements = subgroups::maxSupportedSubgroupSize();
inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
else
TCU_THROW(InternalError, "Unhandled shader stage");
}
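// Run the SSBO-based test: the compute path optionally loops over every supported required
// subgroup size; the graphics path exercises all supported stages at once.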
tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
{
if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
{
if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
{
return tcu::TestStatus::fail(
"Shader stage " +
subgroups::getShaderStageName(caseDef.shaderStage) +
" is required to support subgroup operations!");
}
subgroups::SSBOData inputData;
inputData.format = caseDef.format;
inputData.layout = subgroups::SSBOData::LayoutStd430;
inputData.numElements = subgroups::maxSupportedSubgroupSize();
inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
tcu::TestLog& log = context.getTestContext().getLog();
VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
subgroupSizeControlProperties.pNext = DE_NULL;
VkPhysicalDeviceProperties2 properties;
properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
properties.pNext = &subgroupSizeControlProperties;
context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
<< subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
return result;
}
}
return tcu::TestStatus::pass("OK");
}
else
{
VkPhysicalDeviceSubgroupProperties subgroupProperties;
subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
subgroupProperties.pNext = DE_NULL;
VkPhysicalDeviceProperties2 properties;
properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
properties.pNext = &subgroupProperties;
context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
{
if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
else
stages = VK_SHADER_STAGE_FRAGMENT_BIT;
}
if ((VkShaderStageFlagBits)0u == stages)
TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
subgroups::SSBOData inputData;
inputData.format = caseDef.format;
inputData.layout = subgroups::SSBOData::LayoutStd430;
inputData.numElements = subgroups::maxSupportedSubgroupSize();
inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
inputData.binding = 4u;
inputData.stages = stages;
return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
}
}
namespace vkt
{
namespace subgroups
{
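// Register the "partitioned" test group: one case per operation/scan/format combination for
// compute, all graphics stages combined, and each individual framebuffer stage.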
tcu::TestCaseGroup* createSubgroupsPartitionedTests(tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
testCtx, "graphics", "Subgroup partitioned category tests: graphics"));
de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
testCtx, "compute", "Subgroup partitioned category tests: compute"));
de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
testCtx, "framebuffer", "Subgroup partitioned category tests: framebuffer"));
const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
const std::vector<VkFormat> formats = subgroups::getAllFormats();
for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
const VkFormat format = formats[formatIndex];
for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
bool isBool = subgroups::isFormatBool(format);
bool isFloat = subgroups::isFormatFloat(format);
OpType opType = static_cast<OpType>(opTypeIndex);
Operator op = getOperator(opType);
ScanType st = getScanType(opType);
bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
// Skip float formats for the bitwise category.
if (isFloat && isBitwiseOp)
continue;
// Skip bool formats except in the bitwise category.
if (isBool && !isBitwiseOp)
continue;
const std::string name = de::toLower(getOpTypeName(op, st)) + "_" + subgroups::getFormatNameForGLSL(format);
{
CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE};
addFunctionCaseWithPrograms(computeGroup.get(), name,
"", supportedCheck, initPrograms, test, caseDef);
caseDef.requiredSubgroupSize = DE_TRUE;
addFunctionCaseWithPrograms(computeGroup.get(), name + "_requiredsubgroupsize",
"", supportedCheck, initPrograms, test, caseDef);
}
{
const CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_ALL_GRAPHICS, format, de::SharedPtr<bool>(new bool), DE_FALSE};
addFunctionCaseWithPrograms(graphicGroup.get(), name,
"", supportedCheck, initPrograms, test, caseDef);
}
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
const CaseDefinition caseDef = {op, st, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE};
addFunctionCaseWithPrograms(framebufferGroup.get(), name +
"_" + getShaderStageName(caseDef.shaderStage), "",
supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
}
}
de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
testCtx, "partitioned", "Subgroup partitioned category tests"));
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
return group.release();
}
} // subgroups
} // vkt