/*-------------------------------------------------------------------------
* Vulkan Conformance Tests
* ------------------------
*
* Copyright (c) 2015 Google Inc.
* Copyright (c) 2016 The Khronos Group Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief SPIR-V Assembly Tests for Instructions (special opcode/operand)
*//*--------------------------------------------------------------------*/
#include "vktSpvAsmInstructionTests.hpp"
#include "vktAmberTestCase.hpp"
#include "tcuCommandLine.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuFloat.hpp"
#include "tcuFloatFormat.hpp"
#include "tcuRGBA.hpp"
#include "tcuStringTemplate.hpp"
#include "tcuTestLog.hpp"
#include "tcuVectorUtil.hpp"
#include "tcuInterval.hpp"
#include "vkDefs.hpp"
#include "vkDeviceUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkQueryUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkStrUtil.hpp"
#include "vkTypeUtil.hpp"
#include "deStringUtil.hpp"
#include "deUniquePtr.hpp"
#include "deMath.h"
#include "deRandom.hpp"
#include "tcuStringTemplate.hpp"
#include "vktSpvAsmCrossStageInterfaceTests.hpp"
#include "vktSpvAsm8bitStorageTests.hpp"
#include "vktSpvAsm16bitStorageTests.hpp"
#include "vktSpvAsmUboMatrixPaddingTests.hpp"
#include "vktSpvAsmConditionalBranchTests.hpp"
#include "vktSpvAsmIndexingTests.hpp"
#include "vktSpvAsmImageSamplerTests.hpp"
#include "vktSpvAsmComputeShaderCase.hpp"
#include "vktSpvAsmComputeShaderTestUtil.hpp"
#include "vktSpvAsmFloatControlsTests.hpp"
#include "vktSpvAsmFromHlslTests.hpp"
#include "vktSpvAsmEmptyStructTests.hpp"
#include "vktSpvAsmGraphicsShaderTestUtil.hpp"
#include "vktSpvAsmVariablePointersTests.hpp"
#include "vktSpvAsmVariableInitTests.hpp"
#include "vktSpvAsmPointerParameterTests.hpp"
#include "vktSpvAsmSpirvVersion1p4Tests.hpp"
#include "vktSpvAsmSpirvVersionTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktSpvAsmLoopDepLenTests.hpp"
#include "vktSpvAsmLoopDepInfTests.hpp"
#include "vktSpvAsmCompositeInsertTests.hpp"
#include "vktSpvAsmVaryingNameTests.hpp"
#include "vktSpvAsmWorkgroupMemoryTests.hpp"
#include "vktSpvAsmSignedIntCompareTests.hpp"
#include "vktSpvAsmSignedOpTests.hpp"
#include "vktSpvAsmPtrAccessChainTests.hpp"
#include "vktSpvAsmVectorShuffleTests.hpp"
#include "vktSpvAsmFloatControlsExtensionlessTests.hpp"
#include "vktSpvAsmNonSemanticInfoTests.hpp"
#include "vktSpvAsm64bitCompareTests.hpp"
#include "vktSpvAsmTrinaryMinMaxTests.hpp"
#include "vktSpvAsmTerminateInvocationTests.hpp"
#include "vktSpvAsmIntegerDotProductTests.hpp"
#include <cmath>
#include <limits>
#include <map>
#include <string>
#include <sstream>
#include <utility>
#include <stack>
namespace vkt
{
namespace SpirVAssembly
{
namespace
{
using namespace vk;
using std::map;
using std::string;
using std::vector;
using tcu::IVec3;
using tcu::IVec4;
using tcu::RGBA;
using tcu::TestLog;
using tcu::TestStatus;
using tcu::Vec4;
using de::UniquePtr;
using tcu::StringTemplate;
const bool TEST_WITH_NAN = true;
const bool TEST_WITHOUT_NAN = false;
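// The SPIR-V fragments below define helper functions that load f16-based
// values (scalars, vectors and matrices) from SSBOs whose elements are
// declared as 32-bit uints: whole u32 words are loaded and bitcast to %v2f16
// before the requested components are picked out. The ${var} token names the
// SSBO variable and is substituted via tcu::StringTemplate when a test
// specializes its shader source, e.g. StringTemplate(loadV2F16FromUint).specialize(specs)
// with specs["var"] = "ssbo_src" ("ssbo_src" is only an illustrative name;
// the concrete variable names are test-specific).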
const string loadScalarF16FromUint =
"%ld_arg_${var} = OpFunction %f16 None %f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_conv = OpBitcast %u32 %ld_arg_${var}_param\n"
"%ld_arg_${var}_div = OpUDiv %u32 %ld_arg_${var}_conv %c_u32_2\n"
"%ld_arg_${var}_and_low = OpBitwiseAnd %u32 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_div\n"
"%ld_arg_${var}_ld = OpLoad %u32 %ld_arg_${var}_gep\n"
"%ld_arg_${var}_unpack = OpBitcast %v2f16 %ld_arg_${var}_ld\n"
"%ld_arg_${var}_ex = OpVectorExtractDynamic %f16 %ld_arg_${var}_unpack %ld_arg_${var}_and_low\n"
"OpReturnValue %ld_arg_${var}_ex\n"
"OpFunctionEnd\n";
const string loadV2F16FromUint =
"%ld_arg_${var} = OpFunction %v2f16 None %v2f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param\n"
"%ld_arg_${var}_ld = OpLoad %u32 %ld_arg_${var}_gep\n"
"%ld_arg_${var}_cast = OpBitcast %v2f16 %ld_arg_${var}_ld\n"
"OpReturnValue %ld_arg_${var}_cast\n"
"OpFunctionEnd\n";
const string loadV3F16FromUints =
// Since we allocate a vec4 worth of values, this case is handled almost the
// same way as the vec4 case.
"%ld_arg_${var} = OpFunction %v3f16 None %v3f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_ld0 = OpLoad %u32 %ld_arg_${var}_gep0\n"
"%ld_arg_${var}_bc0 = OpBitcast %v2f16 %ld_arg_${var}_ld0\n"
"%ld_arg_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_ld1 = OpLoad %u32 %ld_arg_${var}_gep1\n"
"%ld_arg_${var}_bc1 = OpBitcast %v2f16 %ld_arg_${var}_ld1\n"
"%ld_arg_${var}_shuffle = OpVectorShuffle %v3f16 %ld_arg_${var}_bc0 %ld_arg_${var}_bc1 0 1 2\n"
"OpReturnValue %ld_arg_${var}_shuffle\n"
"OpFunctionEnd\n";
const string loadV4F16FromUints =
"%ld_arg_${var} = OpFunction %v4f16 None %v4f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_ld0 = OpLoad %u32 %ld_arg_${var}_gep0\n"
"%ld_arg_${var}_bc0 = OpBitcast %v2f16 %ld_arg_${var}_ld0\n"
"%ld_arg_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_ld1 = OpLoad %u32 %ld_arg_${var}_gep1\n"
"%ld_arg_${var}_bc1 = OpBitcast %v2f16 %ld_arg_${var}_ld1\n"
"%ld_arg_${var}_shuffle = OpVectorShuffle %v4f16 %ld_arg_${var}_bc0 %ld_arg_${var}_bc1 0 1 2 3\n"
"OpReturnValue %ld_arg_${var}_shuffle\n"
"OpFunctionEnd\n";
const string loadM2x2F16FromUints =
"%ld_arg_${var} = OpFunction %m2x2f16 None %m2x2f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_ld0 = OpLoad %u32 %ld_arg_${var}_gep0\n"
"%ld_arg_${var}_bc0 = OpBitcast %v2f16 %ld_arg_${var}_ld0\n"
"%ld_arg_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_ld1 = OpLoad %u32 %ld_arg_${var}_gep1\n"
"%ld_arg_${var}_bc1 = OpBitcast %v2f16 %ld_arg_${var}_ld1\n"
"%ld_arg_${var}_cons = OpCompositeConstruct %m2x2f16 %ld_arg_${var}_bc0 %ld_arg_${var}_bc1\n"
"OpReturnValue %ld_arg_${var}_cons\n"
"OpFunctionEnd\n";
const string loadM2x3F16FromUints =
"%ld_arg_${var} = OpFunction %m2x3f16 None %m2x3f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_3\n"
"%ld_arg_${var}_ld00 = OpLoad %u32 %ld_arg_${var}_gep00\n"
"%ld_arg_${var}_ld01 = OpLoad %u32 %ld_arg_${var}_gep01\n"
"%ld_arg_${var}_ld10 = OpLoad %u32 %ld_arg_${var}_gep10\n"
"%ld_arg_${var}_ld11 = OpLoad %u32 %ld_arg_${var}_gep11\n"
"%ld_arg_${var}_bc00 = OpBitcast %v2f16 %ld_arg_${var}_ld00\n"
"%ld_arg_${var}_bc01 = OpBitcast %v2f16 %ld_arg_${var}_ld01\n"
"%ld_arg_${var}_bc10 = OpBitcast %v2f16 %ld_arg_${var}_ld10\n"
"%ld_arg_${var}_bc11 = OpBitcast %v2f16 %ld_arg_${var}_ld11\n"
"%ld_arg_${var}_vec0 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc00 %ld_arg_${var}_bc01 0 1 2\n"
"%ld_arg_${var}_vec1 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc10 %ld_arg_${var}_bc11 0 1 2\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m2x3f16 %ld_arg_${var}_vec0 %ld_arg_${var}_vec1\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string loadM2x4F16FromUints =
"%ld_arg_${var} = OpFunction %m2x4f16 None %m2x4f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_3\n"
"%ld_arg_${var}_ld00 = OpLoad %u32 %ld_arg_${var}_gep00\n"
"%ld_arg_${var}_ld01 = OpLoad %u32 %ld_arg_${var}_gep01\n"
"%ld_arg_${var}_ld10 = OpLoad %u32 %ld_arg_${var}_gep10\n"
"%ld_arg_${var}_ld11 = OpLoad %u32 %ld_arg_${var}_gep11\n"
"%ld_arg_${var}_bc00 = OpBitcast %v2f16 %ld_arg_${var}_ld00\n"
"%ld_arg_${var}_bc01 = OpBitcast %v2f16 %ld_arg_${var}_ld01\n"
"%ld_arg_${var}_bc10 = OpBitcast %v2f16 %ld_arg_${var}_ld10\n"
"%ld_arg_${var}_bc11 = OpBitcast %v2f16 %ld_arg_${var}_ld11\n"
"%ld_arg_${var}_vec0 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc00 %ld_arg_${var}_bc01 0 1 2 3\n"
"%ld_arg_${var}_vec1 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc10 %ld_arg_${var}_bc11 0 1 2 3\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m2x4f16 %ld_arg_${var}_vec0 %ld_arg_${var}_vec1\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string loadM3x2F16FromUints =
"%ld_arg_${var} = OpFunction %m3x2f16 None %m3x2f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep2 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_ld0 = OpLoad %u32 %ld_arg_${var}_gep0\n"
"%ld_arg_${var}_ld1 = OpLoad %u32 %ld_arg_${var}_gep1\n"
"%ld_arg_${var}_ld2 = OpLoad %u32 %ld_arg_${var}_gep2\n"
"%ld_arg_${var}_bc0 = OpBitcast %v2f16 %ld_arg_${var}_ld0\n"
"%ld_arg_${var}_bc1 = OpBitcast %v2f16 %ld_arg_${var}_ld1\n"
"%ld_arg_${var}_bc2 = OpBitcast %v2f16 %ld_arg_${var}_ld2\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m3x2f16 %ld_arg_${var}_bc0 %ld_arg_${var}_bc1 %ld_arg_${var}_bc2\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string loadM3x3F16FromUints =
"%ld_arg_${var} = OpFunction %m3x3f16 None %m3x3f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_3\n"
"%ld_arg_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_4\n"
"%ld_arg_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_5\n"
"%ld_arg_${var}_ld00 = OpLoad %u32 %ld_arg_${var}_gep00\n"
"%ld_arg_${var}_ld01 = OpLoad %u32 %ld_arg_${var}_gep01\n"
"%ld_arg_${var}_ld10 = OpLoad %u32 %ld_arg_${var}_gep10\n"
"%ld_arg_${var}_ld11 = OpLoad %u32 %ld_arg_${var}_gep11\n"
"%ld_arg_${var}_ld20 = OpLoad %u32 %ld_arg_${var}_gep20\n"
"%ld_arg_${var}_ld21 = OpLoad %u32 %ld_arg_${var}_gep21\n"
"%ld_arg_${var}_bc00 = OpBitcast %v2f16 %ld_arg_${var}_ld00\n"
"%ld_arg_${var}_bc01 = OpBitcast %v2f16 %ld_arg_${var}_ld01\n"
"%ld_arg_${var}_bc10 = OpBitcast %v2f16 %ld_arg_${var}_ld10\n"
"%ld_arg_${var}_bc11 = OpBitcast %v2f16 %ld_arg_${var}_ld11\n"
"%ld_arg_${var}_bc20 = OpBitcast %v2f16 %ld_arg_${var}_ld20\n"
"%ld_arg_${var}_bc21 = OpBitcast %v2f16 %ld_arg_${var}_ld21\n"
"%ld_arg_${var}_vec0 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc00 %ld_arg_${var}_bc01 0 1 2\n"
"%ld_arg_${var}_vec1 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc10 %ld_arg_${var}_bc11 0 1 2\n"
"%ld_arg_${var}_vec2 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc20 %ld_arg_${var}_bc21 0 1 2\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m3x3f16 %ld_arg_${var}_vec0 %ld_arg_${var}_vec1 %ld_arg_${var}_vec2\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string loadM3x4F16FromUints =
"%ld_arg_${var} = OpFunction %m3x4f16 None %m3x4f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_3\n"
"%ld_arg_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_4\n"
"%ld_arg_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_5\n"
"%ld_arg_${var}_ld00 = OpLoad %u32 %ld_arg_${var}_gep00\n"
"%ld_arg_${var}_ld01 = OpLoad %u32 %ld_arg_${var}_gep01\n"
"%ld_arg_${var}_ld10 = OpLoad %u32 %ld_arg_${var}_gep10\n"
"%ld_arg_${var}_ld11 = OpLoad %u32 %ld_arg_${var}_gep11\n"
"%ld_arg_${var}_ld20 = OpLoad %u32 %ld_arg_${var}_gep20\n"
"%ld_arg_${var}_ld21 = OpLoad %u32 %ld_arg_${var}_gep21\n"
"%ld_arg_${var}_bc00 = OpBitcast %v2f16 %ld_arg_${var}_ld00\n"
"%ld_arg_${var}_bc01 = OpBitcast %v2f16 %ld_arg_${var}_ld01\n"
"%ld_arg_${var}_bc10 = OpBitcast %v2f16 %ld_arg_${var}_ld10\n"
"%ld_arg_${var}_bc11 = OpBitcast %v2f16 %ld_arg_${var}_ld11\n"
"%ld_arg_${var}_bc20 = OpBitcast %v2f16 %ld_arg_${var}_ld20\n"
"%ld_arg_${var}_bc21 = OpBitcast %v2f16 %ld_arg_${var}_ld21\n"
"%ld_arg_${var}_vec0 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc00 %ld_arg_${var}_bc01 0 1 2 3\n"
"%ld_arg_${var}_vec1 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc10 %ld_arg_${var}_bc11 0 1 2 3\n"
"%ld_arg_${var}_vec2 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc20 %ld_arg_${var}_bc21 0 1 2 3\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m3x4f16 %ld_arg_${var}_vec0 %ld_arg_${var}_vec1 %ld_arg_${var}_vec2\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string loadM4x2F16FromUints =
"%ld_arg_${var} = OpFunction %m4x2f16 None %m4x2f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep2 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_gep3 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_3\n"
"%ld_arg_${var}_ld0 = OpLoad %u32 %ld_arg_${var}_gep0\n"
"%ld_arg_${var}_ld1 = OpLoad %u32 %ld_arg_${var}_gep1\n"
"%ld_arg_${var}_ld2 = OpLoad %u32 %ld_arg_${var}_gep2\n"
"%ld_arg_${var}_ld3 = OpLoad %u32 %ld_arg_${var}_gep3\n"
"%ld_arg_${var}_bc0 = OpBitcast %v2f16 %ld_arg_${var}_ld0\n"
"%ld_arg_${var}_bc1 = OpBitcast %v2f16 %ld_arg_${var}_ld1\n"
"%ld_arg_${var}_bc2 = OpBitcast %v2f16 %ld_arg_${var}_ld2\n"
"%ld_arg_${var}_bc3 = OpBitcast %v2f16 %ld_arg_${var}_ld3\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m4x2f16 %ld_arg_${var}_bc0 %ld_arg_${var}_bc1 %ld_arg_${var}_bc2 %ld_arg_${var}_bc3\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string loadM4x3F16FromUints =
"%ld_arg_${var} = OpFunction %m4x3f16 None %m4x3f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_3\n"
"%ld_arg_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_4\n"
"%ld_arg_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_5\n"
"%ld_arg_${var}_gep30 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_6\n"
"%ld_arg_${var}_gep31 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_7\n"
"%ld_arg_${var}_ld00 = OpLoad %u32 %ld_arg_${var}_gep00\n"
"%ld_arg_${var}_ld01 = OpLoad %u32 %ld_arg_${var}_gep01\n"
"%ld_arg_${var}_ld10 = OpLoad %u32 %ld_arg_${var}_gep10\n"
"%ld_arg_${var}_ld11 = OpLoad %u32 %ld_arg_${var}_gep11\n"
"%ld_arg_${var}_ld20 = OpLoad %u32 %ld_arg_${var}_gep20\n"
"%ld_arg_${var}_ld21 = OpLoad %u32 %ld_arg_${var}_gep21\n"
"%ld_arg_${var}_ld30 = OpLoad %u32 %ld_arg_${var}_gep30\n"
"%ld_arg_${var}_ld31 = OpLoad %u32 %ld_arg_${var}_gep31\n"
"%ld_arg_${var}_bc00 = OpBitcast %v2f16 %ld_arg_${var}_ld00\n"
"%ld_arg_${var}_bc01 = OpBitcast %v2f16 %ld_arg_${var}_ld01\n"
"%ld_arg_${var}_bc10 = OpBitcast %v2f16 %ld_arg_${var}_ld10\n"
"%ld_arg_${var}_bc11 = OpBitcast %v2f16 %ld_arg_${var}_ld11\n"
"%ld_arg_${var}_bc20 = OpBitcast %v2f16 %ld_arg_${var}_ld20\n"
"%ld_arg_${var}_bc21 = OpBitcast %v2f16 %ld_arg_${var}_ld21\n"
"%ld_arg_${var}_bc30 = OpBitcast %v2f16 %ld_arg_${var}_ld30\n"
"%ld_arg_${var}_bc31 = OpBitcast %v2f16 %ld_arg_${var}_ld31\n"
"%ld_arg_${var}_vec0 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc00 %ld_arg_${var}_bc01 0 1 2\n"
"%ld_arg_${var}_vec1 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc10 %ld_arg_${var}_bc11 0 1 2\n"
"%ld_arg_${var}_vec2 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc20 %ld_arg_${var}_bc21 0 1 2\n"
"%ld_arg_${var}_vec3 = OpVectorShuffle %v3f16 %ld_arg_${var}_bc30 %ld_arg_${var}_bc31 0 1 2\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m4x3f16 %ld_arg_${var}_vec0 %ld_arg_${var}_vec1 %ld_arg_${var}_vec2 %ld_arg_${var}_vec3\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string loadM4x4F16FromUints =
"%ld_arg_${var} = OpFunction %m4x4f16 None %m4x4f16_i32_fn\n"
"%ld_arg_${var}_param = OpFunctionParameter %i32\n"
"%ld_arg_${var}_entry = OpLabel\n"
"%ld_arg_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_0\n"
"%ld_arg_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_1\n"
"%ld_arg_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_2\n"
"%ld_arg_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_3\n"
"%ld_arg_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_4\n"
"%ld_arg_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_5\n"
"%ld_arg_${var}_gep30 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_6\n"
"%ld_arg_${var}_gep31 = OpAccessChain %up_u32 %${var} %c_u32_0 %ld_arg_${var}_param %c_u32_7\n"
"%ld_arg_${var}_ld00 = OpLoad %u32 %ld_arg_${var}_gep00\n"
"%ld_arg_${var}_ld01 = OpLoad %u32 %ld_arg_${var}_gep01\n"
"%ld_arg_${var}_ld10 = OpLoad %u32 %ld_arg_${var}_gep10\n"
"%ld_arg_${var}_ld11 = OpLoad %u32 %ld_arg_${var}_gep11\n"
"%ld_arg_${var}_ld20 = OpLoad %u32 %ld_arg_${var}_gep20\n"
"%ld_arg_${var}_ld21 = OpLoad %u32 %ld_arg_${var}_gep21\n"
"%ld_arg_${var}_ld30 = OpLoad %u32 %ld_arg_${var}_gep30\n"
"%ld_arg_${var}_ld31 = OpLoad %u32 %ld_arg_${var}_gep31\n"
"%ld_arg_${var}_bc00 = OpBitcast %v2f16 %ld_arg_${var}_ld00\n"
"%ld_arg_${var}_bc01 = OpBitcast %v2f16 %ld_arg_${var}_ld01\n"
"%ld_arg_${var}_bc10 = OpBitcast %v2f16 %ld_arg_${var}_ld10\n"
"%ld_arg_${var}_bc11 = OpBitcast %v2f16 %ld_arg_${var}_ld11\n"
"%ld_arg_${var}_bc20 = OpBitcast %v2f16 %ld_arg_${var}_ld20\n"
"%ld_arg_${var}_bc21 = OpBitcast %v2f16 %ld_arg_${var}_ld21\n"
"%ld_arg_${var}_bc30 = OpBitcast %v2f16 %ld_arg_${var}_ld30\n"
"%ld_arg_${var}_bc31 = OpBitcast %v2f16 %ld_arg_${var}_ld31\n"
"%ld_arg_${var}_vec0 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc00 %ld_arg_${var}_bc01 0 1 2 3\n"
"%ld_arg_${var}_vec1 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc10 %ld_arg_${var}_bc11 0 1 2 3\n"
"%ld_arg_${var}_vec2 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc20 %ld_arg_${var}_bc21 0 1 2 3\n"
"%ld_arg_${var}_vec3 = OpVectorShuffle %v4f16 %ld_arg_${var}_bc30 %ld_arg_${var}_bc31 0 1 2 3\n"
"%ld_arg_${var}_mat = OpCompositeConstruct %m4x4f16 %ld_arg_${var}_vec0 %ld_arg_${var}_vec1 %ld_arg_${var}_vec2 %ld_arg_${var}_vec3\n"
"OpReturnValue %ld_arg_${var}_mat\n"
"OpFunctionEnd\n";
const string storeScalarF16AsUint =
// This version is sensitive to the initial value in the output buffer.
// The test infrastructure sets all output buffer bits to one before invoking
// the shader, so this version uses an atomic AND to produce the correct
// zero bits.
"%st_fn_${var} = OpFunction %void None %void_f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_and_low = OpBitwiseAnd %u32 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_zero_vec = OpBitcast %v2f16 %c_u32_0\n"
"%st_fn_${var}_insert = OpVectorInsertDynamic %v2f16 %st_fn_${var}_zero_vec %st_fn_${var}_param1 %st_fn_${var}_and_low\n"
"%st_fn_${var}_odd = OpIEqual %bool %st_fn_${var}_and_low %c_u32_1\n"
// OR 16 bits of ones into the half that was not populated with the result.
"%st_fn_${var}_sel = OpSelect %u32 %st_fn_${var}_odd %c_u32_low_ones %c_u32_high_ones\n"
"%st_fn_${var}_cast = OpBitcast %u32 %st_fn_${var}_insert\n"
"%st_fn_${var}_or = OpBitwiseOr %u32 %st_fn_${var}_cast %st_fn_${var}_sel\n"
"%st_fn_${var}_conv = OpBitcast %u32 %st_fn_${var}_param2\n"
"%st_fn_${var}_div = OpUDiv %u32 %st_fn_${var}_conv %c_u32_2\n"
"%st_fn_${var}_gep = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_div\n"
"%st_fn_${var}_and = OpAtomicAnd %u32 %st_fn_${var}_gep %c_u32_1 %c_u32_0 %st_fn_${var}_or\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeV2F16AsUint =
"%st_fn_${var} = OpFunction %void None %void_v2f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %v2f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_cast = OpBitcast %u32 %st_fn_${var}_param1\n"
"%st_fn_${var}_gep = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2\n"
"OpStore %st_fn_${var}_gep %st_fn_${var}_cast\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeV3F16AsUints =
// Since we allocate a vec4 worth of values, this case can be treated
// almost the same as the vec4 case. We will store some extra data that
// should not be compared.
"%st_fn_${var} = OpFunction %void None %void_v3f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %v3f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_shuffle0 = OpVectorShuffle %v2f16 %st_fn_${var}_param1 %st_fn_${var}_param1 0 1\n"
"%st_fn_${var}_shuffle1 = OpVectorShuffle %v2f16 %st_fn_${var}_param1 %st_fn_${var}_param1 2 3\n"
"%st_fn_${var}_bc0 = OpBitcast %u32 %st_fn_${var}_shuffle0\n"
"%st_fn_${var}_bc1 = OpBitcast %u32 %st_fn_${var}_shuffle1\n"
"%st_fn_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"OpStore %st_fn_${var}_gep0 %st_fn_${var}_bc0\n"
"OpStore %st_fn_${var}_gep1 %st_fn_${var}_bc1\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeV4F16AsUints =
"%st_fn_${var} = OpFunction %void None %void_v4f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %v4f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_shuffle0 = OpVectorShuffle %v2f16 %st_fn_${var}_param1 %st_fn_${var}_param1 0 1\n"
"%st_fn_${var}_shuffle1 = OpVectorShuffle %v2f16 %st_fn_${var}_param1 %st_fn_${var}_param1 2 3\n"
"%st_fn_${var}_bc0 = OpBitcast %u32 %st_fn_${var}_shuffle0\n"
"%st_fn_${var}_bc1 = OpBitcast %u32 %st_fn_${var}_shuffle1\n"
"%st_fn_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"OpStore %st_fn_${var}_gep0 %st_fn_${var}_bc0\n"
"OpStore %st_fn_${var}_gep1 %st_fn_${var}_bc1\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM2x2F16AsUints =
"%st_fn_${var} = OpFunction %void None %void_m2x2f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m2x2f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_bc0 = OpBitcast %u32 %st_fn_${var}_ex0\n"
"%st_fn_${var}_bc1 = OpBitcast %u32 %st_fn_${var}_ex1\n"
"%st_fn_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"OpStore %st_fn_${var}_gep0 %st_fn_${var}_bc0\n"
"OpStore %st_fn_${var}_gep1 %st_fn_${var}_bc1\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM2x3F16AsUints =
// In the extracted elements ele01 and ele11 the second component doesn't
// matter.
"%st_fn_${var} = OpFunction %void None %void_m2x3f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m2x3f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ele00 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 0 1\n"
"%st_fn_${var}_ele01 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 2 3\n"
"%st_fn_${var}_ele10 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 0 1\n"
"%st_fn_${var}_ele11 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 2 3\n"
"%st_fn_${var}_bc00 = OpBitcast %u32 %st_fn_${var}_ele00\n"
"%st_fn_${var}_bc01 = OpBitcast %u32 %st_fn_${var}_ele01\n"
"%st_fn_${var}_bc10 = OpBitcast %u32 %st_fn_${var}_ele10\n"
"%st_fn_${var}_bc11 = OpBitcast %u32 %st_fn_${var}_ele11\n"
"%st_fn_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"%st_fn_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_3\n"
"OpStore %st_fn_${var}_gep00 %st_fn_${var}_bc00\n"
"OpStore %st_fn_${var}_gep01 %st_fn_${var}_bc01\n"
"OpStore %st_fn_${var}_gep10 %st_fn_${var}_bc10\n"
"OpStore %st_fn_${var}_gep11 %st_fn_${var}_bc11\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM2x4F16AsUints =
"%st_fn_${var} = OpFunction %void None %void_m2x4f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m2x4f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ele00 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 0 1\n"
"%st_fn_${var}_ele01 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 2 3\n"
"%st_fn_${var}_ele10 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 0 1\n"
"%st_fn_${var}_ele11 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 2 3\n"
"%st_fn_${var}_bc00 = OpBitcast %u32 %st_fn_${var}_ele00\n"
"%st_fn_${var}_bc01 = OpBitcast %u32 %st_fn_${var}_ele01\n"
"%st_fn_${var}_bc10 = OpBitcast %u32 %st_fn_${var}_ele10\n"
"%st_fn_${var}_bc11 = OpBitcast %u32 %st_fn_${var}_ele11\n"
"%st_fn_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"%st_fn_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_3\n"
"OpStore %st_fn_${var}_gep00 %st_fn_${var}_bc00\n"
"OpStore %st_fn_${var}_gep01 %st_fn_${var}_bc01\n"
"OpStore %st_fn_${var}_gep10 %st_fn_${var}_bc10\n"
"OpStore %st_fn_${var}_gep11 %st_fn_${var}_bc11\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM3x2F16AsUints =
"%st_fn_${var} = OpFunction %void None %void_m3x2f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m3x2f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ex2 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 2\n"
"%st_fn_${var}_bc0 = OpBitcast %u32 %st_fn_${var}_ex0\n"
"%st_fn_${var}_bc1 = OpBitcast %u32 %st_fn_${var}_ex1\n"
"%st_fn_${var}_bc2 = OpBitcast %u32 %st_fn_${var}_ex2\n"
"%st_fn_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep2 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"OpStore %st_fn_${var}_gep0 %st_fn_${var}_bc0\n"
"OpStore %st_fn_${var}_gep1 %st_fn_${var}_bc1\n"
"OpStore %st_fn_${var}_gep2 %st_fn_${var}_bc2\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM3x3F16AsUints =
// In the extracted upper halves (ele01, ele11 and ele21) the second component doesn't matter.
"%st_fn_${var} = OpFunction %void None %void_m3x3f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m3x3f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ex2 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 2\n"
"%st_fn_${var}_ele00 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 0 1\n"
"%st_fn_${var}_ele01 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 2 3\n"
"%st_fn_${var}_ele10 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 0 1\n"
"%st_fn_${var}_ele11 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 2 3\n"
"%st_fn_${var}_ele20 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 0 1\n"
"%st_fn_${var}_ele21 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 2 3\n"
"%st_fn_${var}_bc00 = OpBitcast %u32 %st_fn_${var}_ele00\n"
"%st_fn_${var}_bc01 = OpBitcast %u32 %st_fn_${var}_ele01\n"
"%st_fn_${var}_bc10 = OpBitcast %u32 %st_fn_${var}_ele10\n"
"%st_fn_${var}_bc11 = OpBitcast %u32 %st_fn_${var}_ele11\n"
"%st_fn_${var}_bc20 = OpBitcast %u32 %st_fn_${var}_ele20\n"
"%st_fn_${var}_bc21 = OpBitcast %u32 %st_fn_${var}_ele21\n"
"%st_fn_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"%st_fn_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_3\n"
"%st_fn_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_4\n"
"%st_fn_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_5\n"
"OpStore %st_fn_${var}_gep00 %st_fn_${var}_bc00\n"
"OpStore %st_fn_${var}_gep01 %st_fn_${var}_bc01\n"
"OpStore %st_fn_${var}_gep10 %st_fn_${var}_bc10\n"
"OpStore %st_fn_${var}_gep11 %st_fn_${var}_bc11\n"
"OpStore %st_fn_${var}_gep20 %st_fn_${var}_bc20\n"
"OpStore %st_fn_${var}_gep21 %st_fn_${var}_bc21\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM3x4F16AsUints =
"%st_fn_${var} = OpFunction %void None %void_m3x4f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m3x4f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ex2 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 2\n"
"%st_fn_${var}_ele00 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 0 1\n"
"%st_fn_${var}_ele01 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 2 3\n"
"%st_fn_${var}_ele10 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 0 1\n"
"%st_fn_${var}_ele11 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 2 3\n"
"%st_fn_${var}_ele20 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 0 1\n"
"%st_fn_${var}_ele21 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 2 3\n"
"%st_fn_${var}_bc00 = OpBitcast %u32 %st_fn_${var}_ele00\n"
"%st_fn_${var}_bc01 = OpBitcast %u32 %st_fn_${var}_ele01\n"
"%st_fn_${var}_bc10 = OpBitcast %u32 %st_fn_${var}_ele10\n"
"%st_fn_${var}_bc11 = OpBitcast %u32 %st_fn_${var}_ele11\n"
"%st_fn_${var}_bc20 = OpBitcast %u32 %st_fn_${var}_ele20\n"
"%st_fn_${var}_bc21 = OpBitcast %u32 %st_fn_${var}_ele21\n"
"%st_fn_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"%st_fn_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_3\n"
"%st_fn_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_4\n"
"%st_fn_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_5\n"
"OpStore %st_fn_${var}_gep00 %st_fn_${var}_bc00\n"
"OpStore %st_fn_${var}_gep01 %st_fn_${var}_bc01\n"
"OpStore %st_fn_${var}_gep10 %st_fn_${var}_bc10\n"
"OpStore %st_fn_${var}_gep11 %st_fn_${var}_bc11\n"
"OpStore %st_fn_${var}_gep20 %st_fn_${var}_bc20\n"
"OpStore %st_fn_${var}_gep21 %st_fn_${var}_bc21\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM4x2F16AsUints =
"%st_fn_${var} = OpFunction %void None %void_m4x2f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m4x2f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ex2 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 2\n"
"%st_fn_${var}_ex3 = OpCompositeExtract %v2f16 %st_fn_${var}_param1 3\n"
"%st_fn_${var}_bc0 = OpBitcast %u32 %st_fn_${var}_ex0\n"
"%st_fn_${var}_bc1 = OpBitcast %u32 %st_fn_${var}_ex1\n"
"%st_fn_${var}_bc2 = OpBitcast %u32 %st_fn_${var}_ex2\n"
"%st_fn_${var}_bc3 = OpBitcast %u32 %st_fn_${var}_ex3\n"
"%st_fn_${var}_gep0 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep1 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep2 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"%st_fn_${var}_gep3 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_3\n"
"OpStore %st_fn_${var}_gep0 %st_fn_${var}_bc0\n"
"OpStore %st_fn_${var}_gep1 %st_fn_${var}_bc1\n"
"OpStore %st_fn_${var}_gep2 %st_fn_${var}_bc2\n"
"OpStore %st_fn_${var}_gep3 %st_fn_${var}_bc3\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM4x3F16AsUints =
// In the extracted upper halves (ele01, ele11, ele21 and ele31) the second component doesn't matter.
"%st_fn_${var} = OpFunction %void None %void_m4x3f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m4x3f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ex2 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 2\n"
"%st_fn_${var}_ex3 = OpCompositeExtract %v3f16 %st_fn_${var}_param1 3\n"
"%st_fn_${var}_ele00 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 0 1\n"
"%st_fn_${var}_ele01 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 2 3\n"
"%st_fn_${var}_ele10 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 0 1\n"
"%st_fn_${var}_ele11 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 2 3\n"
"%st_fn_${var}_ele20 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 0 1\n"
"%st_fn_${var}_ele21 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 2 3\n"
"%st_fn_${var}_ele30 = OpVectorShuffle %v2f16 %st_fn_${var}_ex3 %st_fn_${var}_ex3 0 1\n"
"%st_fn_${var}_ele31 = OpVectorShuffle %v2f16 %st_fn_${var}_ex3 %st_fn_${var}_ex3 2 3\n"
"%st_fn_${var}_bc00 = OpBitcast %u32 %st_fn_${var}_ele00\n"
"%st_fn_${var}_bc01 = OpBitcast %u32 %st_fn_${var}_ele01\n"
"%st_fn_${var}_bc10 = OpBitcast %u32 %st_fn_${var}_ele10\n"
"%st_fn_${var}_bc11 = OpBitcast %u32 %st_fn_${var}_ele11\n"
"%st_fn_${var}_bc20 = OpBitcast %u32 %st_fn_${var}_ele20\n"
"%st_fn_${var}_bc21 = OpBitcast %u32 %st_fn_${var}_ele21\n"
"%st_fn_${var}_bc30 = OpBitcast %u32 %st_fn_${var}_ele30\n"
"%st_fn_${var}_bc31 = OpBitcast %u32 %st_fn_${var}_ele31\n"
"%st_fn_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"%st_fn_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_3\n"
"%st_fn_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_4\n"
"%st_fn_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_5\n"
"%st_fn_${var}_gep30 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_6\n"
"%st_fn_${var}_gep31 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_7\n"
"OpStore %st_fn_${var}_gep00 %st_fn_${var}_bc00\n"
"OpStore %st_fn_${var}_gep01 %st_fn_${var}_bc01\n"
"OpStore %st_fn_${var}_gep10 %st_fn_${var}_bc10\n"
"OpStore %st_fn_${var}_gep11 %st_fn_${var}_bc11\n"
"OpStore %st_fn_${var}_gep20 %st_fn_${var}_bc20\n"
"OpStore %st_fn_${var}_gep21 %st_fn_${var}_bc21\n"
"OpStore %st_fn_${var}_gep30 %st_fn_${var}_bc30\n"
"OpStore %st_fn_${var}_gep31 %st_fn_${var}_bc31\n"
"OpReturn\n"
"OpFunctionEnd\n";
const string storeM4x4F16AsUints =
"%st_fn_${var} = OpFunction %void None %void_m4x4f16_i32_fn\n"
"%st_fn_${var}_param1 = OpFunctionParameter %m4x4f16\n"
"%st_fn_${var}_param2 = OpFunctionParameter %i32\n"
"%st_fn_${var}_entry = OpLabel\n"
"%st_fn_${var}_ex0 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 0\n"
"%st_fn_${var}_ex1 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 1\n"
"%st_fn_${var}_ex2 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 2\n"
"%st_fn_${var}_ex3 = OpCompositeExtract %v4f16 %st_fn_${var}_param1 3\n"
"%st_fn_${var}_ele00 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 0 1\n"
"%st_fn_${var}_ele01 = OpVectorShuffle %v2f16 %st_fn_${var}_ex0 %st_fn_${var}_ex0 2 3\n"
"%st_fn_${var}_ele10 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 0 1\n"
"%st_fn_${var}_ele11 = OpVectorShuffle %v2f16 %st_fn_${var}_ex1 %st_fn_${var}_ex1 2 3\n"
"%st_fn_${var}_ele20 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 0 1\n"
"%st_fn_${var}_ele21 = OpVectorShuffle %v2f16 %st_fn_${var}_ex2 %st_fn_${var}_ex2 2 3\n"
"%st_fn_${var}_ele30 = OpVectorShuffle %v2f16 %st_fn_${var}_ex3 %st_fn_${var}_ex3 0 1\n"
"%st_fn_${var}_ele31 = OpVectorShuffle %v2f16 %st_fn_${var}_ex3 %st_fn_${var}_ex3 2 3\n"
"%st_fn_${var}_bc00 = OpBitcast %u32 %st_fn_${var}_ele00\n"
"%st_fn_${var}_bc01 = OpBitcast %u32 %st_fn_${var}_ele01\n"
"%st_fn_${var}_bc10 = OpBitcast %u32 %st_fn_${var}_ele10\n"
"%st_fn_${var}_bc11 = OpBitcast %u32 %st_fn_${var}_ele11\n"
"%st_fn_${var}_bc20 = OpBitcast %u32 %st_fn_${var}_ele20\n"
"%st_fn_${var}_bc21 = OpBitcast %u32 %st_fn_${var}_ele21\n"
"%st_fn_${var}_bc30 = OpBitcast %u32 %st_fn_${var}_ele30\n"
"%st_fn_${var}_bc31 = OpBitcast %u32 %st_fn_${var}_ele31\n"
"%st_fn_${var}_gep00 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_0\n"
"%st_fn_${var}_gep01 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_1\n"
"%st_fn_${var}_gep10 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_2\n"
"%st_fn_${var}_gep11 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_3\n"
"%st_fn_${var}_gep20 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_4\n"
"%st_fn_${var}_gep21 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_5\n"
"%st_fn_${var}_gep30 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_6\n"
"%st_fn_${var}_gep31 = OpAccessChain %up_u32 %${var} %c_u32_0 %st_fn_${var}_param2 %c_u32_7\n"
"OpStore %st_fn_${var}_gep00 %st_fn_${var}_bc00\n"
"OpStore %st_fn_${var}_gep01 %st_fn_${var}_bc01\n"
"OpStore %st_fn_${var}_gep10 %st_fn_${var}_bc10\n"
"OpStore %st_fn_${var}_gep11 %st_fn_${var}_bc11\n"
"OpStore %st_fn_${var}_gep20 %st_fn_${var}_bc20\n"
"OpStore %st_fn_${var}_gep21 %st_fn_${var}_bc21\n"
"OpStore %st_fn_${var}_gep30 %st_fn_${var}_bc30\n"
"OpStore %st_fn_${var}_gep31 %st_fn_${var}_bc31\n"
"OpReturn\n"
"OpFunctionEnd\n";
template<typename T>
static void fillRandomScalars (de::Random& rnd, T minValue, T maxValue, void* dst, int numValues, int offset = 0)
{
T* const typedPtr = (T*)dst;
for (int ndx = 0; ndx < numValues; ndx++)
typedPtr[offset + ndx] = de::randomScalar<T>(rnd, minValue, maxValue);
}
// Filter is a function that returns true if a value should pass, false otherwise.
template<typename T, typename FilterT>
static void fillRandomScalars (de::Random& rnd, T minValue, T maxValue, void* dst, int numValues, FilterT filter, int offset = 0)
{
T* const typedPtr = (T*)dst;
T value;
for (int ndx = 0; ndx < numValues; ndx++)
{
do
value = de::randomScalar<T>(rnd, minValue, maxValue);
while (!filter(value));
typedPtr[offset + ndx] = value;
}
}
// Returns a random 64-bit integer whose magnitude is roughly logarithmically
// distributed: the value is masked down to a random bit width (1 to 63 bits)
// and negated half of the time.
deInt64 randomInt64LogDistributed (de::Random& rnd)
{
deInt64 val = rnd.getUint64();
val &= (1ull << rnd.getInt(1, 63)) - 1;
if (rnd.getBool())
val = -val;
return val;
}
static void fillRandomInt64sLogDistributed (de::Random& rnd, vector<deInt64>& dst, int numValues)
{
for (int ndx = 0; ndx < numValues; ndx++)
dst[ndx] = randomInt64LogDistributed(rnd);
}
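// Variant that keeps re-rolling each value until 'filter' accepts it.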
template<typename FilterT>
static void fillRandomInt64sLogDistributed (de::Random& rnd, vector<deInt64>& dst, int numValues, FilterT filter)
{
for (int ndx = 0; ndx < numValues; ndx++)
{
deInt64 value;
do {
value = randomInt64LogDistributed(rnd);
} while (!filter(value));
dst[ndx] = value;
}
}
inline bool filterNonNegative (const deInt64 value)
{
return value >= 0;
}
inline bool filterPositive (const deInt64 value)
{
return value > 0;
}
inline bool filterNotZero (const deInt64 value)
{
return value != 0;
}
static void floorAll (vector<float>& values)
{
for (size_t i = 0; i < values.size(); i++)
values[i] = deFloatFloor(values[i]);
}
static void floorAll (vector<Vec4>& values)
{
for (size_t i = 0; i < values.size(); i++)
values[i] = floor(values[i]);
}
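// Pairs a test case name with the string that gets substituted into the
// assembly template for that case.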
struct CaseParameter
{
const char* name;
string param;
CaseParameter (const char* case_, const string& param_) : name(case_), param(param_) {}
};
// Assembly code used for testing LocalSize, OpNop, OpConstant{Null|Composite}, Op[No]Line, OpSource[Continued], OpSourceExtension, OpUndef is based on GLSL source code:
//
// #version 430
//
// layout(std140, set = 0, binding = 0) readonly buffer Input {
// float elements[];
// } input_data;
// layout(std140, set = 0, binding = 1) writeonly buffer Output {
// float elements[];
// } output_data;
//
// layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;
//
// void main() {
// uint x = gl_GlobalInvocationID.x;
// output_data.elements[x] = -input_data.elements[x];
// }
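// Builds the compute shader assembly for the LocalSize tests. Depending on
// the flags, the workgroup size is declared as a LocalSize literal, as
// LocalSizeId with OpConstant operands, and/or as a WorkgroupSize built-in
// composed of spec constants (SpecIds 100-102). 'ndx' selects which component
// of gl_GlobalInvocationID is used to index the input/output buffers.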
static string getAsmForLocalSizeTest(bool useLiteralLocalSize, bool useLiteralLocalSizeId, bool useSpecConstantWorkgroupSize, IVec3 workGroupSize, deUint32 ndx)
{
std::ostringstream out;
out << "OpCapability Shader\n"
"OpMemoryModel Logical GLSL450\n";
if (useLiteralLocalSizeId)
{
out << "OpEntryPoint GLCompute %main \"main\" %id %indata %outdata\n"
"OpExecutionModeId %main LocalSizeId %const_0 %const_1 %const_2\n";
}
else
{
out << "OpEntryPoint GLCompute %main \"main\" %id\n";
if (useLiteralLocalSize)
{
out << "OpExecutionMode %main LocalSize "
<< workGroupSize.x() << " " << workGroupSize.y() << " " << workGroupSize.z() << "\n";
}
}
out << "OpSource GLSL 430\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n";
if (useSpecConstantWorkgroupSize)
{
out << "OpDecorate %spec_0 SpecId 100\n"
"OpDecorate %spec_1 SpecId 101\n"
"OpDecorate %spec_2 SpecId 102\n"
"OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize\n";
}
if (useLiteralLocalSizeId)
{
out << getComputeAsmInputOutputBufferTraits("Block")
<< getComputeAsmCommonTypes("StorageBuffer")
<< getComputeAsmInputOutputBuffer("StorageBuffer")
<< "%const_0 = OpConstant %u32 " << workGroupSize.x() << "\n"
"%const_1 = OpConstant %u32 " << workGroupSize.y() << "\n"
"%const_2 = OpConstant %u32 " << workGroupSize.z() << "\n";
}
else
{
out << getComputeAsmInputOutputBufferTraits()
<< getComputeAsmCommonTypes()
<< getComputeAsmInputOutputBuffer();
}
out << "%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0 \n";
if (useSpecConstantWorkgroupSize)
{
out << "%spec_0 = OpSpecConstant %u32 "<< workGroupSize.x() << "\n"
"%spec_1 = OpSpecConstant %u32 "<< workGroupSize.y() << "\n"
"%spec_2 = OpSpecConstant %u32 "<< workGroupSize.z() << "\n"
"%gl_WorkGroupSize = OpSpecConstantComposite %uvec3 %spec_0 %spec_1 %spec_2\n";
}
out << "%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%ndx = OpCompositeExtract %u32 %idval " << ndx << "\n"
"%inloc = OpAccessChain %f32ptr %indata %zero %ndx\n"
"%inval = OpLoad %f32 %inloc\n"
"%neg = OpFNegate %f32 %inval\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %ndx\n"
" OpStore %outloc %neg\n"
" OpReturn\n"
" OpFunctionEnd\n";
return out.str();
}
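// Creates the "localsize" test group, or the "localsize_id" group when
// useLocalSizeId is true. The LocalSizeId variants declare the workgroup size
// through OpExecutionModeId and therefore request SPIR-V 1.5 and
// VK_KHR_maintenance4.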
tcu::TestCaseGroup* createLocalSizeGroup(tcu::TestContext& testCtx, bool useLocalSizeId)
{
const char* groupName[]{ "localsize", "localsize_id" };
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, groupName[useLocalSizeId], ""));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const deUint32 numElements = 64u;
vector<float> positiveFloats (numElements, 0);
vector<float> negativeFloats (numElements, 0);
fillRandomScalars(rnd, 1.f, 100.f, &positiveFloats[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
negativeFloats[ndx] = -positiveFloats[ndx];
spec.inputs.push_back(BufferSp(new Float32Buffer(positiveFloats)));
spec.outputs.push_back(BufferSp(new Float32Buffer(negativeFloats)));
if (useLocalSizeId)
{
spec.spirvVersion = SPIRV_VERSION_1_5;
spec.extensions.push_back("VK_KHR_maintenance4");
}
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, false, IVec3(1, 1, 1), 0u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_localsize", "", spec));
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, true, IVec3(1, 1, 1), 0u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_and_specid_localsize", "", spec));
if (!useLocalSizeId) // don't repeat this test when useLocalSizeId is true
{
spec.assembly = getAsmForLocalSizeTest(false, false, true, IVec3(1, 1, 1), 0u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "specid_localsize", "", spec));
}
spec.numWorkGroups = IVec3(1, 1, 1);
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, false, IVec3(numElements, 1, 1), 0u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_localsize_x", "", spec));
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, true, IVec3(numElements, 1, 1), 0u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_and_specid_localsize_x", "", spec));
if (!useLocalSizeId) // don't repeat this test when useLocalSizeId is true
{
spec.assembly = getAsmForLocalSizeTest(false, false, true, IVec3(numElements, 1, 1), 0u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "specid_localsize_x", "", spec));
}
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, false, IVec3(1, numElements, 1), 1u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_localsize_y", "", spec));
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, true, IVec3(1, numElements, 1), 1u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_and_specid_localsize_y", "", spec));
if (!useLocalSizeId) // don't repeat this test when useLocalSizeId is true
{
spec.assembly = getAsmForLocalSizeTest(false, false, true, IVec3(1, numElements, 1), 1u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "specid_localsize_y", "", spec));
}
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, false, IVec3(1, 1, numElements), 2u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_localsize_z", "", spec));
spec.assembly = getAsmForLocalSizeTest(true, useLocalSizeId, true, IVec3(1, 1, numElements), 2u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "literal_and_specid_localsize_z", "", spec));
if (!useLocalSizeId) // don't repeat this test when useLocalSizeId is true
{
spec.assembly = getAsmForLocalSizeTest(false, false, true, IVec3(1, 1, numElements), 2u);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "specid_localsize_z", "", spec));
}
return group.release();
}
tcu::TestCaseGroup* createOpNopGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opnop", "Test the OpNop instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 100;
vector<float> positiveFloats (numElements, 0);
vector<float> negativeFloats (numElements, 0);
fillRandomScalars(rnd, 1.f, 100.f, &positiveFloats[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
negativeFloats[ndx] = -positiveFloats[ndx];
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"OpSource GLSL 430\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
+ string(getComputeAsmInputOutputBufferTraits()) + string(getComputeAsmCommonTypes())
+ string(getComputeAsmInputOutputBuffer()) +
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
" OpNop\n" // Inside a function body
"%inloc = OpAccessChain %f32ptr %indata %zero %x\n"
"%inval = OpLoad %f32 %inloc\n"
"%neg = OpFNegate %f32 %inval\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %neg\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(positiveFloats)));
spec.outputs.push_back(BufferSp(new Float32Buffer(negativeFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "OpNop appearing at different places", spec));
return group.release();
}
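// For every set/binding pair in testLocations two cases are generated: one
// shader declaring an unused variable at that location and one declaring an
// unused function (with its own entry point) plus unused types and constants.
// Both shaders still simply negate the input values, so the unused
// declarations must not influence the result.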
tcu::TestCaseGroup* createUnusedVariableComputeTests (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "unused_variables", "Compute shaders with unused variables"));
de::Random rnd (deStringHash(group->getName()));
const int numElements = 100;
vector<float> positiveFloats (numElements, 0);
vector<float> negativeFloats (numElements, 0);
fillRandomScalars(rnd, 1.f, 100.f, &positiveFloats[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
negativeFloats[ndx] = -positiveFloats[ndx];
const VariableLocation testLocations[] =
{
// Set Binding
{ 0, 5 },
{ 5, 5 },
};
for (size_t locationNdx = 0; locationNdx < DE_LENGTH_OF_ARRAY(testLocations); ++locationNdx)
{
const VariableLocation& location = testLocations[locationNdx];
// Unused variable.
{
ComputeShaderSpec spec;
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"OpDecorate %id BuiltIn GlobalInvocationId\n"
+ getUnusedDecorations(location)
+ string(getComputeAsmInputOutputBufferTraits()) + string(getComputeAsmCommonTypes())
+ getUnusedTypesAndConstants()
+ string(getComputeAsmInputOutputBuffer())
+ getUnusedBuffer() +
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc = OpAccessChain %f32ptr %indata %zero %x\n"
"%inval = OpLoad %f32 %inloc\n"
"%neg = OpFNegate %f32 %inval\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %neg\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(positiveFloats)));
spec.outputs.push_back(BufferSp(new Float32Buffer(negativeFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
std::string testName = "variable_" + location.toString();
std::string testDescription = "Unused variable test with " + location.toDescription();
group->addChild(new SpvAsmComputeShaderCase(testCtx, testName.c_str(), testDescription.c_str(), spec));
}
// Unused function.
{
ComputeShaderSpec spec;
spec.assembly =
string(getComputeAsmShaderPreamble("", "", "", getUnusedEntryPoint())) +
"OpDecorate %id BuiltIn GlobalInvocationId\n"
+ getUnusedDecorations(location)
+ string(getComputeAsmInputOutputBufferTraits()) + string(getComputeAsmCommonTypes())
+ getUnusedTypesAndConstants() +
"%c_i32_0 = OpConstant %i32 0\n"
"%c_i32_1 = OpConstant %i32 1\n"
+ string(getComputeAsmInputOutputBuffer())
+ getUnusedBuffer() +
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc = OpAccessChain %f32ptr %indata %zero %x\n"
"%inval = OpLoad %f32 %inloc\n"
"%neg = OpFNegate %f32 %inval\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %neg\n"
" OpReturn\n"
" OpFunctionEnd\n"
+ getUnusedFunctionBody();
spec.inputs.push_back(BufferSp(new Float32Buffer(positiveFloats)));
spec.outputs.push_back(BufferSp(new Float32Buffer(negativeFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
std::string testName = "function_" + location.toString();
std::string testDescription = "Unused function test with " + location.toDescription();
group->addChild(new SpvAsmComputeShaderCase(testCtx, testName.c_str(), testDescription.c_str(), spec));
}
}
return group.release();
}
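// Custom verification function for the OpFUnord* cases: each output integer
// is compared against the expected value, and when NaN handling is not
// required (nanSupported == false) elements where either input is NaN are
// skipped.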
template<bool nanSupported>
bool compareFUnord (const std::vector<Resource>& inputs, const vector<AllocationSp>& outputAllocs, const std::vector<Resource>& expectedOutputs, TestLog& log)
{
if (outputAllocs.size() != 1)
return false;
vector<deUint8> input1Bytes;
vector<deUint8> input2Bytes;
vector<deUint8> expectedBytes;
inputs[0].getBytes(input1Bytes);
inputs[1].getBytes(input2Bytes);
expectedOutputs[0].getBytes(expectedBytes);
const deInt32* const expectedOutputAsInt = reinterpret_cast<const deInt32*>(&expectedBytes.front());
const deInt32* const outputAsInt = static_cast<const deInt32*>(outputAllocs[0]->getHostPtr());
const float* const input1AsFloat = reinterpret_cast<const float*>(&input1Bytes.front());
const float* const input2AsFloat = reinterpret_cast<const float*>(&input2Bytes.front());
bool returnValue = true;
for (size_t idx = 0; idx < expectedBytes.size() / sizeof(deInt32); ++idx)
{
if (!nanSupported && (tcu::Float32(input1AsFloat[idx]).isNaN() || tcu::Float32(input2AsFloat[idx]).isNaN()))
continue;
if (outputAsInt[idx] != expectedOutputAsInt[idx])
{
log << TestLog::Message << "ERROR: Sub-case failed. inputs: " << input1AsFloat[idx] << "," << input2AsFloat[idx] << " output: " << outputAsInt[idx] << " expected output: " << expectedOutputAsInt[idx] << TestLog::EndMessage;
returnValue = false;
}
}
return returnValue;
}
typedef VkBool32 (*compareFuncType) (float, float);
struct OpFUnordCase
{
const char* name;
const char* opCode;
compareFuncType compareFunc;
OpFUnordCase (const char* _name, const char* _opCode, compareFuncType _compareFunc)
: name (_name)
, opCode (_opCode)
, compareFunc (_compareFunc) {}
};
#define ADD_OPFUNORD_CASE(NAME, OPCODE, OPERATOR) \
do { \
struct compare_##NAME { static VkBool32 compare(float x, float y) { return (x OPERATOR y) ? VK_TRUE : VK_FALSE; } }; \
cases.push_back(OpFUnordCase(#NAME, OPCODE, compare_##NAME::compare)); \
} while (deGetFalse())
tcu::TestCaseGroup* createOpFUnordGroup (tcu::TestContext& testCtx, const bool testWithNan)
{
const string nan = testWithNan ? "_nan" : "";
const string groupName = "opfunord" + nan;
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, groupName.c_str(), "Test the OpFUnord* opcodes"));
de::Random rnd (deStringHash(group->getName()));
const int numElements = 100;
vector<OpFUnordCase> cases;
string extensions = testWithNan ? "OpExtension \"SPV_KHR_float_controls\"\n" : "";
string capabilities = testWithNan ? "OpCapability SignedZeroInfNanPreserve\n" : "";
string exeModes = testWithNan ? "OpExecutionMode %main SignedZeroInfNanPreserve 32\n" : "";
const StringTemplate shaderTemplate (
string(getComputeAsmShaderPreamble(capabilities, extensions, exeModes)) +
"OpSource GLSL 430\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %buf2 BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %f32arr ArrayStride 4\n"
"OpDecorate %i32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
"OpMemberDecorate %buf2 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %f32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%buf2 = OpTypeStruct %i32arr\n"
"%buf2ptr = OpTypePointer Uniform %buf2\n"
"%outdata = OpVariable %buf2ptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%consti1 = OpConstant %i32 1\n"
"%constf1 = OpConstant %f32 1.0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %f32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %f32 %inloc1\n"
"%inloc2 = OpAccessChain %f32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %f32 %inloc2\n"
"%outloc = OpAccessChain %i32ptr %outdata %zero %x\n"
"%result = ${OPCODE} %bool %inval1 %inval2\n"
"%int_res = OpSelect %i32 %result %consti1 %zero\n"
" OpStore %outloc %int_res\n"
" OpReturn\n"
" OpFunctionEnd\n");
ADD_OPFUNORD_CASE(equal, "OpFUnordEqual", ==);
ADD_OPFUNORD_CASE(less, "OpFUnordLessThan", <);
ADD_OPFUNORD_CASE(lessequal, "OpFUnordLessThanEqual", <=);
ADD_OPFUNORD_CASE(greater, "OpFUnordGreaterThan", >);
ADD_OPFUNORD_CASE(greaterequal, "OpFUnordGreaterThanEqual", >=);
ADD_OPFUNORD_CASE(notequal, "OpFUnordNotEqual", !=);
for (size_t caseNdx = 0; caseNdx < cases.size(); ++caseNdx)
{
map<string, string> specializations;
ComputeShaderSpec spec;
const float NaN = std::numeric_limits<float>::quiet_NaN();
vector<float> inputFloats1 (numElements, 0);
vector<float> inputFloats2 (numElements, 0);
vector<deInt32> expectedInts (numElements, 0);
specializations["OPCODE"] = cases[caseNdx].opCode;
spec.assembly = shaderTemplate.specialize(specializations);
fillRandomScalars(rnd, 1.f, 100.f, &inputFloats1[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
{
switch (ndx % 6)
{
case 0: inputFloats2[ndx] = inputFloats1[ndx] + 1.0f; break;
case 1: inputFloats2[ndx] = inputFloats1[ndx] - 1.0f; break;
case 2: inputFloats2[ndx] = inputFloats1[ndx]; break;
case 3: inputFloats2[ndx] = NaN; break;
case 4: inputFloats2[ndx] = inputFloats1[ndx]; inputFloats1[ndx] = NaN; break;
case 5: inputFloats2[ndx] = NaN; inputFloats1[ndx] = NaN; break;
}
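			// OpFUnord* comparisons are true when either operand is NaN or when the ordered comparison holds.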
expectedInts[ndx] = tcu::Float32(inputFloats1[ndx]).isNaN() || tcu::Float32(inputFloats2[ndx]).isNaN() || cases[caseNdx].compareFunc(inputFloats1[ndx], inputFloats2[ndx]);
}
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats1)));
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats2)));
spec.outputs.push_back(BufferSp(new Int32Buffer(expectedInts)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.verifyIO = testWithNan ? &compareFUnord<true> : &compareFUnord<false>;
if (testWithNan)
{
spec.extensions.push_back("VK_KHR_shader_float_controls");
spec.requestedVulkanFeatures.floatControlsProperties.shaderSignedZeroInfNanPreserveFloat32 = DE_TRUE;
}
group->addChild(new SpvAsmComputeShaderCase(testCtx, cases[caseNdx].name, cases[caseNdx].name, spec));
}
return group.release();
}
struct OpAtomicCase
{
const char* name;
const char* assembly;
const char* retValAssembly;
OpAtomicType opAtomic;
deInt32 numOutputElements;
OpAtomicCase(const char* _name, const char* _assembly, const char* _retValAssembly, OpAtomicType _opAtomic, deInt32 _numOutputElements)
: name (_name)
, assembly (_assembly)
, retValAssembly (_retValAssembly)
, opAtomic (_opAtomic)
, numOutputElements (_numOutputElements) {}
};
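// Creates the OpAtomic* test group. useStorageBuffer switches the buffers to the StorageBuffer
// storage class, verifyReturnValues adds a third buffer capturing the value returned by each
// atomic, and volatileAtomic uses the Vulkan memory model with Volatile semantics at QueueFamily scope.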
tcu::TestCaseGroup* createOpAtomicGroup (tcu::TestContext& testCtx, bool useStorageBuffer, int numElements = 65535, bool verifyReturnValues = false, bool volatileAtomic = false)
{
std::string groupName ("opatomic");
if (useStorageBuffer)
groupName += "_storage_buffer";
if (verifyReturnValues)
groupName += "_return_values";
if (volatileAtomic)
groupName += "_volatile";
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, groupName.c_str(), "Test the OpAtomic* opcodes"));
vector<OpAtomicCase> cases;
const StringTemplate shaderTemplate (
string("OpCapability Shader\n") +
(volatileAtomic ? "OpCapability VulkanMemoryModelKHR\n" : "") +
(useStorageBuffer ? "OpExtension \"SPV_KHR_storage_buffer_storage_class\"\n" : "") +
(volatileAtomic ? "OpExtension \"SPV_KHR_vulkan_memory_model\"\n" : "") +
(volatileAtomic ? "OpMemoryModel Logical VulkanKHR\n" : "OpMemoryModel Logical GLSL450\n") +
"OpEntryPoint GLCompute %main \"main\" %id\n"
"OpExecutionMode %main LocalSize 1 1 1\n" +
"OpSource GLSL 430\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf ${BLOCK_DECORATION}\n"
"OpDecorate %indata DescriptorSet 0\n"
"OpDecorate %indata Binding 0\n"
"OpDecorate %i32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
"OpDecorate %sumbuf ${BLOCK_DECORATION}\n"
"OpDecorate %sum DescriptorSet 0\n"
"OpDecorate %sum Binding 1\n"
"OpMemberDecorate %sumbuf 0 Offset 0\n"
"${RETVAL_BUF_DECORATE}"
+ getComputeAsmCommonTypes("${BLOCK_POINTER_TYPE}") +
"%buf = OpTypeStruct %i32arr\n"
"%bufptr = OpTypePointer ${BLOCK_POINTER_TYPE} %buf\n"
"%indata = OpVariable %bufptr ${BLOCK_POINTER_TYPE}\n"
"%sumbuf = OpTypeStruct %i32arr\n"
"%sumbufptr = OpTypePointer ${BLOCK_POINTER_TYPE} %sumbuf\n"
"%sum = OpVariable %sumbufptr ${BLOCK_POINTER_TYPE}\n"
"${RETVAL_BUF_DECL}"
"%id = OpVariable %uvec3ptr Input\n"
"%minusone = OpConstant %i32 -1\n"
"%zero = OpConstant %i32 0\n"
"%one = OpConstant %u32 1\n"
"%two = OpConstant %i32 2\n"
"%five = OpConstant %i32 5\n"
"%volbit = OpConstant %i32 32768\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc = OpAccessChain %i32ptr %indata %zero %x\n"
"%inval = OpLoad %i32 %inloc\n"
"%outloc = OpAccessChain %i32ptr %sum %zero ${INDEX}\n"
"${INSTRUCTION}"
"${RETVAL_ASSEMBLY}"
" OpReturn\n"
" OpFunctionEnd\n");
#define ADD_OPATOMIC_CASE(NAME, ASSEMBLY, RETVAL_ASSEMBLY, OPATOMIC, NUM_OUTPUT_ELEMENTS) \
do { \
DE_ASSERT((NUM_OUTPUT_ELEMENTS) == 1 || (NUM_OUTPUT_ELEMENTS) == numElements); \
cases.push_back(OpAtomicCase(#NAME, ASSEMBLY, RETVAL_ASSEMBLY, OPATOMIC, NUM_OUTPUT_ELEMENTS)); \
} while (deGetFalse())
#define ADD_OPATOMIC_CASE_1(NAME, ASSEMBLY, RETVAL_ASSEMBLY, OPATOMIC) ADD_OPATOMIC_CASE(NAME, ASSEMBLY, RETVAL_ASSEMBLY, OPATOMIC, 1)
#define ADD_OPATOMIC_CASE_N(NAME, ASSEMBLY, RETVAL_ASSEMBLY, OPATOMIC) ADD_OPATOMIC_CASE(NAME, ASSEMBLY, RETVAL_ASSEMBLY, OPATOMIC, numElements)
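	// _1 cases accumulate into a single output element (indexed with %zero); _N cases write one output element per invocation (indexed with %x).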
ADD_OPATOMIC_CASE_1(iadd, "%retv = OpAtomicIAdd %i32 %outloc ${SCOPE} ${SEMANTICS} %inval\n",
" OpStore %retloc %retv\n", OPATOMIC_IADD );
ADD_OPATOMIC_CASE_1(isub, "%retv = OpAtomicISub %i32 %outloc ${SCOPE} ${SEMANTICS} %inval\n",
" OpStore %retloc %retv\n", OPATOMIC_ISUB );
ADD_OPATOMIC_CASE_1(iinc, "%retv = OpAtomicIIncrement %i32 %outloc ${SCOPE} ${SEMANTICS}\n",
" OpStore %retloc %retv\n", OPATOMIC_IINC );
ADD_OPATOMIC_CASE_1(idec, "%retv = OpAtomicIDecrement %i32 %outloc ${SCOPE} ${SEMANTICS}\n",
" OpStore %retloc %retv\n", OPATOMIC_IDEC );
if (!verifyReturnValues)
{
ADD_OPATOMIC_CASE_N(load, "%inval2 = OpAtomicLoad %i32 %inloc ${SCOPE} ${SEMANTICS}\n"
" OpStore %outloc %inval2\n", "", OPATOMIC_LOAD );
ADD_OPATOMIC_CASE_N(store, " OpAtomicStore %outloc ${SCOPE} ${SEMANTICS} %inval\n", "", OPATOMIC_STORE );
}
ADD_OPATOMIC_CASE_N(compex, "%even = OpSMod %i32 %inval %two\n"
" OpStore %outloc %even\n"
"%retv = OpAtomicCompareExchange %i32 %outloc ${SCOPE} ${SEMANTICS} ${SEMANTICS} %minusone %zero\n",
" OpStore %retloc %retv\n", OPATOMIC_COMPEX );
#undef ADD_OPATOMIC_CASE
#undef ADD_OPATOMIC_CASE_1
#undef ADD_OPATOMIC_CASE_N
for (size_t caseNdx = 0; caseNdx < cases.size(); ++caseNdx)
{
map<string, string> specializations;
ComputeShaderSpec spec;
vector<deInt32> inputInts (numElements, 0);
vector<deInt32> expected (cases[caseNdx].numOutputElements, -1);
if (volatileAtomic)
{
spec.extensions.push_back("VK_KHR_vulkan_memory_model");
spec.requestedVulkanFeatures.extVulkanMemoryModel.vulkanMemoryModel = true;
			// volatile, queue family scope
specializations["SEMANTICS"] = "%volbit";
specializations["SCOPE"] = "%five";
}
else
{
// non-volatile, device scope
specializations["SEMANTICS"] = "%zero";
specializations["SCOPE"] = "%one";
}
specializations["INDEX"] = (cases[caseNdx].numOutputElements == 1) ? "%zero" : "%x";
specializations["INSTRUCTION"] = cases[caseNdx].assembly;
specializations["BLOCK_DECORATION"] = useStorageBuffer ? "Block" : "BufferBlock";
specializations["BLOCK_POINTER_TYPE"] = useStorageBuffer ? "StorageBuffer" : "Uniform";
if (verifyReturnValues)
{
const StringTemplate blockDecoration (
"\n"
"OpDecorate %retbuf ${BLOCK_DECORATION}\n"
"OpDecorate %ret DescriptorSet 0\n"
"OpDecorate %ret Binding 2\n"
"OpMemberDecorate %retbuf 0 Offset 0\n\n");
const StringTemplate blockDeclaration (
"\n"
"%retbuf = OpTypeStruct %i32arr\n"
"%retbufptr = OpTypePointer ${BLOCK_POINTER_TYPE} %retbuf\n"
"%ret = OpVariable %retbufptr ${BLOCK_POINTER_TYPE}\n\n");
specializations["RETVAL_ASSEMBLY"] =
"%retloc = OpAccessChain %i32ptr %ret %zero %x\n"
+ std::string(cases[caseNdx].retValAssembly);
specializations["RETVAL_BUF_DECORATE"] = blockDecoration.specialize(specializations);
specializations["RETVAL_BUF_DECL"] = blockDeclaration.specialize(specializations);
}
else
{
specializations["RETVAL_ASSEMBLY"] = "";
specializations["RETVAL_BUF_DECORATE"] = "";
specializations["RETVAL_BUF_DECL"] = "";
}
spec.assembly = shaderTemplate.specialize(specializations);
// Specialize one more time, to catch things that were in a template parameter
const StringTemplate assemblyTemplate(spec.assembly);
spec.assembly = assemblyTemplate.specialize(specializations);
if (useStorageBuffer)
spec.extensions.push_back("VK_KHR_storage_buffer_storage_class");
spec.inputs.push_back(BufferSp(new OpAtomicBuffer(numElements, cases[caseNdx].numOutputElements, cases[caseNdx].opAtomic, BUFFERTYPE_INPUT)));
spec.outputs.push_back(BufferSp(new OpAtomicBuffer(numElements, cases[caseNdx].numOutputElements, cases[caseNdx].opAtomic, BUFFERTYPE_EXPECTED)));
if (verifyReturnValues)
spec.outputs.push_back(BufferSp(new OpAtomicBuffer(numElements, cases[caseNdx].numOutputElements, cases[caseNdx].opAtomic, BUFFERTYPE_ATOMIC_RET)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
if (verifyReturnValues)
{
switch (cases[caseNdx].opAtomic)
{
case OPATOMIC_IADD:
spec.verifyIO = OpAtomicBuffer::compareWithRetvals<OPATOMIC_IADD>;
break;
case OPATOMIC_ISUB:
spec.verifyIO = OpAtomicBuffer::compareWithRetvals<OPATOMIC_ISUB>;
break;
case OPATOMIC_IINC:
spec.verifyIO = OpAtomicBuffer::compareWithRetvals<OPATOMIC_IINC>;
break;
case OPATOMIC_IDEC:
spec.verifyIO = OpAtomicBuffer::compareWithRetvals<OPATOMIC_IDEC>;
break;
case OPATOMIC_COMPEX:
spec.verifyIO = OpAtomicBuffer::compareWithRetvals<OPATOMIC_COMPEX>;
break;
default:
DE_FATAL("Unsupported OpAtomic type for return value verification");
}
}
group->addChild(new SpvAsmComputeShaderCase(testCtx, cases[caseNdx].name, cases[caseNdx].name, spec));
}
return group.release();
}
tcu::TestCaseGroup* createOpLineGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opline", "Test the OpLine instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 100;
vector<float> positiveFloats (numElements, 0);
vector<float> negativeFloats (numElements, 0);
fillRandomScalars(rnd, 1.f, 100.f, &positiveFloats[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
negativeFloats[ndx] = -positiveFloats[ndx];
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"%fname1 = OpString \"negateInputs.comp\"\n"
"%fname2 = OpString \"negateInputs\"\n"
"OpSource GLSL 430\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
+ string(getComputeAsmInputOutputBufferTraits()) +
"OpLine %fname1 0 0\n" // At the earliest possible position
+ string(getComputeAsmCommonTypes()) + string(getComputeAsmInputOutputBuffer()) +
"OpLine %fname1 0 1\n" // Multiple OpLines in sequence
"OpLine %fname2 1 0\n" // Different filenames
"OpLine %fname1 1000 100000\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"OpLine %fname1 1 1\n" // Before a function
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"OpLine %fname1 1 1\n" // In a function
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc = OpAccessChain %f32ptr %indata %zero %x\n"
"%inval = OpLoad %f32 %inloc\n"
"%neg = OpFNegate %f32 %inval\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %neg\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(positiveFloats)));
spec.outputs.push_back(BufferSp(new Float32Buffer(negativeFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "OpLine appearing at different places", spec));
return group.release();
}
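// Checks that the three OpModuleProcessed strings emitted by the OpModuleProcessed test below
// survive, in order, into the final SPIR-V binary.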
bool veryfiBinaryShader (const ProgramBinary& binary)
{
	const size_t	patternCount					= 3u;
	bool			patternsCheck[patternCount]		=
	{
		false, false, false
	};
	const string	patterns[patternCount]			=
	{
		"VULKAN CTS",
		"Negative values",
		"Date: 2017/09/21"
	};
	size_t			patternNdx						= 0u;
	for (size_t ndx = 0u; ndx < binary.getSize(); ++ndx)
	{
		if (false == patternsCheck[patternNdx] &&
			ndx + patterns[patternNdx].length() <= binary.getSize() &&
			patterns[patternNdx][0] == static_cast<char>(binary.getBinary()[ndx]) &&
			deMemoryEqual((const char*)&binary.getBinary()[ndx], &patterns[patternNdx][0], patterns[patternNdx].length()))
		{
			patternsCheck[patternNdx] = true;
			patternNdx++;
			if (patternNdx == patternCount)
				break;
		}
	}
	for (size_t ndx = 0u; ndx < patternCount; ++ndx)
	{
		if (!patternsCheck[ndx])
			return false;
	}
return true;
}
tcu::TestCaseGroup* createOpModuleProcessedGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opmoduleprocessed", "Test the OpModuleProcessed instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 10;
vector<float> positiveFloats (numElements, 0);
vector<float> negativeFloats (numElements, 0);
fillRandomScalars(rnd, 1.f, 100.f, &positiveFloats[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
negativeFloats[ndx] = -positiveFloats[ndx];
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"%fname = OpString \"negateInputs.comp\"\n"
"OpSource GLSL 430\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpModuleProcessed \"VULKAN CTS\"\n" //OpModuleProcessed;
"OpModuleProcessed \"Negative values\"\n"
"OpModuleProcessed \"Date: 2017/09/21\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
+ string(getComputeAsmInputOutputBufferTraits())
+ string(getComputeAsmCommonTypes()) + string(getComputeAsmInputOutputBuffer()) +
"OpLine %fname 0 1\n"
"OpLine %fname 1000 1\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc = OpAccessChain %f32ptr %indata %zero %x\n"
"%inval = OpLoad %f32 %inloc\n"
"%neg = OpFNegate %f32 %inval\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %neg\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(positiveFloats)));
spec.outputs.push_back(BufferSp(new Float32Buffer(negativeFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.verifyBinary = veryfiBinaryShader;
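	// OpModuleProcessed was introduced in SPIR-V 1.1; build this module as SPIR-V 1.3.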
spec.spirvVersion = SPIRV_VERSION_1_3;
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "OpModuleProcessed Tests", spec));
return group.release();
}
tcu::TestCaseGroup* createOpNoLineGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opnoline", "Test the OpNoLine instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 100;
vector<float> positiveFloats (numElements, 0);
vector<float> negativeFloats (numElements, 0);
fillRandomScalars(rnd, 1.f, 100.f, &positiveFloats[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
negativeFloats[ndx] = -positiveFloats[ndx];
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"%fname = OpString \"negateInputs.comp\"\n"
"OpSource GLSL 430\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
+ string(getComputeAsmInputOutputBufferTraits()) +
"OpNoLine\n" // At the earliest possible position, without preceding OpLine
+ string(getComputeAsmCommonTypes()) + string(getComputeAsmInputOutputBuffer()) +
"OpLine %fname 0 1\n"
"OpNoLine\n" // Immediately following a preceding OpLine
"OpLine %fname 1000 1\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"OpNoLine\n" // Contents after the previous OpLine
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"OpNoLine\n" // Multiple OpNoLine
"OpNoLine\n"
"OpNoLine\n"
"%inloc = OpAccessChain %f32ptr %indata %zero %x\n"
"%inval = OpLoad %f32 %inloc\n"
"%neg = OpFNegate %f32 %inval\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %neg\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(positiveFloats)));
spec.outputs.push_back(BufferSp(new Float32Buffer(negativeFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "OpNoLine appearing at different places", spec));
return group.release();
}
// Compare instruction for the contraction compute case.
// Returns true if the output is what is expected from the test case.
bool compareNoContractCase(const std::vector<Resource>&, const vector<AllocationSp>& outputAllocs, const std::vector<Resource>& expectedOutputs, TestLog&)
{
if (outputAllocs.size() != 1)
return false;
// Only size is needed because we are not comparing the exact values.
size_t byteSize = expectedOutputs[0].getByteSize();
const float* outputAsFloat = static_cast<const float*>(outputAllocs[0]->getHostPtr());
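	// Accept 0.f (product rounded to 1.0) or -2^-24 (product rounded to the float just below 1.0).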
	for (size_t i = 0; i < byteSize / sizeof(float); ++i)
	{
		if (outputAsFloat[i] != 0.f &&
			outputAsFloat[i] != -ldexp(1, -24))
		{
			return false;
		}
	}
return true;
}
tcu::TestCaseGroup* createNoContractionGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "nocontraction", "Test the NoContraction decoration"));
vector<CaseParameter> cases;
const int numElements = 100;
vector<float> inputFloats1 (numElements, 0);
vector<float> inputFloats2 (numElements, 0);
vector<float> outputFloats (numElements, 0);
const StringTemplate shaderTemplate (
string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"${DECORATION}\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %f32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %f32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%c_f_m1 = OpConstant %f32 -1.\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %f32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %f32 %inloc1\n"
"%inloc2 = OpAccessChain %f32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %f32 %inloc2\n"
"%mul = OpFMul %f32 %inval1 %inval2\n"
"%add = OpFAdd %f32 %mul %c_f_m1\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %add\n"
" OpReturn\n"
" OpFunctionEnd\n");
cases.push_back(CaseParameter("multiplication", "OpDecorate %mul NoContraction"));
cases.push_back(CaseParameter("addition", "OpDecorate %add NoContraction"));
cases.push_back(CaseParameter("both", "OpDecorate %mul NoContraction\nOpDecorate %add NoContraction"));
for (size_t ndx = 0; ndx < numElements; ++ndx)
{
inputFloats1[ndx] = 1.f + std::ldexp(1.f, -23); // 1 + 2^-23.
inputFloats2[ndx] = 1.f - std::ldexp(1.f, -23); // 1 - 2^-23.
		// The exact value of (1 + 2^-23) * (1 - 2^-23) - 1 is -2^-46. With NoContraction, the
		// multiplication is performed separately and its result rounds either to 1.0 or to the
		// next representable float below it, 0x1.fffffep-1 (1 - 2^-24), so the final result is
		// 0.f or -0x1p-24.
		// If the operations were combined into a precise fused multiply-add, the result would be
		// -2^-46 (0xa8800000).
outputFloats[ndx] = 0.f;
}
for (size_t caseNdx = 0; caseNdx < cases.size(); ++caseNdx)
{
map<string, string> specializations;
ComputeShaderSpec spec;
specializations["DECORATION"] = cases[caseNdx].param;
spec.assembly = shaderTemplate.specialize(specializations);
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats1)));
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats2)));
spec.outputs.push_back(BufferSp(new Float32Buffer(outputFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
// Check against the two possible answers based on rounding mode.
spec.verifyIO = &compareNoContractCase;
group->addChild(new SpvAsmComputeShaderCase(testCtx, cases[caseNdx].name, cases[caseNdx].name, spec));
}
return group.release();
}
bool compareFRem(const std::vector<Resource>&, const vector<AllocationSp>& outputAllocs, const std::vector<Resource>& expectedOutputs, TestLog&)
{
if (outputAllocs.size() != 1)
return false;
vector<deUint8> expectedBytes;
expectedOutputs[0].getBytes(expectedBytes);
const float* expectedOutputAsFloat = reinterpret_cast<const float*>(&expectedBytes.front());
const float* outputAsFloat = static_cast<const float*>(outputAllocs[0]->getHostPtr());
for (size_t idx = 0; idx < expectedBytes.size() / sizeof(float); ++idx)
{
const float f0 = expectedOutputAsFloat[idx];
const float f1 = outputAsFloat[idx];
// \todo relative error needs to be fairly high because FRem may be implemented as
// (roughly) frac(a/b)*b, so LSB errors can be magnified. But this should be fine for now.
if (deFloatAbs((f1 - f0) / f0) > 0.02)
return false;
}
return true;
}
tcu::TestCaseGroup* createOpFRemGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opfrem", "Test the OpFRem instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
vector<float> inputFloats1 (numElements, 0);
vector<float> inputFloats2 (numElements, 0);
vector<float> outputFloats (numElements, 0);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats1[0], numElements);
fillRandomScalars(rnd, -100.f, 100.f, &inputFloats2[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
{
// Guard against divisors near zero.
if (std::fabs(inputFloats2[ndx]) < 1e-3)
inputFloats2[ndx] = 8.f;
		// The return value of std::fmod() has the same sign as its first operand, which is how OpFRem is specified.
outputFloats[ndx] = std::fmod(inputFloats1[ndx], inputFloats2[ndx]);
}
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %f32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %f32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %f32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %f32 %inloc1\n"
"%inloc2 = OpAccessChain %f32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %f32 %inloc2\n"
"%rem = OpFRem %f32 %inval1 %inval2\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats1)));
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats2)));
spec.outputs.push_back(BufferSp(new Float32Buffer(outputFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.verifyIO = &compareFRem;
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "", spec));
return group.release();
}
bool compareNMin (const std::vector<Resource>&, const vector<AllocationSp>& outputAllocs, const std::vector<Resource>& expectedOutputs, TestLog&)
{
if (outputAllocs.size() != 1)
return false;
const BufferSp& expectedOutput (expectedOutputs[0].getBuffer());
std::vector<deUint8> data;
expectedOutput->getBytes(data);
const float* const expectedOutputAsFloat = reinterpret_cast<const float*>(&data.front());
const float* const outputAsFloat = static_cast<const float*>(outputAllocs[0]->getHostPtr());
for (size_t idx = 0; idx < expectedOutput->getByteSize() / sizeof(float); ++idx)
{
const float f0 = expectedOutputAsFloat[idx];
const float f1 = outputAsFloat[idx];
		// For NMin, we accept NaN as output if both inputs were NaN.
		// Otherwise NaN is the wrong choice, since on architectures that
		// do not handle NaN, NaNs behave like huge values.
if (!(tcu::Float32(f1).isNaN() && tcu::Float32(f0).isNaN()) && deFloatAbs(f1 - f0) > 0.00001f)
return false;
}
return true;
}
tcu::TestCaseGroup* createOpNMinGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opnmin", "Test the OpNMin instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
vector<float> inputFloats1 (numElements, 0);
vector<float> inputFloats2 (numElements, 0);
vector<float> outputFloats (numElements, 0);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats1[0], numElements);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats2[0], numElements);
// Make the first case a full-NAN case.
inputFloats1[0] = TCU_NAN;
inputFloats2[0] = TCU_NAN;
for (size_t ndx = 0; ndx < numElements; ++ndx)
{
// By default, pick the smallest
outputFloats[ndx] = std::min(inputFloats1[ndx], inputFloats2[ndx]);
// Make half of the cases NaN cases
if ((ndx & 1) == 0)
{
// Alternate between the NaN operand
if ((ndx & 2) == 0)
{
outputFloats[ndx] = inputFloats2[ndx];
inputFloats1[ndx] = TCU_NAN;
}
else
{
outputFloats[ndx] = inputFloats1[ndx];
inputFloats2[ndx] = TCU_NAN;
}
}
}
spec.assembly =
"OpCapability Shader\n"
"%std450 = OpExtInstImport \"GLSL.std.450\"\n"
"OpMemoryModel Logical GLSL450\n"
"OpEntryPoint GLCompute %main \"main\" %id\n"
"OpExecutionMode %main LocalSize 1 1 1\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %f32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %f32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %f32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %f32 %inloc1\n"
"%inloc2 = OpAccessChain %f32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %f32 %inloc2\n"
"%rem = OpExtInst %f32 %std450 NMin %inval1 %inval2\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats1)));
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats2)));
spec.outputs.push_back(BufferSp(new Float32Buffer(outputFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.verifyIO = &compareNMin;
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "", spec));
return group.release();
}
bool compareNMax (const std::vector<Resource>&, const vector<AllocationSp>& outputAllocs, const std::vector<Resource>& expectedOutputs, TestLog&)
{
if (outputAllocs.size() != 1)
return false;
const BufferSp& expectedOutput = expectedOutputs[0].getBuffer();
std::vector<deUint8> data;
expectedOutput->getBytes(data);
const float* const expectedOutputAsFloat = reinterpret_cast<const float*>(&data.front());
const float* const outputAsFloat = static_cast<const float*>(outputAllocs[0]->getHostPtr());
for (size_t idx = 0; idx < expectedOutput->getByteSize() / sizeof(float); ++idx)
{
const float f0 = expectedOutputAsFloat[idx];
const float f1 = outputAsFloat[idx];
// For NMax, NaN is considered acceptable result, since in
// architectures that do not handle NaNs, those are huge values.
if (!tcu::Float32(f1).isNaN() && deFloatAbs(f1 - f0) > 0.00001f)
return false;
}
return true;
}
tcu::TestCaseGroup* createOpNMaxGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(testCtx, "opnmax", "Test the OpNMax instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
vector<float> inputFloats1 (numElements, 0);
vector<float> inputFloats2 (numElements, 0);
vector<float> outputFloats (numElements, 0);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats1[0], numElements);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats2[0], numElements);
// Make the first case a full-NAN case.
inputFloats1[0] = TCU_NAN;
inputFloats2[0] = TCU_NAN;
for (size_t ndx = 0; ndx < numElements; ++ndx)
{
// By default, pick the biggest
outputFloats[ndx] = std::max(inputFloats1[ndx], inputFloats2[ndx]);
// Make half of the cases NaN cases
if ((ndx & 1) == 0)
{
// Alternate between the NaN operand
if ((ndx & 2) == 0)
{
outputFloats[ndx] = inputFloats2[ndx];
inputFloats1[ndx] = TCU_NAN;
}
else
{
outputFloats[ndx] = inputFloats1[ndx];
inputFloats2[ndx] = TCU_NAN;
}
}
}
spec.assembly =
"OpCapability Shader\n"
"%std450 = OpExtInstImport \"GLSL.std.450\"\n"
"OpMemoryModel Logical GLSL450\n"
"OpEntryPoint GLCompute %main \"main\" %id\n"
"OpExecutionMode %main LocalSize 1 1 1\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %f32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %f32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %f32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %f32 %inloc1\n"
"%inloc2 = OpAccessChain %f32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %f32 %inloc2\n"
"%rem = OpExtInst %f32 %std450 NMax %inval1 %inval2\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats1)));
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats2)));
spec.outputs.push_back(BufferSp(new Float32Buffer(outputFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.verifyIO = &compareNMax;
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "", spec));
return group.release();
}
bool compareNClamp (const std::vector<Resource>&, const vector<AllocationSp>& outputAllocs, const std::vector<Resource>& expectedOutputs, TestLog&)
{
if (outputAllocs.size() != 1)
return false;
const BufferSp& expectedOutput = expectedOutputs[0].getBuffer();
std::vector<deUint8> data;
expectedOutput->getBytes(data);
const float* const expectedOutputAsFloat = reinterpret_cast<const float*>(&data.front());
const float* const outputAsFloat = static_cast<const float*>(outputAllocs[0]->getHostPtr());
for (size_t idx = 0; idx < expectedOutput->getByteSize() / sizeof(float) / 2; ++idx)
{
const float e0 = expectedOutputAsFloat[idx * 2];
const float e1 = expectedOutputAsFloat[idx * 2 + 1];
const float res = outputAsFloat[idx];
		// For NClamp there are two possible outcomes, depending on
		// whether NaNs are handled or not.
		// If either the min or the max value is NaN, the result is undefined,
		// so this test doesn't stress those. If the clamped value is
		// NaN and NaNs are handled, the result is the min; if NaNs are not
		// handled, they behave like huge values and the result is the max.
		// If all three parameters are NaN, the result should be NaN.
if (!((tcu::Float32(e0).isNaN() && tcu::Float32(res).isNaN()) ||
(deFloatAbs(e0 - res) < 0.00001f) ||
(deFloatAbs(e1 - res) < 0.00001f)))
return false;
}
return true;
}
tcu::TestCaseGroup* createOpNClampGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opnclamp", "Test the OpNClamp instruction"));
ComputeShaderSpec spec;
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
vector<float> inputFloats1 (numElements, 0);
vector<float> inputFloats2 (numElements, 0);
vector<float> inputFloats3 (numElements, 0);
vector<float> outputFloats (numElements * 2, 0);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats1[0], numElements);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats2[0], numElements);
fillRandomScalars(rnd, -10000.f, 10000.f, &inputFloats3[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
{
// Results are only defined if max value is bigger than min value.
if (inputFloats2[ndx] > inputFloats3[ndx])
{
float t = inputFloats2[ndx];
inputFloats2[ndx] = inputFloats3[ndx];
inputFloats3[ndx] = t;
}
// By default, do the clamp, setting both possible answers
float defaultRes = std::min(std::max(inputFloats1[ndx], inputFloats2[ndx]), inputFloats3[ndx]);
float maxResA = std::max(inputFloats1[ndx], inputFloats2[ndx]);
float maxResB = maxResA;
// Alternate between the NaN cases
if (ndx & 1)
{
inputFloats1[ndx] = TCU_NAN;
// If NaN is handled, the result should be same as the clamp minimum.
// If NaN is not handled, the result should clamp to the clamp maximum.
maxResA = inputFloats2[ndx];
maxResB = inputFloats3[ndx];
}
else
{
// Not a NaN case - only one legal result.
maxResA = defaultRes;
maxResB = defaultRes;
}
outputFloats[ndx * 2] = maxResA;
outputFloats[ndx * 2 + 1] = maxResB;
}
// Make the first case a full-NAN case.
inputFloats1[0] = TCU_NAN;
inputFloats2[0] = TCU_NAN;
inputFloats3[0] = TCU_NAN;
outputFloats[0] = TCU_NAN;
outputFloats[1] = TCU_NAN;
spec.assembly =
"OpCapability Shader\n"
"%std450 = OpExtInstImport \"GLSL.std.450\"\n"
"OpMemoryModel Logical GLSL450\n"
"OpEntryPoint GLCompute %main \"main\" %id\n"
"OpExecutionMode %main LocalSize 1 1 1\n"
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %indata3 DescriptorSet 0\n"
"OpDecorate %indata3 Binding 2\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 3\n"
"OpDecorate %f32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %f32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%indata3 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %f32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %f32 %inloc1\n"
"%inloc2 = OpAccessChain %f32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %f32 %inloc2\n"
"%inloc3 = OpAccessChain %f32ptr %indata3 %zero %x\n"
"%inval3 = OpLoad %f32 %inloc3\n"
"%rem = OpExtInst %f32 %std450 NClamp %inval1 %inval2 %inval3\n"
"%outloc = OpAccessChain %f32ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats1)));
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats2)));
spec.inputs.push_back(BufferSp(new Float32Buffer(inputFloats3)));
spec.outputs.push_back(BufferSp(new Float32Buffer(outputFloats)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.verifyIO = &compareNClamp;
group->addChild(new SpvAsmComputeShaderCase(testCtx, "all", "", spec));
return group.release();
}
tcu::TestCaseGroup* createOpSRemComputeGroup (tcu::TestContext& testCtx, qpTestResult negFailResult)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opsrem", "Test the OpSRem instruction"));
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
const struct CaseParams
{
const char* name;
const char* failMessage; // customized status message
qpTestResult failResult; // override status on failure
int op1Min, op1Max; // operand ranges
int op2Min, op2Max;
} cases[] =
{
{ "positive", "Output doesn't match with expected", QP_TEST_RESULT_FAIL, 0, 65536, 0, 100 },
{ "all", "Inconsistent results, but within specification", negFailResult, -65536, 65536, -100, 100 }, // see below
};
// If either operand is negative the result is undefined. Some implementations may still return correct values.
for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(cases); ++caseNdx)
{
const CaseParams& params = cases[caseNdx];
ComputeShaderSpec spec;
vector<deInt32> inputInts1 (numElements, 0);
vector<deInt32> inputInts2 (numElements, 0);
vector<deInt32> outputInts (numElements, 0);
fillRandomScalars(rnd, params.op1Min, params.op1Max, &inputInts1[0], numElements);
fillRandomScalars(rnd, params.op2Min, params.op2Max, &inputInts2[0], numElements, filterNotZero);
for (int ndx = 0; ndx < numElements; ++ndx)
{
			// The result of the C/C++ % operator has the same sign as its first operand, which matches OpSRem.
outputInts[ndx] = inputInts1[ndx] % inputInts2[ndx];
}
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %i32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %i32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %i32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %i32 %inloc1\n"
"%inloc2 = OpAccessChain %i32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %i32 %inloc2\n"
"%rem = OpSRem %i32 %inval1 %inval2\n"
"%outloc = OpAccessChain %i32ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back (BufferSp(new Int32Buffer(inputInts1)));
spec.inputs.push_back (BufferSp(new Int32Buffer(inputInts2)));
spec.outputs.push_back (BufferSp(new Int32Buffer(outputInts)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.failResult = params.failResult;
spec.failMessage = params.failMessage;
group->addChild(new SpvAsmComputeShaderCase(testCtx, params.name, "", spec));
}
return group.release();
}
tcu::TestCaseGroup* createOpSRemComputeGroup64 (tcu::TestContext& testCtx, qpTestResult negFailResult)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opsrem64", "Test the 64-bit OpSRem instruction"));
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
const struct CaseParams
{
const char* name;
const char* failMessage; // customized status message
qpTestResult failResult; // override status on failure
bool positive;
} cases[] =
{
{ "positive", "Output doesn't match with expected", QP_TEST_RESULT_FAIL, true },
{ "all", "Inconsistent results, but within specification", negFailResult, false }, // see below
};
// If either operand is negative the result is undefined. Some implementations may still return correct values.
for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(cases); ++caseNdx)
{
const CaseParams& params = cases[caseNdx];
ComputeShaderSpec spec;
vector<deInt64> inputInts1 (numElements, 0);
vector<deInt64> inputInts2 (numElements, 0);
vector<deInt64> outputInts (numElements, 0);
if (params.positive)
{
fillRandomInt64sLogDistributed(rnd, inputInts1, numElements, filterNonNegative);
fillRandomInt64sLogDistributed(rnd, inputInts2, numElements, filterPositive);
}
else
{
fillRandomInt64sLogDistributed(rnd, inputInts1, numElements);
fillRandomInt64sLogDistributed(rnd, inputInts2, numElements, filterNotZero);
}
for (int ndx = 0; ndx < numElements; ++ndx)
{
			// The result of the C/C++ % operator has the same sign as its first operand, which matches OpSRem.
outputInts[ndx] = inputInts1[ndx] % inputInts2[ndx];
}
spec.assembly =
"OpCapability Int64\n"
+ string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %i64arr ArrayStride 8\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes())
+ string(getComputeAsmCommonInt64Types()) +
"%buf = OpTypeStruct %i64arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i64 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %i64ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %i64 %inloc1\n"
"%inloc2 = OpAccessChain %i64ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %i64 %inloc2\n"
"%rem = OpSRem %i64 %inval1 %inval2\n"
"%outloc = OpAccessChain %i64ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back (BufferSp(new Int64Buffer(inputInts1)));
spec.inputs.push_back (BufferSp(new Int64Buffer(inputInts2)));
spec.outputs.push_back (BufferSp(new Int64Buffer(outputInts)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.failResult = params.failResult;
spec.failMessage = params.failMessage;
spec.requestedVulkanFeatures.coreFeatures.shaderInt64 = VK_TRUE;
group->addChild(new SpvAsmComputeShaderCase(testCtx, params.name, "", spec));
}
return group.release();
}
tcu::TestCaseGroup* createOpSModComputeGroup (tcu::TestContext& testCtx, qpTestResult negFailResult)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opsmod", "Test the OpSMod instruction"));
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
const struct CaseParams
{
const char* name;
const char* failMessage; // customized status message
qpTestResult failResult; // override status on failure
int op1Min, op1Max; // operand ranges
int op2Min, op2Max;
} cases[] =
{
{ "positive", "Output doesn't match with expected", QP_TEST_RESULT_FAIL, 0, 65536, 0, 100 },
{ "all", "Inconsistent results, but within specification", negFailResult, -65536, 65536, -100, 100 }, // see below
};
// If either operand is negative the result is undefined. Some implementations may still return correct values.
for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(cases); ++caseNdx)
{
const CaseParams& params = cases[caseNdx];
ComputeShaderSpec spec;
vector<deInt32> inputInts1 (numElements, 0);
vector<deInt32> inputInts2 (numElements, 0);
vector<deInt32> outputInts (numElements, 0);
fillRandomScalars(rnd, params.op1Min, params.op1Max, &inputInts1[0], numElements);
fillRandomScalars(rnd, params.op2Min, params.op2Max, &inputInts2[0], numElements, filterNotZero);
for (int ndx = 0; ndx < numElements; ++ndx)
{
deInt32 rem = inputInts1[ndx] % inputInts2[ndx];
if (rem == 0)
{
outputInts[ndx] = 0;
}
else if ((inputInts1[ndx] >= 0) == (inputInts2[ndx] >= 0))
{
// They have the same sign
outputInts[ndx] = rem;
}
else
{
				// They have opposite signs. The C/C++ remainder takes the
				// sign of inputInts1[ndx], but OpSMod is supposed to take the
				// sign of inputInts2[ndx]. Adding inputInts2[ndx] ensures that
				// the result has the correct sign and that it is still
				// congruent to inputInts1[ndx] modulo inputInts2[ndx].
				// For example, -7 % 3 == -1, while SMod(-7, 3) == -1 + 3 == 2.
				//
				// See also http://mathforum.org/library/drmath/view/52343.html
outputInts[ndx] = rem + inputInts2[ndx];
}
}
spec.assembly =
string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %i32arr ArrayStride 4\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes()) +
"%buf = OpTypeStruct %i32arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %i32ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %i32 %inloc1\n"
"%inloc2 = OpAccessChain %i32ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %i32 %inloc2\n"
"%rem = OpSMod %i32 %inval1 %inval2\n"
"%outloc = OpAccessChain %i32ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back (BufferSp(new Int32Buffer(inputInts1)));
spec.inputs.push_back (BufferSp(new Int32Buffer(inputInts2)));
spec.outputs.push_back (BufferSp(new Int32Buffer(outputInts)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.failResult = params.failResult;
spec.failMessage = params.failMessage;
group->addChild(new SpvAsmComputeShaderCase(testCtx, params.name, "", spec));
}
return group.release();
}
tcu::TestCaseGroup* createOpSModComputeGroup64 (tcu::TestContext& testCtx, qpTestResult negFailResult)
{
	de::MovePtr<tcu::TestCaseGroup>	group			(new tcu::TestCaseGroup(testCtx, "opsmod64", "Test the 64-bit OpSMod instruction"));
de::Random rnd (deStringHash(group->getName()));
const int numElements = 200;
const struct CaseParams
{
const char* name;
const char* failMessage; // customized status message
qpTestResult failResult; // override status on failure
bool positive;
} cases[] =
{
{ "positive", "Output doesn't match with expected", QP_TEST_RESULT_FAIL, true },
{ "all", "Inconsistent results, but within specification", negFailResult, false }, // see below
};
// If either operand is negative the result is undefined. Some implementations may still return correct values.
for (int caseNdx = 0; caseNdx < DE_LENGTH_OF_ARRAY(cases); ++caseNdx)
{
const CaseParams& params = cases[caseNdx];
ComputeShaderSpec spec;
vector<deInt64> inputInts1 (numElements, 0);
vector<deInt64> inputInts2 (numElements, 0);
vector<deInt64> outputInts (numElements, 0);
if (params.positive)
{
fillRandomInt64sLogDistributed(rnd, inputInts1, numElements, filterNonNegative);
fillRandomInt64sLogDistributed(rnd, inputInts2, numElements, filterPositive);
}
else
{
fillRandomInt64sLogDistributed(rnd, inputInts1, numElements);
fillRandomInt64sLogDistributed(rnd, inputInts2, numElements, filterNotZero);
}
for (int ndx = 0; ndx < numElements; ++ndx)
{
deInt64 rem = inputInts1[ndx] % inputInts2[ndx];
if (rem == 0)
{
outputInts[ndx] = 0;
}
else if ((inputInts1[ndx] >= 0) == (inputInts2[ndx] >= 0))
{
// They have the same sign
outputInts[ndx] = rem;
}
else
{
				// They have opposite signs. The C/C++ remainder takes the
				// sign of inputInts1[ndx], but OpSMod is supposed to take the
				// sign of inputInts2[ndx]. Adding inputInts2[ndx] ensures that
				// the result has the correct sign and that it is still
				// congruent to inputInts1[ndx] modulo inputInts2[ndx].
				// For example, -7 % 3 == -1, while SMod(-7, 3) == -1 + 3 == 2.
				//
				// See also http://mathforum.org/library/drmath/view/52343.html
outputInts[ndx] = rem + inputInts2[ndx];
}
}
spec.assembly =
"OpCapability Int64\n"
+ string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %buf BufferBlock\n"
"OpDecorate %indata1 DescriptorSet 0\n"
"OpDecorate %indata1 Binding 0\n"
"OpDecorate %indata2 DescriptorSet 0\n"
"OpDecorate %indata2 Binding 1\n"
"OpDecorate %outdata DescriptorSet 0\n"
"OpDecorate %outdata Binding 2\n"
"OpDecorate %i64arr ArrayStride 8\n"
"OpMemberDecorate %buf 0 Offset 0\n"
+ string(getComputeAsmCommonTypes())
+ string(getComputeAsmCommonInt64Types()) +
"%buf = OpTypeStruct %i64arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata1 = OpVariable %bufptr Uniform\n"
"%indata2 = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i64 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc1 = OpAccessChain %i64ptr %indata1 %zero %x\n"
"%inval1 = OpLoad %i64 %inloc1\n"
"%inloc2 = OpAccessChain %i64ptr %indata2 %zero %x\n"
"%inval2 = OpLoad %i64 %inloc2\n"
"%rem = OpSMod %i64 %inval1 %inval2\n"
"%outloc = OpAccessChain %i64ptr %outdata %zero %x\n"
" OpStore %outloc %rem\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec.inputs.push_back (BufferSp(new Int64Buffer(inputInts1)));
spec.inputs.push_back (BufferSp(new Int64Buffer(inputInts2)));
spec.outputs.push_back (BufferSp(new Int64Buffer(outputInts)));
spec.numWorkGroups = IVec3(numElements, 1, 1);
spec.failResult = params.failResult;
spec.failMessage = params.failMessage;
spec.requestedVulkanFeatures.coreFeatures.shaderInt64 = VK_TRUE;
group->addChild(new SpvAsmComputeShaderCase(testCtx, params.name, "", spec));
}
return group.release();
}
// Copy contents in the input buffer to the output buffer.
tcu::TestCaseGroup* createOpCopyMemoryGroup (tcu::TestContext& testCtx)
{
de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "opcopymemory", "Test the OpCopyMemory instruction"));
de::Random rnd (deStringHash(group->getName()));
const int numElements = 100;
// The following case adds vec4(0., 0.5, 1.5, 2.5) to each of the elements in the input buffer and writes output to the output buffer.
ComputeShaderSpec spec1;
vector<Vec4> inputFloats1 (numElements);
vector<Vec4> outputFloats1 (numElements);
fillRandomScalars(rnd, -200.f, 200.f, &inputFloats1[0], numElements * 4);
// CPU might not use the same rounding mode as the GPU. Use whole numbers to avoid rounding differences.
floorAll(inputFloats1);
for (size_t ndx = 0; ndx < numElements; ++ndx)
outputFloats1[ndx] = inputFloats1[ndx] + Vec4(0.f, 0.5f, 1.5f, 2.5f);
spec1.assembly =
string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %vec4arr ArrayStride 16\n"
+ string(getComputeAsmInputOutputBufferTraits()) + string(getComputeAsmCommonTypes()) +
"%vec4 = OpTypeVector %f32 4\n"
"%vec4ptr_u = OpTypePointer Uniform %vec4\n"
"%vec4ptr_f = OpTypePointer Function %vec4\n"
"%vec4arr = OpTypeRuntimeArray %vec4\n"
"%buf = OpTypeStruct %vec4arr\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%c_f_0 = OpConstant %f32 0.\n"
"%c_f_0_5 = OpConstant %f32 0.5\n"
"%c_f_1_5 = OpConstant %f32 1.5\n"
"%c_f_2_5 = OpConstant %f32 2.5\n"
"%c_vec4 = OpConstantComposite %vec4 %c_f_0 %c_f_0_5 %c_f_1_5 %c_f_2_5\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%v_vec4 = OpVariable %vec4ptr_f Function\n"
"%idval = OpLoad %uvec3 %id\n"
"%x = OpCompositeExtract %u32 %idval 0\n"
"%inloc = OpAccessChain %vec4ptr_u %indata %zero %x\n"
"%outloc = OpAccessChain %vec4ptr_u %outdata %zero %x\n"
" OpCopyMemory %v_vec4 %inloc\n"
"%v_vec4_val = OpLoad %vec4 %v_vec4\n"
"%add = OpFAdd %vec4 %v_vec4_val %c_vec4\n"
" OpStore %outloc %add\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec1.inputs.push_back(BufferSp(new Vec4Buffer(inputFloats1)));
spec1.outputs.push_back(BufferSp(new Vec4Buffer(outputFloats1)));
spec1.numWorkGroups = IVec3(numElements, 1, 1);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "vector", "OpCopyMemory elements of vector type", spec1));
// The following case copies a float[100] variable from the input buffer to the output buffer.
ComputeShaderSpec spec2;
vector<float> inputFloats2 (numElements);
vector<float> outputFloats2 (numElements);
fillRandomScalars(rnd, -200.f, 200.f, &inputFloats2[0], numElements);
for (size_t ndx = 0; ndx < numElements; ++ndx)
outputFloats2[ndx] = inputFloats2[ndx];
spec2.assembly =
string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
"OpDecorate %f32arr100 ArrayStride 4\n"
+ string(getComputeAsmInputOutputBufferTraits()) + string(getComputeAsmCommonTypes()) +
"%hundred = OpConstant %u32 100\n"
"%f32arr100 = OpTypeArray %f32 %hundred\n"
"%f32arr100ptr_f = OpTypePointer Function %f32arr100\n"
"%f32arr100ptr_u = OpTypePointer Uniform %f32arr100\n"
"%buf = OpTypeStruct %f32arr100\n"
"%bufptr = OpTypePointer Uniform %buf\n"
"%indata = OpVariable %bufptr Uniform\n"
"%outdata = OpVariable %bufptr Uniform\n"
"%id = OpVariable %uvec3ptr Input\n"
"%zero = OpConstant %i32 0\n"
"%main = OpFunction %void None %voidf\n"
"%label = OpLabel\n"
"%var = OpVariable %f32arr100ptr_f Function\n"
"%inarr = OpAccessChain %f32arr100ptr_u %indata %zero\n"
"%outarr = OpAccessChain %f32arr100ptr_u %outdata %zero\n"
" OpCopyMemory %var %inarr\n"
" OpCopyMemory %outarr %var\n"
" OpReturn\n"
" OpFunctionEnd\n";
spec2.inputs.push_back(BufferSp(new Float32Buffer(inputFloats2)));
spec2.outputs.push_back(BufferSp(new Float32Buffer(outputFloats2)));
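	// A single invocation copies the whole 100-element array, so one workgroup is enough.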
spec2.numWorkGroups = IVec3(1, 1, 1);
group->addChild(new SpvAsmComputeShaderCase(testCtx, "array", "OpCopyMemory elements of array type", spec2));
// The following case copies a struct{vec4, vec4, vec4, vec4} variable from the input buffer to the output buffer.
ComputeShaderSpec spec3;
vector<float> inputFloats3 (16);
vector<float> outputFloats3 (16);
fillRandomScalars(rnd, -200.f, 200.f, &inputFloats3[0], 16);
for (size_t ndx = 0; ndx < 16; ++ndx)
outputFloats3[ndx] = inputFloats3[ndx];
spec3.assembly =
string(getComputeAsmShaderPreamble()) +
"OpName %main \"main\"\n"
"OpName %id \"gl_GlobalInvocationID\"\n"
"OpDecorate %id BuiltIn GlobalInvocationId\n"
//"OpMemberDecorate %buf 0 Offset 0\n" - exists in getComputeAsmInputOutputBufferTraits
"OpMemberDecorate %buf 1 Offset 16\n"
"OpMemberDecorate %buf 2 Offset 32\n"
"OpMemberDecorate %buf 3 Offset 48\n"
+ string(getComputeAsmInputOutput