/*
* Copyright 2015-2021 Arm Limited
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* At your option, you may choose to accept this material under either:
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
*/
#include "spirv_cross.hpp"
#include "GLSL.std.450.h"
#include "spirv_cfg.hpp"
#include "spirv_common.hpp"
#include "spirv_parser.hpp"
#include <algorithm>
#include <cstring>
#include <utility>
using namespace std;
using namespace spv;
using namespace SPIRV_CROSS_NAMESPACE;
Compiler::Compiler(vector<uint32_t> ir_)
{
Parser parser(std::move(ir_));
parser.parse();
set_ir(std::move(parser.get_parsed_ir()));
}
Compiler::Compiler(const uint32_t *ir_, size_t word_count)
{
Parser parser(ir_, word_count);
parser.parse();
set_ir(std::move(parser.get_parsed_ir()));
}
Compiler::Compiler(const ParsedIR &ir_)
{
set_ir(ir_);
}
Compiler::Compiler(ParsedIR &&ir_)
{
set_ir(std::move(ir_));
}
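// Usage sketch (illustrative only, not part of the library source): all
// constructors end up parsing or adopting IR and running parse_fixup().
// Assuming `words` holds a valid SPIR-V module loaded elsewhere:
//   std::vector<uint32_t> words = load_spirv("shader.spv"); // hypothetical loader
//   Compiler compiler(std::move(words));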
void Compiler::set_ir(ParsedIR &&ir_)
{
ir = std::move(ir_);
parse_fixup();
}
void Compiler::set_ir(const ParsedIR &ir_)
{
ir = ir_;
parse_fixup();
}
string Compiler::compile()
{
return "";
}
bool Compiler::variable_storage_is_aliased(const SPIRVariable &v)
{
auto &type = get<SPIRType>(v.basetype);
bool ssbo = v.storage == StorageClassStorageBuffer ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool image = type.basetype == SPIRType::Image;
bool counter = type.basetype == SPIRType::AtomicCounter;
bool buffer_reference = type.storage == StorageClassPhysicalStorageBufferEXT;
bool is_restrict;
if (ssbo)
is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict);
else
is_restrict = has_decoration(v.self, DecorationRestrict);
return !is_restrict && (ssbo || image || counter || buffer_reference);
}
bool Compiler::block_is_pure(const SPIRBlock &block)
{
// This is a global side effect of the function.
if (block.terminator == SPIRBlock::Kill ||
block.terminator == SPIRBlock::TerminateRay ||
block.terminator == SPIRBlock::IgnoreIntersection ||
block.terminator == SPIRBlock::EmitMeshTasks)
return false;
for (auto &i : block.ops)
{
auto ops = stream(i);
auto op = static_cast<Op>(i.op);
switch (op)
{
case OpFunctionCall:
{
uint32_t func = ops[2];
if (!function_is_pure(get<SPIRFunction>(func)))
return false;
break;
}
case OpCopyMemory:
case OpStore:
{
auto &type = expression_type(ops[0]);
if (type.storage != StorageClassFunction)
return false;
break;
}
case OpImageWrite:
return false;
// Atomics are impure.
case OpAtomicLoad:
case OpAtomicStore:
case OpAtomicExchange:
case OpAtomicCompareExchange:
case OpAtomicCompareExchangeWeak:
case OpAtomicIIncrement:
case OpAtomicIDecrement:
case OpAtomicIAdd:
case OpAtomicISub:
case OpAtomicSMin:
case OpAtomicUMin:
case OpAtomicSMax:
case OpAtomicUMax:
case OpAtomicAnd:
case OpAtomicOr:
case OpAtomicXor:
return false;
// Geometry shader builtins modify global state.
case OpEndPrimitive:
case OpEmitStreamVertex:
case OpEndStreamPrimitive:
case OpEmitVertex:
return false;
// Mesh shader functions modify global state.
// (EmitMeshTasks is a terminator).
case OpSetMeshOutputsEXT:
return false;
// Barriers disallow any reordering, so we should treat blocks with barriers as writing.
case OpControlBarrier:
case OpMemoryBarrier:
return false;
// Ray tracing builtins are impure.
case OpReportIntersectionKHR:
case OpIgnoreIntersectionNV:
case OpTerminateRayNV:
case OpTraceNV:
case OpTraceRayKHR:
case OpExecuteCallableNV:
case OpExecuteCallableKHR:
case OpRayQueryInitializeKHR:
case OpRayQueryTerminateKHR:
case OpRayQueryGenerateIntersectionKHR:
case OpRayQueryConfirmIntersectionKHR:
case OpRayQueryProceedKHR:
// There are various getters in ray query, but they are considered pure.
return false;
case OpDemoteToHelperInvocationEXT:
// This is a global side effect of the function.
return false;
// OpExtInst is potentially impure depending on the extension, but GLSL builtins are at least pure.
case OpExtInst:
{
uint32_t extension_set = ops[2];
if (get<SPIRExtension>(extension_set).ext == SPIRExtension::GLSL)
{
auto op_450 = static_cast<GLSLstd450>(ops[3]);
switch (op_450)
{
case GLSLstd450Modf:
case GLSLstd450Frexp:
{
auto &type = expression_type(ops[5]);
if (type.storage != StorageClassFunction)
return false;
break;
}
default:
break;
}
}
break;
}
default:
break;
}
}
return true;
}
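// For example (illustrative), a block that only performs arithmetic on SSA
// temporaries and stores to Function-storage variables is pure, while a block
// containing an OpStore to an SSBO, an atomic, or OpImageWrite is not, since
// those have effects visible outside the function.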
string Compiler::to_name(uint32_t id, bool allow_alias) const
{
if (allow_alias && ir.ids[id].get_type() == TypeType)
{
// If this type is a simple alias, emit the
// name of the original type instead.
// We don't want to override the meta alias
// as that can be overridden by the reflection APIs after parse.
auto &type = get<SPIRType>(id);
if (type.type_alias)
{
// If the alias master has been specially packed, we will have emitted a clean variant as well,
// so skip the name aliasing here.
if (!has_extended_decoration(type.type_alias, SPIRVCrossDecorationBufferBlockRepacked))
return to_name(type.type_alias);
}
}
auto &alias = ir.get_name(id);
if (alias.empty())
return join("_", id);
else
return alias;
}
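// Example (illustrative): an ID 42 with no OpName falls back to "_42", while
// a type which is a plain alias of another type reports the alias master's
// name so both declarations stay consistent in the output.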
bool Compiler::function_is_pure(const SPIRFunction &func)
{
for (auto block : func.blocks)
{
if (!block_is_pure(get<SPIRBlock>(block)))
{
//fprintf(stderr, "Function %s is impure!\n", to_name(func.self).c_str());
return false;
}
}
//fprintf(stderr, "Function %s is pure!\n", to_name(func.self).c_str());
return true;
}
void Compiler::register_global_read_dependencies(const SPIRBlock &block, uint32_t id)
{
for (auto &i : block.ops)
{
auto ops = stream(i);
auto op = static_cast<Op>(i.op);
switch (op)
{
case OpFunctionCall:
{
uint32_t func = ops[2];
register_global_read_dependencies(get<SPIRFunction>(func), id);
break;
}
case OpLoad:
case OpImageRead:
{
// If we're in a storage class which does not get invalidated, adding dependencies here is no big deal.
auto *var = maybe_get_backing_variable(ops[2]);
if (var && var->storage != StorageClassFunction)
{
auto &type = get<SPIRType>(var->basetype);
// Input attachments (subpass inputs) are immutable, so no dependency is needed.
if (type.basetype != SPIRType::Image && type.image.dim != DimSubpassData)
var->dependees.push_back(id);
}
break;
}
default:
break;
}
}
}
void Compiler::register_global_read_dependencies(const SPIRFunction &func, uint32_t id)
{
for (auto block : func.blocks)
register_global_read_dependencies(get<SPIRBlock>(block), id);
}
SPIRVariable *Compiler::maybe_get_backing_variable(uint32_t chain)
{
auto *var = maybe_get<SPIRVariable>(chain);
if (!var)
{
auto *cexpr = maybe_get<SPIRExpression>(chain);
if (cexpr)
var = maybe_get<SPIRVariable>(cexpr->loaded_from);
auto *access_chain = maybe_get<SPIRAccessChain>(chain);
if (access_chain)
var = maybe_get<SPIRVariable>(access_chain->loaded_from);
}
return var;
}
void Compiler::register_read(uint32_t expr, uint32_t chain, bool forwarded)
{
auto &e = get<SPIRExpression>(expr);
auto *var = maybe_get_backing_variable(chain);
if (var)
{
e.loaded_from = var->self;
// If the backing variable is immutable, we do not need to depend on the variable.
if (forwarded && !is_immutable(var->self))
var->dependees.push_back(e.self);
// If we load from a parameter, make sure we create "inout" if we also write to the parameter.
// The default is "in" however, so we never invalidate our compilation by reading.
if (var->parameter)
var->parameter->read_count++;
}
}
void Compiler::register_write(uint32_t chain)
{
auto *var = maybe_get<SPIRVariable>(chain);
if (!var)
{
// If we're storing through an access chain, invalidate the backing variable instead.
auto *expr = maybe_get<SPIRExpression>(chain);
if (expr && expr->loaded_from)
var = maybe_get<SPIRVariable>(expr->loaded_from);
auto *access_chain = maybe_get<SPIRAccessChain>(chain);
if (access_chain && access_chain->loaded_from)
var = maybe_get<SPIRVariable>(access_chain->loaded_from);
}
auto &chain_type = expression_type(chain);
if (var)
{
bool check_argument_storage_qualifier = true;
auto &type = expression_type(chain);
// If our variable is in a storage class which can alias with other buffers,
// invalidate all variables which depend on aliased variables. And if this is a
// variable pointer, then invalidate all variables regardless.
if (get_variable_data_type(*var).pointer)
{
flush_all_active_variables();
if (type.pointer_depth == 1)
{
// We have a backing variable which is a pointer-to-pointer type.
// We are storing some data through a pointer acquired through that variable,
// but we are not writing to the value of the variable itself,
// i.e., we are not modifying the pointer directly.
// If we are storing a non-pointer type (pointer_depth == 1),
// we know that we are storing some unrelated data.
// A case here would be
// void foo(Foo * const *arg) {
// Foo *bar = *arg;
// bar->unrelated = 42;
// }
// arg, the argument, is constant.
check_argument_storage_qualifier = false;
}
}
if (type.storage == StorageClassPhysicalStorageBufferEXT || variable_storage_is_aliased(*var))
flush_all_aliased_variables();
else
flush_dependees(*var);
// We tried to write to a parameter which is not marked with the out qualifier; force a recompile.
if (check_argument_storage_qualifier && var->parameter && var->parameter->write_count == 0)
{
var->parameter->write_count++;
force_recompile();
}
}
else if (chain_type.pointer)
{
// If we stored through a variable pointer, then we don't know which
// variable we stored to. So *all* expressions after this point need to
// be invalidated.
// FIXME: If we can prove that the variable pointer will point to
// only certain variables, we can invalidate only those.
flush_all_active_variables();
}
// If chain_type.pointer is false, we're not writing to memory backed variables, but temporaries instead.
// This can happen in copy_logical_type where we unroll complex reads and writes to temporaries.
}
void Compiler::flush_dependees(SPIRVariable &var)
{
for (auto expr : var.dependees)
invalid_expressions.insert(expr);
var.dependees.clear();
}
void Compiler::flush_all_aliased_variables()
{
for (auto aliased : aliased_variables)
flush_dependees(get<SPIRVariable>(aliased));
}
void Compiler::flush_all_atomic_capable_variables()
{
for (auto global : global_variables)
flush_dependees(get<SPIRVariable>(global));
flush_all_aliased_variables();
}
void Compiler::flush_control_dependent_expressions(uint32_t block_id)
{
auto &block = get<SPIRBlock>(block_id);
for (auto &expr : block.invalidate_expressions)
invalid_expressions.insert(expr);
block.invalidate_expressions.clear();
}
void Compiler::flush_all_active_variables()
{
// Invalidate all temporaries we read from variables in this block since they were forwarded.
// Invalidate all temporaries we read from globals.
for (auto &v : current_function->local_variables)
flush_dependees(get<SPIRVariable>(v));
for (auto &arg : current_function->arguments)
flush_dependees(get<SPIRVariable>(arg.id));
for (auto global : global_variables)
flush_dependees(get<SPIRVariable>(global));
flush_all_aliased_variables();
}
uint32_t Compiler::expression_type_id(uint32_t id) const
{
switch (ir.ids[id].get_type())
{
case TypeVariable:
return get<SPIRVariable>(id).basetype;
case TypeExpression:
return get<SPIRExpression>(id).expression_type;
case TypeConstant:
return get<SPIRConstant>(id).constant_type;
case TypeConstantOp:
return get<SPIRConstantOp>(id).basetype;
case TypeUndef:
return get<SPIRUndef>(id).basetype;
case TypeCombinedImageSampler:
return get<SPIRCombinedImageSampler>(id).combined_type;
case TypeAccessChain:
return get<SPIRAccessChain>(id).basetype;
default:
SPIRV_CROSS_THROW("Cannot resolve expression type.");
}
}
const SPIRType &Compiler::expression_type(uint32_t id) const
{
return get<SPIRType>(expression_type_id(id));
}
bool Compiler::expression_is_lvalue(uint32_t id) const
{
auto &type = expression_type(id);
switch (type.basetype)
{
case SPIRType::SampledImage:
case SPIRType::Image:
case SPIRType::Sampler:
return false;
default:
return true;
}
}
bool Compiler::is_immutable(uint32_t id) const
{
if (ir.ids[id].get_type() == TypeVariable)
{
auto &var = get<SPIRVariable>(id);
// Anything we load from the UniformConstant address space is guaranteed to be immutable.
bool pointer_to_const = var.storage == StorageClassUniformConstant;
return pointer_to_const || var.phi_variable || !expression_is_lvalue(id);
}
else if (ir.ids[id].get_type() == TypeAccessChain)
return get<SPIRAccessChain>(id).immutable;
else if (ir.ids[id].get_type() == TypeExpression)
return get<SPIRExpression>(id).immutable;
else if (ir.ids[id].get_type() == TypeConstant || ir.ids[id].get_type() == TypeConstantOp ||
ir.ids[id].get_type() == TypeUndef)
return true;
else
return false;
}
static inline bool storage_class_is_interface(spv::StorageClass storage)
{
switch (storage)
{
case StorageClassInput:
case StorageClassOutput:
case StorageClassUniform:
case StorageClassUniformConstant:
case StorageClassAtomicCounter:
case StorageClassPushConstant:
case StorageClassStorageBuffer:
return true;
default:
return false;
}
}
bool Compiler::is_hidden_variable(const SPIRVariable &var, bool include_builtins) const
{
if ((is_builtin_variable(var) && !include_builtins) || var.remapped_variable)
return true;
// Combined image samplers are always considered active as they are "magic" variables.
if (find_if(begin(combined_image_samplers), end(combined_image_samplers), [&var](const CombinedImageSampler &samp) {
return samp.combined_id == var.self;
}) != end(combined_image_samplers))
{
return false;
}
// In SPIR-V 1.4 and up we must also use the active variable interface to disable global variables
// which are not part of the entry point.
if (ir.get_spirv_version() >= 0x10400 && var.storage != spv::StorageClassGeneric &&
var.storage != spv::StorageClassFunction && !interface_variable_exists_in_entry_point(var.self))
{
return true;
}
return check_active_interface_variables && storage_class_is_interface(var.storage) &&
active_interface_variables.find(var.self) == end(active_interface_variables);
}
bool Compiler::is_builtin_type(const SPIRType &type) const
{
auto *type_meta = ir.find_meta(type.self);
// We can have builtin structs as well. If one member of a struct is builtin, the struct must also be builtin.
if (type_meta)
for (auto &m : type_meta->members)
if (m.builtin)
return true;
return false;
}
bool Compiler::is_builtin_variable(const SPIRVariable &var) const
{
auto *m = ir.find_meta(var.self);
if (var.compat_builtin || (m && m->decoration.builtin))
return true;
else
return is_builtin_type(get<SPIRType>(var.basetype));
}
bool Compiler::is_member_builtin(const SPIRType &type, uint32_t index, BuiltIn *builtin) const
{
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
auto &memb = type_meta->members;
if (index < memb.size() && memb[index].builtin)
{
if (builtin)
*builtin = memb[index].builtin_type;
return true;
}
}
return false;
}
bool Compiler::is_scalar(const SPIRType &type) const
{
return type.basetype != SPIRType::Struct && type.vecsize == 1 && type.columns == 1;
}
bool Compiler::is_vector(const SPIRType &type) const
{
return type.vecsize > 1 && type.columns == 1;
}
bool Compiler::is_matrix(const SPIRType &type) const
{
return type.vecsize > 1 && type.columns > 1;
}
bool Compiler::is_array(const SPIRType &type) const
{
return !type.array.empty();
}
ShaderResources Compiler::get_shader_resources() const
{
return get_shader_resources(nullptr);
}
ShaderResources Compiler::get_shader_resources(const unordered_set<VariableID> &active_variables) const
{
return get_shader_resources(&active_variables);
}
bool Compiler::InterfaceVariableAccessHandler::handle(Op opcode, const uint32_t *args, uint32_t length)
{
uint32_t variable = 0;
switch (opcode)
{
// Need this first; otherwise GCC complains about unhandled switch statements.
default:
break;
case OpFunctionCall:
{
// Invalid SPIR-V.
if (length < 3)
return false;
uint32_t count = length - 3;
args += 3;
for (uint32_t i = 0; i < count; i++)
{
auto *var = compiler.maybe_get<SPIRVariable>(args[i]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[i]);
}
break;
}
case OpSelect:
{
// Invalid SPIR-V.
if (length < 5)
return false;
uint32_t count = length - 3;
args += 3;
for (uint32_t i = 0; i < count; i++)
{
auto *var = compiler.maybe_get<SPIRVariable>(args[i]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[i]);
}
break;
}
case OpPhi:
{
// Invalid SPIR-V.
if (length < 2)
return false;
uint32_t count = length - 2;
args += 2;
for (uint32_t i = 0; i < count; i += 2)
{
auto *var = compiler.maybe_get<SPIRVariable>(args[i]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[i]);
}
break;
}
case OpAtomicStore:
case OpStore:
// Invalid SPIR-V.
if (length < 1)
return false;
variable = args[0];
break;
case OpCopyMemory:
{
if (length < 2)
return false;
auto *var = compiler.maybe_get<SPIRVariable>(args[0]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[0]);
var = compiler.maybe_get<SPIRVariable>(args[1]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[1]);
break;
}
case OpExtInst:
{
if (length < 3)
return false;
auto &extension_set = compiler.get<SPIRExtension>(args[2]);
switch (extension_set.ext)
{
case SPIRExtension::GLSL:
{
auto op = static_cast<GLSLstd450>(args[3]);
switch (op)
{
case GLSLstd450InterpolateAtCentroid:
case GLSLstd450InterpolateAtSample:
case GLSLstd450InterpolateAtOffset:
{
auto *var = compiler.maybe_get<SPIRVariable>(args[4]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[4]);
break;
}
case GLSLstd450Modf:
case GLSLstd450Fract:
{
auto *var = compiler.maybe_get<SPIRVariable>(args[5]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[5]);
break;
}
default:
break;
}
break;
}
case SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter:
{
enum AMDShaderExplicitVertexParameter
{
InterpolateAtVertexAMD = 1
};
auto op = static_cast<AMDShaderExplicitVertexParameter>(args[3]);
switch (op)
{
case InterpolateAtVertexAMD:
{
auto *var = compiler.maybe_get<SPIRVariable>(args[4]);
if (var && storage_class_is_interface(var->storage))
variables.insert(args[4]);
break;
}
default:
break;
}
break;
}
default:
break;
}
break;
}
case OpAccessChain:
case OpInBoundsAccessChain:
case OpPtrAccessChain:
case OpLoad:
case OpCopyObject:
case OpImageTexelPointer:
case OpAtomicLoad:
case OpAtomicExchange:
case OpAtomicCompareExchange:
case OpAtomicCompareExchangeWeak:
case OpAtomicIIncrement:
case OpAtomicIDecrement:
case OpAtomicIAdd:
case OpAtomicISub:
case OpAtomicSMin:
case OpAtomicUMin:
case OpAtomicSMax:
case OpAtomicUMax:
case OpAtomicAnd:
case OpAtomicOr:
case OpAtomicXor:
case OpArrayLength:
// Invalid SPIR-V.
if (length < 3)
return false;
variable = args[2];
break;
}
if (variable)
{
auto *var = compiler.maybe_get<SPIRVariable>(variable);
if (var && storage_class_is_interface(var->storage))
variables.insert(variable);
}
return true;
}
unordered_set<VariableID> Compiler::get_active_interface_variables() const
{
// Traverse the call graph and find all interface variables which are in use.
unordered_set<VariableID> variables;
InterfaceVariableAccessHandler handler(*this, variables);
traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), handler);
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, const SPIRVariable &var) {
if (var.storage != StorageClassOutput)
return;
if (!interface_variable_exists_in_entry_point(var.self))
return;
// An output variable which is just declared (but uninitialized) might be read by subsequent stages
// so we should force-enable these outputs,
// since compilation will fail if a subsequent stage attempts to read from the variable in question.
// Also, make sure we preserve output variables which are only initialized, but never accessed by any code.
if (var.initializer != ID(0) || get_execution_model() != ExecutionModelFragment)
variables.insert(var.self);
});
// If we had to create a dummy sampler, we'll need to keep it active.
if (dummy_sampler_id)
variables.insert(dummy_sampler_id);
return variables;
}
void Compiler::set_enabled_interface_variables(std::unordered_set<VariableID> active_variables)
{
active_interface_variables = std::move(active_variables);
check_active_interface_variables = true;
}
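// Usage sketch for the active-variable reflection flow (illustrative only):
//   auto active = compiler.get_active_interface_variables();
//   ShaderResources resources = compiler.get_shader_resources(active);
//   compiler.set_enabled_interface_variables(std::move(active));
// After this, statically unused interface variables are treated as hidden.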
ShaderResources Compiler::get_shader_resources(const unordered_set<VariableID> *active_variables) const
{
ShaderResources res;
bool ssbo_instance_name = reflection_ssbo_instance_name_is_significant();
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, const SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
// It is possible for variables in uniform storage classes to be passed as function parameters, so detect
// that. To detect function parameters, check whether the variable's StorageClass is Function scope.
if (var.storage == StorageClassFunction || !type.pointer)
return;
if (active_variables && active_variables->find(var.self) == end(*active_variables))
return;
// In SPIR-V 1.4 and up, every global must be present in the entry point interface list,
// not just IO variables.
bool active_in_entry_point = true;
if (ir.get_spirv_version() < 0x10400)
{
if (var.storage == StorageClassInput || var.storage == StorageClassOutput)
active_in_entry_point = interface_variable_exists_in_entry_point(var.self);
}
else
active_in_entry_point = interface_variable_exists_in_entry_point(var.self);
if (!active_in_entry_point)
return;
bool is_builtin = is_builtin_variable(var);
if (is_builtin)
{
if (var.storage != StorageClassInput && var.storage != StorageClassOutput)
return;
auto &list = var.storage == StorageClassInput ? res.builtin_inputs : res.builtin_outputs;
BuiltInResource resource;
if (has_decoration(type.self, DecorationBlock))
{
resource.resource = { var.self, var.basetype, type.self,
get_remapped_declared_block_name(var.self, false) };
for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
{
resource.value_type_id = type.member_types[i];
resource.builtin = BuiltIn(get_member_decoration(type.self, i, DecorationBuiltIn));
list.push_back(resource);
}
}
else
{
bool strip_array =
!has_decoration(var.self, DecorationPatch) && (
get_execution_model() == ExecutionModelTessellationControl ||
(get_execution_model() == ExecutionModelTessellationEvaluation &&
var.storage == StorageClassInput));
resource.resource = { var.self, var.basetype, type.self, get_name(var.self) };
if (strip_array && !type.array.empty())
resource.value_type_id = get_variable_data_type(var).parent_type;
else
resource.value_type_id = get_variable_data_type_id(var);
assert(resource.value_type_id);
resource.builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn));
list.push_back(std::move(resource));
}
return;
}
// Input
if (var.storage == StorageClassInput)
{
if (has_decoration(type.self, DecorationBlock))
{
res.stage_inputs.push_back(
{ var.self, var.basetype, type.self,
get_remapped_declared_block_name(var.self, false) });
}
else
res.stage_inputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Subpass inputs
else if (var.storage == StorageClassUniformConstant && type.image.dim == DimSubpassData)
{
res.subpass_inputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Outputs
else if (var.storage == StorageClassOutput)
{
if (has_decoration(type.self, DecorationBlock))
{
res.stage_outputs.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, false) });
}
else
res.stage_outputs.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// UBOs
else if (type.storage == StorageClassUniform && has_decoration(type.self, DecorationBlock))
{
res.uniform_buffers.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, false) });
}
// Old way to declare SSBOs.
else if (type.storage == StorageClassUniform && has_decoration(type.self, DecorationBufferBlock))
{
res.storage_buffers.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, ssbo_instance_name) });
}
// Modern way to declare SSBOs.
else if (type.storage == StorageClassStorageBuffer)
{
res.storage_buffers.push_back(
{ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, ssbo_instance_name) });
}
// Push constant blocks
else if (type.storage == StorageClassPushConstant)
{
// There can only be one push constant block, but keep the vector in case this restriction is lifted
// in the future.
res.push_constant_buffers.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
else if (type.storage == StorageClassShaderRecordBufferKHR)
{
res.shader_record_buffers.push_back({ var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self, ssbo_instance_name) });
}
// Images
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image &&
type.image.sampled == 2)
{
res.storage_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Separate images
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image &&
type.image.sampled == 1)
{
res.separate_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Separate samplers
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Sampler)
{
res.separate_samplers.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Textures
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::SampledImage)
{
res.sampled_images.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Atomic counters
else if (type.storage == StorageClassAtomicCounter)
{
res.atomic_counters.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
// Acceleration structures
else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::AccelerationStructure)
{
res.acceleration_structures.push_back({ var.self, var.basetype, type.self, get_name(var.self) });
}
});
return res;
}
bool Compiler::type_is_block_like(const SPIRType &type) const
{
if (type.basetype != SPIRType::Struct)
return false;
if (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock))
{
return true;
}
// Block-like types may have Offset decorations.
for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
if (has_member_decoration(type.self, i, DecorationOffset))
return true;
return false;
}
void Compiler::parse_fixup()
{
// Figure out specialization constants for work group sizes.
for (auto id_ : ir.ids_for_constant_or_variable)
{
auto &id = ir.ids[id_];
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
if (has_decoration(c.self, DecorationBuiltIn) &&
BuiltIn(get_decoration(c.self, DecorationBuiltIn)) == BuiltInWorkgroupSize)
{
// In current SPIR-V, there can be just one constant like this.
// All entry points will receive the constant value.
// WorkgroupSize takes precedence over LocalSizeId.
for (auto &entry : ir.entry_points)
{
entry.second.workgroup_size.constant = c.self;
entry.second.workgroup_size.x = c.scalar(0, 0);
entry.second.workgroup_size.y = c.scalar(0, 1);
entry.second.workgroup_size.z = c.scalar(0, 2);
}
}
}
else if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
if (var.storage == StorageClassPrivate || var.storage == StorageClassWorkgroup ||
var.storage == StorageClassTaskPayloadWorkgroupEXT ||
var.storage == StorageClassOutput)
{
global_variables.push_back(var.self);
}
if (variable_storage_is_aliased(var))
aliased_variables.push_back(var.self);
}
}
}
void Compiler::update_name_cache(unordered_set<string> &cache_primary, const unordered_set<string> &cache_secondary,
string &name)
{
if (name.empty())
return;
const auto find_name = [&](const string &n) -> bool {
if (cache_primary.find(n) != end(cache_primary))
return true;
if (&cache_primary != &cache_secondary)
if (cache_secondary.find(n) != end(cache_secondary))
return true;
return false;
};
const auto insert_name = [&](const string &n) { cache_primary.insert(n); };
if (!find_name(name))
{
insert_name(name);
return;
}
uint32_t counter = 0;
auto tmpname = name;
bool use_linked_underscore = true;
if (tmpname == "_")
{
// We cannot just append numbers, as we will end up creating internally reserved names.
// Make it like _0_<counter> instead.
tmpname += "0";
}
else if (tmpname.back() == '_')
{
// The last character is an underscore, so we don't need to add a linking underscore;
// doing so would violate double-underscore rules.
use_linked_underscore = false;
}
// If there is a collision (very rare),
// keep tacking on extra identifier until it's unique.
do
{
counter++;
name = tmpname + (use_linked_underscore ? "_" : "") + convert_to_string(counter);
} while (find_name(name));
insert_name(name);
}
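// Example (illustrative): if the cache already contains "foo", the next "foo"
// becomes "foo_1", then "foo_2", and so on. A name ending in '_' such as
// "bar_" becomes "bar_1" directly, and "_" becomes "_0_1", so we never emit
// reserved double-underscore identifiers.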
void Compiler::update_name_cache(unordered_set<string> &cache, string &name)
{
update_name_cache(cache, cache, name);
}
void Compiler::set_name(ID id, const std::string &name)
{
ir.set_name(id, name);
}
const SPIRType &Compiler::get_type(TypeID id) const
{
return get<SPIRType>(id);
}
const SPIRType &Compiler::get_type_from_variable(VariableID id) const
{
return get<SPIRType>(get<SPIRVariable>(id).basetype);
}
uint32_t Compiler::get_pointee_type_id(uint32_t type_id) const
{
auto *p_type = &get<SPIRType>(type_id);
if (p_type->pointer)
{
assert(p_type->parent_type);
type_id = p_type->parent_type;
}
return type_id;
}
const SPIRType &Compiler::get_pointee_type(const SPIRType &type) const
{
auto *p_type = &type;
if (p_type->pointer)
{
assert(p_type->parent_type);
p_type = &get<SPIRType>(p_type->parent_type);
}
return *p_type;
}
const SPIRType &Compiler::get_pointee_type(uint32_t type_id) const
{
return get_pointee_type(get<SPIRType>(type_id));
}
uint32_t Compiler::get_variable_data_type_id(const SPIRVariable &var) const
{
if (var.phi_variable)
return var.basetype;
return get_pointee_type_id(var.basetype);
}
SPIRType &Compiler::get_variable_data_type(const SPIRVariable &var)
{
return get<SPIRType>(get_variable_data_type_id(var));
}
const SPIRType &Compiler::get_variable_data_type(const SPIRVariable &var) const
{
return get<SPIRType>(get_variable_data_type_id(var));
}
SPIRType &Compiler::get_variable_element_type(const SPIRVariable &var)
{
SPIRType *type = &get_variable_data_type(var);
if (is_array(*type))
type = &get<SPIRType>(type->parent_type);
return *type;
}
const SPIRType &Compiler::get_variable_element_type(const SPIRVariable &var) const
{
const SPIRType *type = &get_variable_data_type(var);
if (is_array(*type))
type = &get<SPIRType>(type->parent_type);
return *type;
}
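// Example (illustrative): for a variable declared as an array of blocks, e.g.
// uniform Foo foo[4], the variable type is pointer-to-array-of-Foo;
// get_variable_data_type() strips the pointer and yields Foo[4], while
// get_variable_element_type() additionally strips the array and yields Foo.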
bool Compiler::is_sampled_image_type(const SPIRType &type)
{
return (type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage) && type.image.sampled == 1 &&
type.image.dim != DimBuffer;
}
void Compiler::set_member_decoration_string(TypeID id, uint32_t index, spv::Decoration decoration,
const std::string &argument)
{
ir.set_member_decoration_string(id, index, decoration, argument);
}
void Compiler::set_member_decoration(TypeID id, uint32_t index, Decoration decoration, uint32_t argument)
{
ir.set_member_decoration(id, index, decoration, argument);
}
void Compiler::set_member_name(TypeID id, uint32_t index, const std::string &name)
{
ir.set_member_name(id, index, name);
}
const std::string &Compiler::get_member_name(TypeID id, uint32_t index) const
{
return ir.get_member_name(id, index);
}
void Compiler::set_qualified_name(uint32_t id, const string &name)
{
ir.meta[id].decoration.qualified_alias = name;
}
void Compiler::set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name)
{
ir.meta[type_id].members.resize(max(ir.meta[type_id].members.size(), size_t(index) + 1));
ir.meta[type_id].members[index].qualified_alias = name;
}
const string &Compiler::get_member_qualified_name(TypeID type_id, uint32_t index) const
{
auto *m = ir.find_meta(type_id);
if (m && index < m->members.size())
return m->members[index].qualified_alias;
else
return ir.get_empty_string();
}
uint32_t Compiler::get_member_decoration(TypeID id, uint32_t index, Decoration decoration) const
{
return ir.get_member_decoration(id, index, decoration);
}
const Bitset &Compiler::get_member_decoration_bitset(TypeID id, uint32_t index) const
{
return ir.get_member_decoration_bitset(id, index);
}
bool Compiler::has_member_decoration(TypeID id, uint32_t index, Decoration decoration) const
{
return ir.has_member_decoration(id, index, decoration);
}
void Compiler::unset_member_decoration(TypeID id, uint32_t index, Decoration decoration)
{
ir.unset_member_decoration(id, index, decoration);
}
void Compiler::set_decoration_string(ID id, spv::Decoration decoration, const std::string &argument)
{
ir.set_decoration_string(id, decoration, argument);
}
void Compiler::set_decoration(ID id, Decoration decoration, uint32_t argument)
{
ir.set_decoration(id, decoration, argument);
}
void Compiler::set_extended_decoration(uint32_t id, ExtendedDecorations decoration, uint32_t value)
{
auto &dec = ir.meta[id].decoration;
dec.extended.flags.set(decoration);
dec.extended.values[decoration] = value;
}
void Compiler::set_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration,
uint32_t value)
{
ir.meta[type].members.resize(max(ir.meta[type].members.size(), size_t(index) + 1));
auto &dec = ir.meta[type].members[index];
dec.extended.flags.set(decoration);
dec.extended.values[decoration] = value;
}
static uint32_t get_default_extended_decoration(ExtendedDecorations decoration)
{
switch (decoration)
{
case SPIRVCrossDecorationResourceIndexPrimary:
case SPIRVCrossDecorationResourceIndexSecondary:
case SPIRVCrossDecorationResourceIndexTertiary:
case SPIRVCrossDecorationResourceIndexQuaternary:
case SPIRVCrossDecorationInterfaceMemberIndex:
return ~(0u);
default:
return 0;
}
}
uint32_t Compiler::get_extended_decoration(uint32_t id, ExtendedDecorations decoration) const
{
auto *m = ir.find_meta(id);
if (!m)
return 0;
auto &dec = m->decoration;
if (!dec.extended.flags.get(decoration))
return get_default_extended_decoration(decoration);
return dec.extended.values[decoration];
}
uint32_t Compiler::get_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const
{
auto *m = ir.find_meta(type);
if (!m)
return 0;
if (index >= m->members.size())
return 0;
auto &dec = m->members[index];
if (!dec.extended.flags.get(decoration))
return get_default_extended_decoration(decoration);
return dec.extended.values[decoration];
}
bool Compiler::has_extended_decoration(uint32_t id, ExtendedDecorations decoration) const
{
auto *m = ir.find_meta(id);
if (!m)
return false;
auto &dec = m->decoration;
return dec.extended.flags.get(decoration);
}
bool Compiler::has_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration) const
{
auto *m = ir.find_meta(type);
if (!m)
return false;
if (index >= m->members.size())
return false;
auto &dec = m->members[index];
return dec.extended.flags.get(decoration);
}
void Compiler::unset_extended_decoration(uint32_t id, ExtendedDecorations decoration)
{
auto &dec = ir.meta[id].decoration;
dec.extended.flags.clear(decoration);
dec.extended.values[decoration] = 0;
}
void Compiler::unset_extended_member_decoration(uint32_t type, uint32_t index, ExtendedDecorations decoration)
{
ir.meta[type].members.resize(max(ir.meta[type].members.size(), size_t(index) + 1));
auto &dec = ir.meta[type].members[index];
dec.extended.flags.clear(decoration);
dec.extended.values[decoration] = 0;
}
StorageClass Compiler::get_storage_class(VariableID id) const
{
return get<SPIRVariable>(id).storage;
}
const std::string &Compiler::get_name(ID id) const
{
return ir.get_name(id);
}
const std::string Compiler::get_fallback_name(ID id) const
{
return join("_", id);
}
const std::string Compiler::get_block_fallback_name(VariableID id) const
{
auto &var = get<SPIRVariable>(id);
if (get_name(id).empty())
return join("_", get<SPIRType>(var.basetype).self, "_", id);
else
return get_name(id);
}
const Bitset &Compiler::get_decoration_bitset(ID id) const
{
return ir.get_decoration_bitset(id);
}
bool Compiler::has_decoration(ID id, Decoration decoration) const
{
return ir.has_decoration(id, decoration);
}
const string &Compiler::get_decoration_string(ID id, Decoration decoration) const
{
return ir.get_decoration_string(id, decoration);
}
const string &Compiler::get_member_decoration_string(TypeID id, uint32_t index, Decoration decoration) const
{
return ir.get_member_decoration_string(id, index, decoration);
}
uint32_t Compiler::get_decoration(ID id, Decoration decoration) const
{
return ir.get_decoration(id, decoration);
}
void Compiler::unset_decoration(ID id, Decoration decoration)
{
ir.unset_decoration(id, decoration);
}
bool Compiler::get_binary_offset_for_decoration(VariableID id, spv::Decoration decoration, uint32_t &word_offset) const
{
auto *m = ir.find_meta(id);
if (!m)
return false;
auto &word_offsets = m->decoration_word_offset;
auto itr = word_offsets.find(decoration);
if (itr == end(word_offsets))
return false;
word_offset = itr->second;
return true;
}
bool Compiler::block_is_noop(const SPIRBlock &block) const
{
if (block.terminator != SPIRBlock::Direct)
return false;
auto &child = get<SPIRBlock>(block.next_block);
// If this block participates in PHI, the block isn't really a no-op.
for (auto &phi : block.phi_variables)
if (phi.parent == block.self || phi.parent == child.self)
return false;
for (auto &phi : child.phi_variables)
if (phi.parent == block.self)
return false;
// Verify all instructions have no semantic impact.
for (auto &i : block.ops)
{
auto op = static_cast<Op>(i.op);
switch (op)
{
// Non-Semantic instructions.
case OpLine:
case OpNoLine:
break;
case OpExtInst:
{
auto *ops = stream(i);
auto ext = get<SPIRExtension>(ops[2]).ext;
bool ext_is_nonsemantic_only =
ext == SPIRExtension::NonSemanticShaderDebugInfo ||
ext == SPIRExtension::SPV_debug_info ||
ext == SPIRExtension::NonSemanticGeneric;
if (!ext_is_nonsemantic_only)
return false;
break;
}
default:
return false;
}
}
return true;
}
bool Compiler::block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const
{
// Tried and failed.
if (block.disable_block_optimization || block.complex_continue)
return false;
if (method == SPIRBlock::MergeToSelectForLoop || method == SPIRBlock::MergeToSelectContinueForLoop)
{
// Try to detect common for loop pattern
// which the code backend can use to create cleaner code.
// for(;;) { if (cond) { some_body; } else { break; } }
// is the pattern we're looking for.
const auto *false_block = maybe_get<SPIRBlock>(block.false_block);
const auto *true_block = maybe_get<SPIRBlock>(block.true_block);
const auto *merge_block = maybe_get<SPIRBlock>(block.merge_block);
bool false_block_is_merge = block.false_block == block.merge_block ||
(false_block && merge_block && execution_is_noop(*false_block, *merge_block));
bool true_block_is_merge = block.true_block == block.merge_block ||
(true_block && merge_block && execution_is_noop(*true_block, *merge_block));
bool positive_candidate =
block.true_block != block.merge_block && block.true_block != block.self && false_block_is_merge;
bool negative_candidate =
block.false_block != block.merge_block && block.false_block != block.self && true_block_is_merge;
bool ret = block.terminator == SPIRBlock::Select && block.merge == SPIRBlock::MergeLoop &&
(positive_candidate || negative_candidate);
if (ret && positive_candidate && method == SPIRBlock::MergeToSelectContinueForLoop)
ret = block.true_block == block.continue_block;
else if (ret && negative_candidate && method == SPIRBlock::MergeToSelectContinueForLoop)
ret = block.false_block == block.continue_block;
// If we have OpPhi which depends on branches which came from our own block,
// we need to flush phi variables in the else block instead of a trivial break,
// so we cannot assume this is a for loop candidate.
if (ret)
{
for (auto &phi : block.phi_variables)
if (phi.parent == block.self)
return false;
auto *merge = maybe_get<SPIRBlock>(block.merge_block);
if (merge)
for (auto &phi : merge->phi_variables)
if (phi.parent == block.self)
return false;
}
return ret;
}
else if (method == SPIRBlock::MergeToDirectForLoop)
{
// Empty loop header that just sets up merge target
// and branches to loop body.
bool ret = block.terminator == SPIRBlock::Direct && block.merge == SPIRBlock::MergeLoop && block_is_noop(block);
if (!ret)
return false;
auto &child = get<SPIRBlock>(block.next_block);
const auto *false_block = maybe_get<SPIRBlock>(child.false_block);
const auto *true_block = maybe_get<SPIRBlock>(child.true_block);
const auto *merge_block = maybe_get<SPIRBlock>(block.merge_block);
bool false_block_is_merge = child.false_block == block.merge_block ||
(false_block && merge_block && execution_is_noop(*false_block, *merge_block));
bool true_block_is_merge = child.true_block == block.merge_block ||
(true_block && merge_block && execution_is_noop(*true_block, *merge_block));
bool positive_candidate =
child.true_block != block.merge_block && child.true_block != block.self && false_block_is_merge;
bool negative_candidate =
child.false_block != block.merge_block && child.false_block != block.self && true_block_is_merge;
ret = child.terminator == SPIRBlock::Select && child.merge == SPIRBlock::MergeNone &&
(positive_candidate || negative_candidate);
if (ret)
{
auto *merge = maybe_get<SPIRBlock>(block.merge_block);
if (merge)
for (auto &phi : merge->phi_variables)
if (phi.parent == block.self || phi.parent == child.false_block)
return false;
}
return ret;
}
else
return false;
}
bool Compiler::execution_is_noop(const SPIRBlock &from, const SPIRBlock &to) const
{
if (!execution_is_branchless(from, to))
return false;
auto *start = &from;
for (;;)
{
if (start->self == to.self)
return true;
if (!block_is_noop(*start))
return false;
auto &next = get<SPIRBlock>(start->next_block);
start = &next;
}
}
bool Compiler::execution_is_branchless(const SPIRBlock &from, const SPIRBlock &to) const
{
auto *start = &from;
for (;;)
{
if (start->self == to.self)
return true;
if (start->terminator == SPIRBlock::Direct && start->merge == SPIRBlock::MergeNone)
start = &get<SPIRBlock>(start->next_block);
else
return false;
}
}
bool Compiler::execution_is_direct_branch(const SPIRBlock &from, const SPIRBlock &to) const
{
return from.terminator == SPIRBlock::Direct && from.merge == SPIRBlock::MergeNone && from.next_block == to.self;
}
SPIRBlock::ContinueBlockType Compiler::continue_block_type(const SPIRBlock &block) const
{
// The block was deemed too complex during code emit; pick conservative fallback paths.
if (block.complex_continue)
return SPIRBlock::ComplexLoop;
// In older glslang output, the continue block can be equal to the loop header.
// In this case, execution is clearly branchless, so just assume a while loop header here.
if (block.merge == SPIRBlock::MergeLoop)
return SPIRBlock::WhileLoop;
if (block.loop_dominator == BlockID(SPIRBlock::NoDominator))
{
// The continue block is never reached from the CFG.
return SPIRBlock::ComplexLoop;
}
auto &dominator = get<SPIRBlock>(block.loop_dominator);
if (execution_is_noop(block, dominator))
return SPIRBlock::WhileLoop;
else if (execution_is_branchless(block, dominator))
return SPIRBlock::ForLoop;
else
{
const auto *false_block = maybe_get<SPIRBlock>(block.false_block);
const auto *true_block = maybe_get<SPIRBlock>(block.true_block);
const auto *merge_block = maybe_get<SPIRBlock>(dominator.merge_block);
// If we need to flush Phi in this block, we cannot have a DoWhile loop.
bool flush_phi_to_false = false_block && flush_phi_required(block.self, block.false_block);
bool flush_phi_to_true = true_block && flush_phi_required(block.self, block.true_block);
if (flush_phi_to_false || flush_phi_to_true)
return SPIRBlock::ComplexLoop;
bool positive_do_while = block.true_block == dominator.self &&
(block.false_block == dominator.merge_block ||
(false_block && merge_block && execution_is_noop(*false_block, *merge_block)));
bool negative_do_while = block.false_block == dominator.self &&
(block.true_block == dominator.merge_block ||
(true_block && merge_block && execution_is_noop(*true_block, *merge_block)));
if (block.merge == SPIRBlock::MergeNone && block.terminator == SPIRBlock::Select &&
(positive_do_while || negative_do_while))
{
return SPIRBlock::DoWhileLoop;
}
else
return SPIRBlock::ComplexLoop;
}
}
const SmallVector<SPIRBlock::Case> &Compiler::get_case_list(const SPIRBlock &block) const
{
uint32_t width = 0;
// First we check if we can get the type directly from the block.condition
// since it can be a SPIRConstant or a SPIRVariable.
if (const auto *constant = maybe_get<SPIRConstant>(block.condition))
{
const auto &type = get<SPIRType>(constant->constant_type);
width = type.width;
}
else if (const auto *var = maybe_get<SPIRVariable>(block.condition))
{
const auto &type = get<SPIRType>(var->basetype);
width = type.width;
}
else if (const auto *undef = maybe_get<SPIRUndef>(block.condition))
{
const auto &type = get<SPIRType>(undef->basetype);
width = type.width;
}
else
{
auto search = ir.load_type_width.find(block.condition);
if (search == ir.load_type_width.end())
{
SPIRV_CROSS_THROW("Use of undeclared variable on a switch statement.");
}
width = search->second;
}
if (width > 32)
return block.cases_64bit;
return block.cases_32bit;
}
bool Compiler::traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const
{
handler.set_current_block(block);
handler.rearm_current_block(block);
// Ideally, perhaps traverse the CFG instead of all blocks in order to eliminate dead blocks,
// but this shouldn't be a problem in practice unless the SPIR-V is doing insane things like recursing
// inside dead blocks ...
for (auto &i : block.ops)
{
auto ops = stream(i);
auto op = static_cast<Op>(i.op);
if (!handler.handle(op, ops, i.length))
return false;
if (op == OpFunctionCall)
{
auto &func = get<SPIRFunction>(ops[2]);
if (handler.follow_function_call(func))
{
if (!handler.begin_function_scope(ops, i.length))
return false;
if (!traverse_all_reachable_opcodes(get<SPIRFunction>(ops[2]), handler))
return false;
if (!handler.end_function_scope(ops, i.length))
return false;
handler.rearm_current_block(block);
}
}
}
if (!handler.handle_terminator(block))
return false;
return true;
}
bool Compiler::traverse_all_reachable_opcodes(const SPIRFunction &func, OpcodeHandler &handler) const
{
for (auto block : func.blocks)
if (!traverse_all_reachable_opcodes(get<SPIRBlock>(block), handler))
return false;
return true;
}
uint32_t Compiler::type_struct_member_offset(const SPIRType &type, uint32_t index) const
{
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
// Decoration must be set in valid SPIR-V, otherwise throw.
auto &dec = type_meta->members[index];
if (dec.decoration_flags.get(DecorationOffset))
return dec.offset;
else
SPIRV_CROSS_THROW("Struct member does not have Offset set.");
}
else
SPIRV_CROSS_THROW("Struct member does not have Offset set.");
}
uint32_t Compiler::type_struct_member_array_stride(const SPIRType &type, uint32_t index) const
{
auto *type_meta = ir.find_meta(type.member_types[index]);
if (type_meta)
{
// Decoration must be set in valid SPIR-V, otherwise throw.
// ArrayStride is part of the array type not OpMemberDecorate.
auto &dec = type_meta->decoration;
if (dec.decoration_flags.get(DecorationArrayStride))
return dec.array_stride;
else
SPIRV_CROSS_THROW("Struct member does not have ArrayStride set.");
}
else
SPIRV_CROSS_THROW("Struct member does not have ArrayStride set.");
}
uint32_t Compiler::type_struct_member_matrix_stride(const SPIRType &type, uint32_t index) const
{
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
// Decoration must be set in valid SPIR-V, otherwise throw.
// MatrixStride is part of OpMemberDecorate.
auto &dec = type_meta->members[index];
if (dec.decoration_flags.get(DecorationMatrixStride))
return dec.matrix_stride;
else
SPIRV_CROSS_THROW("Struct member does not have MatrixStride set.");
}
else
SPIRV_CROSS_THROW("Struct member does not have MatrixStride set.");
}
size_t Compiler::get_declared_struct_size(const SPIRType &type) const
{
if (type.member_types.empty())
SPIRV_CROSS_THROW("Declared struct in block cannot be empty.");
// Offsets can be declared out of order, so we need to deduce the actual size
// from the member with the highest offset instead.
uint32_t member_index = 0;
size_t highest_offset = 0;
for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
{
size_t offset = type_struct_member_offset(type, i);
if (offset > highest_offset)
{
highest_offset = offset;
member_index = i;
}
}
size_t size = get_declared_struct_member_size(type, member_index);
return highest_offset + size;
}
size_t Compiler::get_declared_struct_size_runtime_array(const SPIRType &type, size_t array_size) const
{
if (type.member_types.empty())
SPIRV_CROSS_THROW("Declared struct in block cannot be empty.");
size_t size = get_declared_struct_size(type);
auto &last_type = get<SPIRType>(type.member_types.back());
if (!last_type.array.empty() && last_type.array_size_literal[0] && last_type.array[0] == 0) // Runtime array
size += array_size * type_struct_member_array_stride(type, uint32_t(type.member_types.size() - 1));
return size;
}
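// Worked example (illustrative): for a block { vec4 a; float tail[]; } with
// Offset(a) = 0, Offset(tail) = 16 and ArrayStride(tail) = 4, the runtime
// array contributes zero bytes to get_declared_struct_size(), so
// get_declared_struct_size_runtime_array(type, n) returns 16 + 4 * n.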
uint32_t Compiler::evaluate_spec_constant_u32(const SPIRConstantOp &spec) const
{
auto &result_type = get<SPIRType>(spec.basetype);
if (result_type.basetype != SPIRType::UInt && result_type.basetype != SPIRType::Int &&
result_type.basetype != SPIRType::Boolean)
{
SPIRV_CROSS_THROW(
"Only 32-bit integers and booleans are currently supported when evaluating specialization constants.\n");
}
if (!is_scalar(result_type))
SPIRV_CROSS_THROW("Spec constant evaluation must be a scalar.\n");
uint32_t value = 0;
const auto eval_u32 = [&](uint32_t id) -> uint32_t {
auto &type = expression_type(id);
if (type.basetype != SPIRType::UInt && type.basetype != SPIRType::Int && type.basetype != SPIRType::Boolean)
{
SPIRV_CROSS_THROW("Only 32-bit integers and booleans are currently supported when evaluating "
"specialization constants.\n");
}
if (!is_scalar(type))
SPIRV_CROSS_THROW("Spec constant evaluation must be a scalar.\n");
if (const auto *c = this->maybe_get<SPIRConstant>(id))
return c->scalar();
else
return evaluate_spec_constant_u32(this->get<SPIRConstantOp>(id));
};
#define binary_spec_op(op, binary_op) \
case Op##op: \
value = eval_u32(spec.arguments[0]) binary_op eval_u32(spec.arguments[1]); \
break
#define binary_spec_op_cast(op, binary_op, type) \
case Op##op: \
value = uint32_t(type(eval_u32(spec.arguments[0])) binary_op type(eval_u32(spec.arguments[1]))); \
break
// Support the basic opcodes which are typically used when computing array sizes.
switch (spec.opcode)
{
binary_spec_op(IAdd, +);
binary_spec_op(ISub, -);
binary_spec_op(IMul, *);
binary_spec_op(BitwiseAnd, &);
binary_spec_op(BitwiseOr, |);
binary_spec_op(BitwiseXor, ^);
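// Logical* operands are booleans encoded as 0 or 1 here, so the bitwise
// operators below are equivalent to their logical counterparts.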
binary_spec_op(LogicalAnd, &);
binary_spec_op(LogicalOr, |);
binary_spec_op(ShiftLeftLogical, <<);
binary_spec_op(ShiftRightLogical, >>);
binary_spec_op_cast(ShiftRightArithmetic, >>, int32_t);
binary_spec_op(LogicalEqual, ==);
binary_spec_op(LogicalNotEqual, !=);
binary_spec_op(IEqual, ==);
binary_spec_op(INotEqual, !=);
binary_spec_op(ULessThan, <);
binary_spec_op(ULessThanEqual, <=);
binary_spec_op(UGreaterThan, >);
binary_spec_op(UGreaterThanEqual, >=);
binary_spec_op_cast(SLessThan, <, int32_t);
binary_spec_op_cast(SLessThanEqual, <=, int32_t);
binary_spec_op_cast(SGreaterThan, >, int32_t);
binary_spec_op_cast(SGreaterThanEqual, >=, int32_t);
#undef binary_spec_op
#undef binary_spec_op_cast
case OpLogicalNot:
value = uint32_t(!eval_u32(spec.arguments[0]));
break;
case OpNot:
value = ~eval_u32(spec.arguments[0]);
break;
case OpSNegate:
value = uint32_t(-int32_t(eval_u32(spec.arguments[0])));
break;
case OpSelect:
value = eval_u32(spec.arguments[0]) ? eval_u32(spec.arguments[1]) : eval_u32(spec.arguments[2]);
break;
case OpUMod:
{
uint32_t a = eval_u32(spec.arguments[0]);
uint32_t b = eval_u32(spec.arguments[1]);
if (b == 0)
SPIRV_CROSS_THROW("Undefined behavior in UMod, b == 0.\n");
value = a % b;
break;
}
case OpSRem:
{
auto a = int32_t(eval_u32(spec.arguments[0]));
auto b = int32_t(eval_u32(spec.arguments[1]));
if (b == 0)
SPIRV_CROSS_THROW("Undefined behavior in SRem, b == 0.\n");
value = a % b;
break;
}
case OpSMod:
{
auto a = int32_t(eval_u32(spec.arguments[0]));
auto b = int32_t(eval_u32(spec.arguments[1]));
if (b == 0)
SPIRV_CROSS_THROW("Undefined behavior in SMod, b == 0.\n");
auto v = a % b;
// Makes sure we match the sign of b, not a.
if ((b < 0 && v > 0) || (b > 0 && v < 0))
v += b;
value = v;
break;
}
case OpUDiv:
{
uint32_t a = eval_u32(spec.arguments[0]);
uint32_t b = eval_u32(spec.arguments[1]);
if (b == 0)
SPIRV_CROSS_THROW("Undefined behavior in UDiv, b == 0.\n");
value = a / b;
break;
}
case OpSDiv:
{
auto a = int32_t(eval_u32(spec.arguments[0]));
auto b = int32_t(eval_u32(spec.arguments[1]));
if (b == 0)
SPIRV_CROSS_THROW("Undefined behavior in SDiv, b == 0.\n");
value = a / b;
break;
}
default:
SPIRV_CROSS_THROW("Unsupported spec constant opcode for evaluation.\n");
}
return value;
}
uint32_t Compiler::evaluate_constant_u32(uint32_t id) const
{
if (const auto *c = maybe_get<SPIRConstant>(id))
return c->scalar();
else
return evaluate_spec_constant_u32(get<SPIRConstantOp>(id));
}
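// Example (illustrative): if %n is OpSpecConstant %uint 8 and
// %size = OpSpecConstantOp %uint IMul %n %four, then evaluate_constant_u32(%size)
// folds the expression to 8 * 4 = 32. This is mainly used to resolve
// specialization-constant-sized arrays below.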
size_t Compiler::get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const
{
if (struct_type.member_types.empty())
SPIRV_CROSS_THROW("Declared struct in block cannot be empty.");
auto &flags = get_member_decoration_bitset(struct_type.self, index);
auto &type = get<SPIRType>(struct_type.member_types[index]);
switch (type.basetype)
{
case SPIRType::Unknown:
case SPIRType::Void:
case SPIRType::Boolean: // Bools are purely logical, and cannot be used for externally visible types.
case SPIRType::AtomicCounter:
case SPIRType::Image:
case SPIRType::SampledImage:
case SPIRType::Sampler:
SPIRV_CROSS_THROW("Querying size for object with opaque size.");
default:
break;
}
if (type.pointer && type.storage == StorageClassPhysicalStorageBuffer)
{
// Check if this is a top-level pointer type, and not an array of pointers.
if (type.pointer_depth > get<SPIRType>(type.parent_type).pointer_depth)
return 8;
}
if (!type.array.empty())
{
// For arrays, we can use ArrayStride to get an easy check.
bool array_size_literal = type.array_size_literal.back();
uint32_t array_size = array_size_literal ? type.array.back() : evaluate_constant_u32(type.array.back());
return type_struct_member_array_stride(struct_type, index) * array_size;
}
else if (type.basetype == SPIRType::Struct)
{
return get_declared_struct_size(type);
}
else
{
unsigned vecsize = type.vecsize;
unsigned columns = type.columns;
// Vectors.
if (columns == 1)
{
size_t component_size = type.width / 8;
return vecsize * component_size;
}
else
{
uint32_t matrix_stride = type_struct_member_matrix_stride(struct_type, index);
// Per SPIR-V spec, matrices must be tightly packed and aligned up for vec3 accesses.
if (flags.get(DecorationRowMajor))
return matrix_stride * vecsize;
else if (flags.get(DecorationColMajor))
return matrix_stride * columns;
else
SPIRV_CROSS_THROW("Either row-major or column-major must be declared for matrices.");
}
}
}
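// Worked example (illustrative): a column-major mat3 member with
// MatrixStride = 16 occupies matrix_stride * columns = 16 * 3 = 48 bytes; a
// row-major mat3 with the same stride occupies matrix_stride * vecsize = 48
// as well, while a plain vec3 member is vecsize * (width / 8) = 3 * 4 = 12 bytes.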
bool Compiler::BufferAccessHandler::handle(Op opcode, const uint32_t *args, uint32_t length)
{
if (opcode != OpAccessChain && opcode != OpInBoundsAccessChain && opcode != OpPtrAccessChain)
return true;
bool ptr_chain = (opcode == OpPtrAccessChain);
// Invalid SPIR-V.
if (length < (ptr_chain ? 5u : 4u))
return false;
if (args[2] != id)
return true;
// Don't bother traversing the entire access chain tree yet.
// If we access a struct member, assume we access the entire member.
uint32_t index = compiler.get<SPIRConstant>(args[ptr_chain ? 4 : 3]).scalar();
// Seen this index already.
if (seen.find(index) != end(seen))
return true;
seen.insert(index);
auto &type = compiler.expression_type(id);
uint32_t offset = compiler.type_struct_member_offset(type, index);
size_t range;
// If we have another member in the struct, deduce the range by looking at the next member.
// This is okay since structs in SPIR-V can have padding, but Offset decoration must be
// monotonically increasing.
// Of course, this doesn't take into account if the SPIR-V for some reason decided to add
// very large amounts of padding, but that's not really a big deal.
if (index + 1 < type.member_types.size())
{
range = compiler.type_struct_member_offset(type, index + 1) - offset;
}
else
{
// No padding, so just deduce it from the size of the member directly.
range = compiler.get_declared_struct_member_size(type, index);
}
ranges.push_back({ index, offset, range });
return true;
}
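// Returns every struct member of the buffer that the default entry point actually
// touches, as { member index, byte offset, byte range } triples.
// A minimal usage sketch (illustrative only; assumes `compiler` is a fully
// constructed Compiler subclass and `res` is a Resource obtained from
// get_shader_resources()):
//
//   for (const BufferRange &range : compiler.get_active_buffer_ranges(res.id))
//       printf("member #%u: offset %zu, %zu bytes\n", range.index, range.offset, range.range);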
SmallVector<BufferRange> Compiler::get_active_buffer_ranges(VariableID id) const
{
SmallVector<BufferRange> ranges;
BufferAccessHandler handler(*this, ranges, id);
traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), handler);
return ranges;
}
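// Structural type equality: compares base type, bit width, vector/matrix shape,
// array dimensions, image properties and members recursively, while ignoring
// IDs, names and decorations.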
bool Compiler::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const
{
if (a.basetype != b.basetype)
return false;
if (a.width != b.width)
return false;
if (a.vecsize != b.vecsize)
return false;
if (a.columns != b.columns)
return false;
if (a.array.size() != b.array.size())
return false;
size_t array_count = a.array.size();
if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0)
return false;
if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage)
{
if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0)
return false;
}
if (a.member_types.size() != b.member_types.size())
return false;
size_t member_count = a.member_types.size();
for (size_t i = 0; i < member_count; i++)
{
if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i])))
return false;
}
return true;
}
const Bitset &Compiler::get_execution_mode_bitset() const
{
return get_entry_point().flags;
}
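// Sets an execution mode on the current entry point. arg0/arg1/arg2 carry the
// mode's literal operands where applicable (e.g. workgroup dimensions for
// LocalSize); modes without operands only set the flag.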
void Compiler::set_execution_mode(ExecutionMode mode, uint32_t arg0, uint32_t arg1, uint32_t arg2)
{
auto &execution = get_entry_point();
execution.flags.set(mode);
switch (mode)
{
case ExecutionModeLocalSize:
execution.workgroup_size.x = arg0;
execution.workgroup_size.y = arg1;
execution.workgroup_size.z = arg2;
break;
case ExecutionModeLocalSizeId:
execution.workgroup_size.id_x = arg0;
execution.workgroup_size.id_y = arg1;
execution.workgroup_size.id_z = arg2;
break;
case ExecutionModeInvocations:
execution.invocations = arg0;
break;
case ExecutionModeOutputVertices:
execution.output_vertices = arg0;
break;
case ExecutionModeOutputPrimitivesEXT:
execution.output_primitives = arg0;
break;
default:
break;
}
}
void Compiler::unset_execution_mode(ExecutionMode mode)
{
auto &execution = get_entry_point();
execution.flags.clear(mode);
}
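// Reflects which workgroup dimensions are driven by specialization constants.
// Fills x/y/z with { id, constant_id } pairs for each dimension that is
// specializable ({ 0, 0 } otherwise), and returns the ID of the composite
// WorkgroupSize builtin constant if one is used, or 0 if not.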
uint32_t Compiler::get_work_group_size_specialization_constants(SpecializationConstant &x, SpecializationConstant &y,
SpecializationConstant &z) const
{
auto &execution = get_entry_point();
x = { 0, 0 };
y = { 0, 0 };
z = { 0, 0 };
// WorkgroupSize builtin takes precedence over LocalSize / LocalSizeId.
if (execution.workgroup_size.constant != 0)
{
auto &c = get<SPIRConstant>(execution.workgroup_size.constant);
if (c.m.c[0].id[0] != ID(0))
{
x.id = c.m.c[0].id[0];
x.constant_id = get_decoration(c.m.c[0].id[0], DecorationSpecId);
}
if (c.m.c[0].id[1] != ID(0))
{
y.id = c.m.c[0].id[1];
y.constant_id = get_decoration(c.m.c[0].id[1], DecorationSpecId);
}
if (c.m.c[0].id[2] != ID(0))
{
z.id = c.m.c[0].id[2];
z.constant_id = get_decoration(c.m.c[0].id[2], DecorationSpecId);
}
}
else if (execution.flags.get(ExecutionModeLocalSizeId))
{
auto &cx = get<SPIRConstant>(execution.workgroup_size.id_x);
if (cx.specialization)
{
x.id = execution.workgroup_size.id_x;
x.constant_id = get_decoration(execution.workgroup_size.id_x, DecorationSpecId);
}
auto &cy = get<SPIRConstant>(execution.workgroup_size.id_y);
if (cy.specialization)
{
y.id = execution.workgroup_size.id_y;
y.constant_id = get_decoration(execution.workgroup_size.id_y, DecorationSpecId);
}
auto &cz = get<SPIRConstant>(execution.workgroup_size.id_z);
if (cz.specialization)
{
z.id = execution.workgroup_size.id_z;
z.constant_id = get_decoration(execution.workgroup_size.id_z, DecorationSpecId);
}
}
return execution.workgroup_size.constant;
}
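// Returns the literal argument for an execution mode, e.g. the workgroup
// dimensions for LocalSize or the invocation count for Invocations.
// When the module used LocalSizeId, querying LocalSize resolves the referenced
// constants so callers still get concrete dimensions.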
uint32_t Compiler::get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index) const
{
auto &execution = get_entry_point();
switch (mode)
{
case ExecutionModeLocalSizeId:
if (execution.flags.get(ExecutionModeLocalSizeId))
{
switch (index)
{
case 0:
return execution.workgroup_size.id_x;
case 1:
return execution.workgroup_size.id_y;
case 2:
return execution.workgroup_size.id_z;
default:
return 0;
}
}
else
return 0;
case ExecutionModeLocalSize:
switch (index)
{
case 0:
if (execution.flags.get(ExecutionModeLocalSizeId) && execution.workgroup_size.id_x != 0)
return get<SPIRConstant>(execution.workgroup_size.id_x).scalar();
else
return execution.workgroup_size.x;
case 1:
if (execution.flags.get(ExecutionModeLocalSizeId) && execution.workgroup_size.id_y != 0)
return get<SPIRConstant>(execution.workgroup_size.id_y).scalar();
else
return execution.workgroup_size.y;
case 2:
if (execution.flags.get(ExecutionModeLocalSizeId) && execution.workgroup_size.id_z != 0)
return get<SPIRConstant>(execution.workgroup_size.id_z).scalar();
else
return execution.workgroup_size.z;
default:
return 0;
}
case ExecutionModeInvocations:
return execution.invocations;
case ExecutionModeOutputVertices:
return execution.output_vertices;
case ExecutionModeOutputPrimitivesEXT:
return execution.output_primitives;
default:
return 0;
}
}
ExecutionModel Compiler::get_execution_model() const
{
auto &execution = get_entry_point();
return execution.model;
}
bool Compiler::is_tessellation_shader(ExecutionModel model)
{
return model == ExecutionModelTessellationControl || model == ExecutionModelTessellationEvaluation;
}
bool Compiler::is_vertex_like_shader() const
{
auto model = get_execution_model();
return model == ExecutionModelVertex || model == ExecutionModelGeometry ||
model == ExecutionModelTessellationControl || model == ExecutionModelTessellationEvaluation;
}
bool Compiler::is_tessellation_shader() const
{
return is_tessellation_shader(get_execution_model());
}
bool Compiler::is_tessellating_triangles() const
{
return get_execution_mode_bitset().get(ExecutionModeTriangles);
}
void Compiler::set_remapped_variable_state(VariableID id, bool remap_enable)
{
get<SPIRVariable>(id).remapped_variable = remap_enable;
}
bool Compiler::get_remapped_variable_state(VariableID id) const
{
return get<SPIRVariable>(id).remapped_variable;
}
void Compiler::set_subpass_input_remapped_components(VariableID id, uint32_t components)
{
get<SPIRVariable>(id).remapped_components = components;
}
uint32_t Compiler::get_subpass_input_remapped_components(VariableID id) const
{
return get<SPIRVariable>(id).remapped_components;
}
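// Registers `source` as implicitly read whenever expression `e` is consumed,
// so that reads and invalidation propagate through the dependency.
// Duplicate registrations are skipped.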
void Compiler::add_implied_read_expression(SPIRExpression &e, uint32_t source)
{
auto itr = find(begin(e.implied_read_expressions), end(e.implied_read_expressions), ID(source));
if (itr == end(e.implied_read_expressions))
e.implied_read_expressions.push_back(source);
}
void Compiler::add_implied_read_expression(SPIRAccessChain &e, uint32_t source)
{
auto itr = find(begin(e.implied_read_expressions), end(e.implied_read_expressions), ID(source));
if (itr == end(e.implied_read_expressions))
e.implied_read_expressions.push_back(source);
}
void Compiler::add_active_interface_variable(uint32_t var_id)
{
active_interface_variables.insert(var_id);
// In SPIR-V 1.4 and up we must also track the interface variable in the entry point.
if (ir.get_spirv_version() >= 0x10400)
{
auto &vars = get_entry_point().interface_variables;
if (find(begin(vars), end(vars), VariableID(var_id)) == end(vars))
vars.push_back(var_id);
}
}
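// Propagates expression dependencies: a forwarded temporary must be invalidated
// whenever anything it was computed from is invalidated, so dst inherits
// source's entire dependency set.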
void Compiler::inherit_expression_dependencies(uint32_t dst, uint32_t source_expression)
{
// Don't inherit any expression dependencies if the expression in dst
// is not a forwarded temporary.
if (forwarded_temporaries.find(dst) == end(forwarded_temporaries) ||
forced_temporaries.find(dst) != end(forced_temporaries))
{
return;
}
auto &e = get<SPIRExpression>(dst);
auto *phi = maybe_get<SPIRVariable>(source_expression);
if (phi && phi->phi_variable)
{
// We have used a phi variable, which can change at the end of the block,
// so make sure we take a dependency on this phi variable.
phi->dependees.push_back(dst);
}
auto *s = maybe_get<SPIRExpression>(source_expression);
if (!s)
return;
auto &e_deps = e.expression_dependencies;
auto &s_deps = s->expression_dependencies;
// If we depend on an expression, we also depend on all sub-dependencies from source.
e_deps.push_back(source_expression);
e_deps.insert(end(e_deps), begin(s_deps), end(s_deps));
// Eliminate duplicated dependencies.
sort(begin(e_deps), end(e_deps));
e_deps.erase(unique(begin(e_deps), end(e_deps)), end(e_deps));
}
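// Lists all entry points as { original name, execution model } pairs.
// A minimal sketch of how this pairs with set_entry_point() below (illustrative
// only; assumes `compiler` is a constructed Compiler subclass):
//
//   for (const EntryPoint &e : compiler.get_entry_points_and_stages())
//       if (e.execution_model == spv::ExecutionModelFragment)
//           compiler.set_entry_point(e.name, e.execution_model);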
SmallVector<EntryPoint> Compiler::get_entry_points_and_stages() const
{
SmallVector<EntryPoint> entries;
for (auto &entry : ir.entry_points)
entries.push_back({ entry.second.orig_name, entry.second.model });
return entries;
}
void Compiler::rename_entry_point(const std::string &old_name, const std::string &new_name, spv::ExecutionModel model)
{
auto &entry = get_entry_point(old_name, model);
entry.orig_name = new_name;
entry.name = new_name;
}
void Compiler::set_entry_point(const std::string &name, spv::ExecutionModel model)
{
auto &entry = get_entry_point(name, model);
ir.default_entry_point = entry.self;
}
SPIREntryPoint &Compiler::get_first_entry_point(const std::string &name)
{
auto itr = find_if(
begin(ir.entry_points), end(ir.entry_points),
[&](const std::pair<uint32_t, SPIREntryPoint> &entry) -> bool { return entry.second.orig_name == name; });
if (itr == end(ir.entry_points))
SPIRV_CROSS_THROW("Entry point does not exist.");
return itr->second;
}
const SPIREntryPoint &Compiler::get_first_entry_point(const std::string &name) const
{
auto itr = find_if(
begin(ir.entry_points), end(ir.entry_points),
[&](const std::pair<uint32_t, SPIREntryPoint> &entry) -> bool { return entry.second.orig_name == name; });
if (itr == end(ir.entry_points))
SPIRV_CROSS_THROW("Entry point does not exist.");
return itr->second;
}
SPIREntryPoint &Compiler::get_entry_point(const std::string &name, ExecutionModel model)
{
auto itr = find_if(begin(ir.entry_points), end(ir.entry_points),
[&](const std::pair<uint32_t, SPIREntryPoint> &entry) -> bool {
return entry.second.orig_name == name && entry.second.model == model;
});
if (itr == end(ir.entry_points))
SPIRV_CROSS_THROW("Entry point does not exist.");
return itr->second;
}
const SPIREntryPoint &Compiler::get_entry_point(const std::string &name, ExecutionModel model) const
{
auto itr = find_if(begin(ir.entry_points), end(ir.entry_points),
[&](const std::pair<uint32_t, SPIREntryPoint> &entry) -> bool {
return entry.second.orig_name == name && entry.second.model == model;
});
if (itr == end(ir.entry_points))
SPIRV_CROSS_THROW("Entry point does not exist.");
return itr->second;
}
const string &Compiler::get_cleansed_entry_point_name(const std::string &name, ExecutionModel model) const
{
return get_entry_point(name, model).name;
}
const SPIREntryPoint &Compiler::get_entry_point() const
{
return ir.entry_points.find(ir.default_entry_point)->second;
}
SPIREntryPoint &Compiler::get_entry_point()
{
return ir.entry_points.find(ir.default_entry_point)->second;
}
bool Compiler::interface_variable_exists_in_entry_point(uint32_t id) const
{
auto &var = get<SPIRVariable>(id);
if (ir.get_spirv_version() < 0x10400)
{
if (var.storage != StorageClassInput && var.storage != StorageClassOutput &&
var.storage != StorageClassUniformConstant)
SPIRV_CROSS_THROW("Only Input, Output variables and Uniform constants are part of a shader linking interface.");
// This is to avoid potential problems with very old glslang versions which did
// not emit input/output interfaces properly.
// We can assume they only had a single entry point, and single entry point
// shaders could easily be assumed to use every interface variable anyways.
if (ir.entry_points.size() <= 1)
return true;
}
// In SPIR-V 1.4 and later, all global resource variables must be present.
auto &execution = get_entry_point();
return find(begin(execution.interface_variables), end(execution.interface_variables), VariableID(id)) !=
end(execution.interface_variables);
}
void Compiler::CombinedImageSamplerHandler::push_remap_parameters(const SPIRFunction &func, const uint32_t *args,
uint32_t length)
{
// If possible, pipe through a remapping table so that parameters know
// which variables they actually bind to in this scope.
unordered_map<uint32_t, uint32_t> remapping;
for (uint32_t i = 0; i < length; i++)
remapping[func.arguments[i].id] = remap_parameter(args[i]);
parameter_remapping.push(std::move(remapping));
}
void Compiler::CombinedImageSamplerHandler::pop_remap_parameters()
{
parameter_remapping.pop();
}
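// Resolves an ID through the innermost parameter remapping scope: inside a
// called function, a parameter ID maps back to whatever variable the caller
// bound at the current call site. IDs without a mapping resolve to their
// backing variable.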
uint32_t Compiler::CombinedImageSamplerHandler::remap_parameter(uint32_t id)
{
auto *var = compiler.maybe_get_backing_variable(id);
if (var)
id = var->self;
if (parameter_remapping.empty())
return id;
auto &remapping = parameter_remapping.top();
auto itr = remapping.find(id);
if (itr != end(remapping))
return itr->second;
else
return id;
}
bool Compiler::CombinedImageSamplerHandler::begin_function_scope(const uint32_t *args, uint32_t length)
{
if (length < 3)
return false;
auto &callee = compiler.get<SPIRFunction>(args[2]);
args += 3;
length -= 3;
push_remap_parameters(callee, args, length);
functions.push(&callee);
return true;
}
bool Compiler::CombinedImageSamplerHandler::end_function_scope(const uint32_t *args, uint32_t length)
{
if (length < 3)
return false;
auto &callee = compiler.get<SPIRFunction>(args[2]);
args += 3;
// There are two kinds of cases we have to handle:
// a callee might call sampler2D(texture2D, sampler) directly, where
// one or more of the arguments originate from the caller's own parameters.
// Alternatively, we need to provide combined image samplers to our callees,
// and in that case we need to add those as well.
pop_remap_parameters();
// Our callee has now been processed at least once.
// No point in doing it again.
callee.do_combined_parameters = false;
auto &params = functions.top()->combined_parameters;
functions.pop();
if (functions.empty())
return true;
auto &caller = *functions.top();
if (caller.do_combined_parameters)
{
for (auto &param : params)
{
VariableID image_id = param.global_image ? param.image_id : VariableID(args[param.image_id]);
VariableID sampler_id = param.global_sampler ? param.sampler_id : VariableID(args[param.sampler_id]);
auto *i = compiler.maybe_get_backing_variable(image_id);
auto *s = compiler.maybe_get_backing_variable(sampler_id);
if (i)
image_id = i->self;
if (s)
sampler_id = s->self;
register_combined_image_sampler(caller, 0, image_id, sampler_id, param.depth);
}
}
return true;
}
void Compiler::CombinedImageSamplerHandler::register_combined_image_sampler(SPIRFunction &caller,
VariableID combined_module_id,
VariableID image_id, VariableID sampler_id,
bool depth)
{
// We now have a texture ID and a sampler ID which will either be found as globals
// or as parameters in our own function. If both are global, no new parameter is
// needed; otherwise, add one to our list.
SPIRFunction::CombinedImageSamplerParameter param = {
0u, image_id, sampler_id, true, true, depth,
};
auto texture_itr = find_if(begin(caller.arguments), end(caller.arguments),
[image_id](const SPIRFunction::Parameter &p) { return p.id == image_id; });
auto sampler_itr = find_if(begin(caller.arguments), end(caller.arguments),
[sampler_id](const SPIRFunction::Parameter &p) { return p.id == sampler_id; });
if (texture_itr != end(caller.arguments))
{
param.global_image = false;
param.image_id = uint32_t(texture_itr - begin(caller.arguments));
}
if (sampler_itr != end(caller.arguments))
{
param.global_sampler = false;
param.sampler_id = uint32_t(sampler_itr - begin(caller.arguments));
}
if (param.global_image && param.global_sampler)
return;
auto itr = find_if(begin(caller.combined_parameters), end(caller.combined_parameters),
[&param](const SPIRFunction::CombinedImageSamplerParameter &p) {
return param.image_id == p.image_id && param.sampler_id == p.sampler_id &&
param.global_image == p.global_image && param.global_sampler == p.global_sampler;
});
if (itr == end(caller.combined_parameters))
{
uint32_t id = compiler.ir.increase_bound_by(3);
auto type_id = id + 0;
auto ptr_type_id = id + 1;
auto combined_id = id + 2;
auto &base = compiler.expression_type(image_id);
auto &type = compiler.set<SPIRType>(type_id);
auto &ptr_type = compiler.set<SPIRType>(ptr_type_id);
type = base;
type.self = type_id;
type.basetype = SPIRType::SampledImage;
type.pointer = false;
type.storage = StorageClassGeneric;
type.image.depth = depth;
ptr_type = type;
ptr_type.pointer = true;
ptr_type.storage = StorageClassUniformConstant;
ptr_type.parent_type = type_id;
// Build new variable.
compiler.set<SPIRVariable>(combined_id, ptr_type_id, StorageClassFunction, 0);
// Inherit RelaxedPrecision.
// If any of OpSampledImage, underlying image or sampler are marked, inherit the decoration.
bool relaxed_precision =
compiler.has_decoration(sampler_id, DecorationRelaxedPrecision) ||
compiler.has_decoration(image_id, DecorationRelaxedPrecision) ||
(combined_module_id && compiler.has_decoration(combined_module_id, DecorationRelaxedPrecision));
if (relaxed_precision)
compiler.set_decoration(combined_id, DecorationRelaxedPrecision);
param.id = combined_id;
compiler.set_name(combined_id,
join("SPIRV_Cross_Combined", compiler.to_name(image_id), compiler.to_name(sampler_id)));
caller.combined_parameters.push_back(param);
caller.shadow_arguments.push_back({ ptr_type_id, combined_id, 0u, 0u, true });
}
}
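// Detects whether a dummy sampler is required. Opcodes like OpImageFetch and
// the image query opcodes read from a separate image without any sampler, but
// targets which only know combined image samplers (e.g. GLSL's texelFetch on
// sampler2D) still need a sampler object to form one.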
bool Compiler::DummySamplerForCombinedImageHandler::handle(Op opcode, const uint32_t *args, uint32_t length)
{
if (need_dummy_sampler)
{
// No need to traverse further, we know the result.
return false;
}
switch (opcode)
{
case OpLoad:
{
if (length < 3)
return false;
uint32_t result_type = args[0];
auto &type = compiler.get<SPIRType>(result_type);
bool separate_image =
type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer;
// If not separate image, don't bother.
if (!separate_image)
return true;
uint32_t id = args[1];
uint32_t ptr = args[2];
compiler.set<SPIRExpression>(id, "", result_type, true);
compiler.register_read(id, ptr, true);
break;
}
case OpImageFetch:
case OpImageQuerySizeLod:
case OpImageQuerySize:
case OpImageQueryLevels:
case OpImageQuerySamples:
{
// If we are fetching or querying LOD from a plain OpTypeImage, we must pre-combine with our dummy sampler.
auto *var = compiler.maybe_get_backing_variable(args[2]);
if (var)
{
auto &type = compiler.get<SPIRType>(var->basetype);
if (type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer)
need_dummy_sampler = true;
}
break;
}
case OpInBoundsAccessChain:
case OpAccessChain:
case OpPtrAccessChain:
{
if (length < 3)
return false;
uint32_t result_type = args[0];
auto &type = compiler.get<SPIRType>(result_type);
bool separate_image =
type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer;
if (!separate_image)
return true;
uint32_t id = args[1];
uint32_t ptr = args[2];
compiler.set<SPIRExpression>(id, "", result_type, true);
compiler.register_read(id, ptr, true);
// Other backends might use SPIRAccessChain for this later.
compiler.ir.ids[id].set_allow_type_rewrite();
break;
}
default:
break;
}
return true;
}
bool Compiler::CombinedImageSamplerHandler::handle(Op opcode, const uint32_t *args, uint32_t length)
{
// We need to figure out where samplers and images are loaded from, so do only the bare-bones analysis we need.
bool is_fetch = false;
switch (opcode)
{
case OpLoad:
{
if (length < 3)
return false;