/*
* Copyright 2015-2021 Arm Limited
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* At your option, you may choose to accept this material under either:
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
*/
#include "spirv_glsl.hpp"
#include "GLSL.std.450.h"
#include "spirv_common.hpp"
#include <algorithm>
#include <assert.h>
#include <cmath>
#include <limits>
#include <locale.h>
#include <utility>
#ifndef _WIN32
#include <langinfo.h>
#endif
using namespace spv;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
enum ExtraSubExpressionType
{
// Create masks above any legal ID range to allow multiple address spaces into the extra_sub_expressions map.
EXTRA_SUB_EXPRESSION_TYPE_STREAM_OFFSET = 0x10000000,
EXTRA_SUB_EXPRESSION_TYPE_AUX = 0x20000000
};
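// Illustrative note (assumed usage pattern, not a verbatim quote from this file): keying the
// extra_sub_expressions map as e.g. (some_id | EXTRA_SUB_EXPRESSION_TYPE_STREAM_OFFSET) cannot
// collide with a plain SPIR-V ID, since legal IDs are bounded by the module's ID bound and sit
// far below 0x10000000, so each mask effectively forms its own address space in the map.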
static bool is_unsigned_opcode(Op op)
{
// Don't have to be exhaustive, only relevant for legacy target checking ...
switch (op)
{
case OpShiftRightLogical:
case OpUGreaterThan:
case OpUGreaterThanEqual:
case OpULessThan:
case OpULessThanEqual:
case OpUConvert:
case OpUDiv:
case OpUMod:
case OpUMulExtended:
case OpConvertUToF:
case OpConvertFToU:
return true;
default:
return false;
}
}
static bool is_unsigned_glsl_opcode(GLSLstd450 op)
{
// Don't have to be exhaustive, only relevant for legacy target checking ...
switch (op)
{
case GLSLstd450UClamp:
case GLSLstd450UMin:
case GLSLstd450UMax:
case GLSLstd450FindUMsb:
return true;
default:
return false;
}
}
static bool packing_is_vec4_padded(BufferPackingStandard packing)
{
switch (packing)
{
case BufferPackingHLSLCbuffer:
case BufferPackingHLSLCbufferPackOffset:
case BufferPackingStd140:
case BufferPackingStd140EnhancedLayout:
return true;
default:
return false;
}
}
static bool packing_is_hlsl(BufferPackingStandard packing)
{
switch (packing)
{
case BufferPackingHLSLCbuffer:
case BufferPackingHLSLCbufferPackOffset:
return true;
default:
return false;
}
}
static bool packing_has_flexible_offset(BufferPackingStandard packing)
{
switch (packing)
{
case BufferPackingStd140:
case BufferPackingStd430:
case BufferPackingScalar:
case BufferPackingHLSLCbuffer:
return false;
default:
return true;
}
}
static bool packing_is_scalar(BufferPackingStandard packing)
{
switch (packing)
{
case BufferPackingScalar:
case BufferPackingScalarEnhancedLayout:
return true;
default:
return false;
}
}
static BufferPackingStandard packing_to_substruct_packing(BufferPackingStandard packing)
{
switch (packing)
{
case BufferPackingStd140EnhancedLayout:
return BufferPackingStd140;
case BufferPackingStd430EnhancedLayout:
return BufferPackingStd430;
case BufferPackingHLSLCbufferPackOffset:
return BufferPackingHLSLCbuffer;
case BufferPackingScalarEnhancedLayout:
return BufferPackingScalar;
default:
return packing;
}
}
void CompilerGLSL::init()
{
if (ir.source.known)
{
options.es = ir.source.es;
options.version = ir.source.version;
}
// Query the locale to see what the decimal point is.
// We'll rely on fixing it up ourselves in the rare case we have a comma-as-decimal locale
// rather than setting locales ourselves. Setting locales in a safe and isolated way is rather
// tricky.
#ifdef _WIN32
// On Windows, localeconv uses thread-local storage, so it should be fine.
const struct lconv *conv = localeconv();
if (conv && conv->decimal_point)
current_locale_radix_character = *conv->decimal_point;
#elif defined(__ANDROID__) && __ANDROID_API__ < 26
// nl_langinfo is not supported on this platform, fall back to the worse alternative.
const struct lconv *conv = localeconv();
if (conv && conv->decimal_point)
current_locale_radix_character = *conv->decimal_point;
#else
// localeconv, the portable alternative, is not MT-safe ...
const char *decimal_point = nl_langinfo(RADIXCHAR);
if (decimal_point && *decimal_point != '\0')
current_locale_radix_character = *decimal_point;
#endif
}
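// Worked example of the problem guarded against above: with a comma-as-decimal locale
// (e.g. de_DE, given here for illustration only), printf-style float formatting yields "1,5"
// rather than "1.5", which is not valid GLSL. current_locale_radix_character records the
// locale's radix so the float-to-string paths can fix the literal up to use '.'.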
static const char *to_pls_layout(PlsFormat format)
{
switch (format)
{
case PlsR11FG11FB10F:
return "layout(r11f_g11f_b10f) ";
case PlsR32F:
return "layout(r32f) ";
case PlsRG16F:
return "layout(rg16f) ";
case PlsRGB10A2:
return "layout(rgb10_a2) ";
case PlsRGBA8:
return "layout(rgba8) ";
case PlsRG16:
return "layout(rg16) ";
case PlsRGBA8I:
return "layout(rgba8i)";
case PlsRG16I:
return "layout(rg16i) ";
case PlsRGB10A2UI:
return "layout(rgb10_a2ui) ";
case PlsRGBA8UI:
return "layout(rgba8ui) ";
case PlsRG16UI:
return "layout(rg16ui) ";
case PlsR32UI:
return "layout(r32ui) ";
default:
return "";
}
}
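// Illustrative sketch (assumed usage, not verbatim from this file): these strings are prefixed
// directly onto pixel local storage block members, e.g. a PlsRGBA8 entry is emitted roughly as
//   layout(rgba8) vec4 color;
// inside a __pixel_localEXT/__pixel_local_inEXT/__pixel_local_outEXT block, hence the trailing space.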
static SPIRType::BaseType pls_format_to_basetype(PlsFormat format)
{
switch (format)
{
default:
case PlsR11FG11FB10F:
case PlsR32F:
case PlsRG16F:
case PlsRGB10A2:
case PlsRGBA8:
case PlsRG16:
return SPIRType::Float;
case PlsRGBA8I:
case PlsRG16I:
return SPIRType::Int;
case PlsRGB10A2UI:
case PlsRGBA8UI:
case PlsRG16UI:
case PlsR32UI:
return SPIRType::UInt;
}
}
static uint32_t pls_format_to_components(PlsFormat format)
{
switch (format)
{
default:
case PlsR32F:
case PlsR32UI:
return 1;
case PlsRG16F:
case PlsRG16:
case PlsRG16UI:
case PlsRG16I:
return 2;
case PlsR11FG11FB10F:
return 3;
case PlsRGB10A2:
case PlsRGBA8:
case PlsRGBA8I:
case PlsRGB10A2UI:
case PlsRGBA8UI:
return 4;
}
}
const char *CompilerGLSL::vector_swizzle(int vecsize, int index)
{
static const char *const swizzle[4][4] = {
{ ".x", ".y", ".z", ".w" },
{ ".xy", ".yz", ".zw", nullptr },
{ ".xyz", ".yzw", nullptr, nullptr },
#if defined(__GNUC__) && (__GNUC__ == 9)
// This works around a GCC 9 bug, see details in https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90947.
// This array ends up being compiled as all nullptrs, tripping the assertions below.
{ "", nullptr, nullptr, "$" },
#else
{ "", nullptr, nullptr, nullptr },
#endif
};
assert(vecsize >= 1 && vecsize <= 4);
assert(index >= 0 && index < 4);
assert(swizzle[vecsize - 1][index]);
return swizzle[vecsize - 1][index];
}
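// Usage examples, derived directly from the table above:
//   vector_swizzle(1, 2) -> ".z"    (single component at index 2)
//   vector_swizzle(2, 1) -> ".yz"   (two components starting at index 1)
//   vector_swizzle(4, 0) -> ""      (full vector, no swizzle needed)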
void CompilerGLSL::reset(uint32_t iteration_count)
{
// Sanity check the iteration count to be robust against a certain class of bugs where
// we keep forcing recompilations without making clear forward progress.
// In buggy situations we will loop forever, or loop for an unbounded number of iterations.
// Certain types of recompilations are considered to make forward progress,
// but in almost all situations, we'll never see more than 3 iterations.
// It is highly context-sensitive when we need to force recompilation,
// and it is not practical with the current architecture
// to resolve everything up front.
if (iteration_count >= options.force_recompile_max_debug_iterations && !is_force_recompile_forward_progress)
SPIRV_CROSS_THROW("Maximum compilation loops detected and no forward progress was made. Must be a SPIRV-Cross bug!");
// We do some speculative optimizations which should pretty much always work out,
// but just in case the SPIR-V is rather weird, recompile until it's happy.
// This typically only means one extra pass.
clear_force_recompile();
// Clear invalid expression tracking.
invalid_expressions.clear();
composite_insert_overwritten.clear();
current_function = nullptr;
// Clear temporary usage tracking.
expression_usage_counts.clear();
forwarded_temporaries.clear();
suppressed_usage_tracking.clear();
// Ensure that we declare phi-variable copies even if the original declaration isn't deferred
flushed_phi_variables.clear();
current_emitting_switch_stack.clear();
reset_name_caches();
ir.for_each_typed_id<SPIRFunction>([&](uint32_t, SPIRFunction &func) {
func.active = false;
func.flush_undeclared = true;
});
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) { var.dependees.clear(); });
ir.reset_all_of_type<SPIRExpression>();
ir.reset_all_of_type<SPIRAccessChain>();
statement_count = 0;
indent = 0;
current_loop_level = 0;
}
void CompilerGLSL::remap_pls_variables()
{
for (auto &input : pls_inputs)
{
auto &var = get<SPIRVariable>(input.id);
bool input_is_target = false;
if (var.storage == StorageClassUniformConstant)
{
auto &type = get<SPIRType>(var.basetype);
input_is_target = type.image.dim == DimSubpassData;
}
if (var.storage != StorageClassInput && !input_is_target)
SPIRV_CROSS_THROW("Can only use in and target variables for PLS inputs.");
var.remapped_variable = true;
}
for (auto &output : pls_outputs)
{
auto &var = get<SPIRVariable>(output.id);
if (var.storage != StorageClassOutput)
SPIRV_CROSS_THROW("Can only use out variables for PLS outputs.");
var.remapped_variable = true;
}
}
void CompilerGLSL::remap_ext_framebuffer_fetch(uint32_t input_attachment_index, uint32_t color_location, bool coherent)
{
subpass_to_framebuffer_fetch_attachment.push_back({ input_attachment_index, color_location });
inout_color_attachments.push_back({ color_location, coherent });
}
bool CompilerGLSL::location_is_framebuffer_fetch(uint32_t location) const
{
return std::find_if(begin(inout_color_attachments), end(inout_color_attachments),
[&](const std::pair<uint32_t, bool> &elem) {
return elem.first == location;
}) != end(inout_color_attachments);
}
bool CompilerGLSL::location_is_non_coherent_framebuffer_fetch(uint32_t location) const
{
return std::find_if(begin(inout_color_attachments), end(inout_color_attachments),
[&](const std::pair<uint32_t, bool> &elem) {
return elem.first == location && !elem.second;
}) != end(inout_color_attachments);
}
void CompilerGLSL::find_static_extensions()
{
ir.for_each_typed_id<SPIRType>([&](uint32_t, const SPIRType &type) {
if (type.basetype == SPIRType::Double)
{
if (options.es)
SPIRV_CROSS_THROW("FP64 not supported in ES profile.");
if (!options.es && options.version < 400)
require_extension_internal("GL_ARB_gpu_shader_fp64");
}
else if (type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64)
{
if (options.es && options.version < 310) // GL_NV_gpu_shader5 fallback requires 310.
SPIRV_CROSS_THROW("64-bit integers not supported in ES profile before version 310.");
require_extension_internal("GL_ARB_gpu_shader_int64");
}
else if (type.basetype == SPIRType::Half)
{
require_extension_internal("GL_EXT_shader_explicit_arithmetic_types_float16");
if (options.vulkan_semantics)
require_extension_internal("GL_EXT_shader_16bit_storage");
}
else if (type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte)
{
require_extension_internal("GL_EXT_shader_explicit_arithmetic_types_int8");
if (options.vulkan_semantics)
require_extension_internal("GL_EXT_shader_8bit_storage");
}
else if (type.basetype == SPIRType::Short || type.basetype == SPIRType::UShort)
{
require_extension_internal("GL_EXT_shader_explicit_arithmetic_types_int16");
if (options.vulkan_semantics)
require_extension_internal("GL_EXT_shader_16bit_storage");
}
});
auto &execution = get_entry_point();
switch (execution.model)
{
case ExecutionModelGLCompute:
if (!options.es && options.version < 430)
require_extension_internal("GL_ARB_compute_shader");
if (options.es && options.version < 310)
SPIRV_CROSS_THROW("At least ESSL 3.10 required for compute shaders.");
break;
case ExecutionModelGeometry:
if (options.es && options.version < 320)
require_extension_internal("GL_EXT_geometry_shader");
if (!options.es && options.version < 150)
require_extension_internal("GL_ARB_geometry_shader4");
if (execution.flags.get(ExecutionModeInvocations) && execution.invocations != 1)
{
// Instanced GS is part of 400 core or this extension.
if (!options.es && options.version < 400)
require_extension_internal("GL_ARB_gpu_shader5");
}
break;
case ExecutionModelTessellationEvaluation:
case ExecutionModelTessellationControl:
if (options.es && options.version < 320)
require_extension_internal("GL_EXT_tessellation_shader");
if (!options.es && options.version < 400)
require_extension_internal("GL_ARB_tessellation_shader");
break;
case ExecutionModelRayGenerationKHR:
case ExecutionModelIntersectionKHR:
case ExecutionModelAnyHitKHR:
case ExecutionModelClosestHitKHR:
case ExecutionModelMissKHR:
case ExecutionModelCallableKHR:
// NV enums are aliases.
if (options.es || options.version < 460)
SPIRV_CROSS_THROW("Ray tracing shaders require non-es profile with version 460 or above.");
if (!options.vulkan_semantics)
SPIRV_CROSS_THROW("Ray tracing requires Vulkan semantics.");
// Need to figure out if we should target KHR or NV extension based on capabilities.
for (auto &cap : ir.declared_capabilities)
{
if (cap == CapabilityRayTracingKHR || cap == CapabilityRayQueryKHR ||
cap == CapabilityRayTraversalPrimitiveCullingKHR)
{
ray_tracing_is_khr = true;
break;
}
}
if (ray_tracing_is_khr)
{
// In KHR ray tracing we pass payloads by pointer instead of location,
// so make sure we assign locations properly.
ray_tracing_khr_fixup_locations();
require_extension_internal("GL_EXT_ray_tracing");
}
else
require_extension_internal("GL_NV_ray_tracing");
break;
case ExecutionModelMeshEXT:
case ExecutionModelTaskEXT:
if (options.es || options.version < 450)
SPIRV_CROSS_THROW("Mesh shaders require GLSL 450 or above.");
if (!options.vulkan_semantics)
SPIRV_CROSS_THROW("Mesh shaders require Vulkan semantics.");
require_extension_internal("GL_EXT_mesh_shader");
break;
default:
break;
}
if (!pls_inputs.empty() || !pls_outputs.empty())
{
if (execution.model != ExecutionModelFragment)
SPIRV_CROSS_THROW("Can only use GL_EXT_shader_pixel_local_storage in fragment shaders.");
require_extension_internal("GL_EXT_shader_pixel_local_storage");
}
if (!inout_color_attachments.empty())
{
if (execution.model != ExecutionModelFragment)
SPIRV_CROSS_THROW("Can only use GL_EXT_shader_framebuffer_fetch in fragment shaders.");
if (options.vulkan_semantics)
SPIRV_CROSS_THROW("Cannot use EXT_shader_framebuffer_fetch in Vulkan GLSL.");
bool has_coherent = false;
bool has_incoherent = false;
for (auto &att : inout_color_attachments)
{
if (att.second)
has_coherent = true;
else
has_incoherent = true;
}
if (has_coherent)
require_extension_internal("GL_EXT_shader_framebuffer_fetch");
if (has_incoherent)
require_extension_internal("GL_EXT_shader_framebuffer_fetch_non_coherent");
}
if (options.separate_shader_objects && !options.es && options.version < 410)
require_extension_internal("GL_ARB_separate_shader_objects");
if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
{
if (!options.vulkan_semantics)
SPIRV_CROSS_THROW("GL_EXT_buffer_reference is only supported in Vulkan GLSL.");
if (options.es && options.version < 320)
SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires ESSL 320.");
else if (!options.es && options.version < 450)
SPIRV_CROSS_THROW("GL_EXT_buffer_reference requires GLSL 450.");
require_extension_internal("GL_EXT_buffer_reference");
}
else if (ir.addressing_model != AddressingModelLogical)
{
SPIRV_CROSS_THROW("Only Logical and PhysicalStorageBuffer64EXT addressing models are supported.");
}
// Check for nonuniform qualifier and passthrough.
// Instead of looping over all decorations to find this, just look at capabilities.
for (auto &cap : ir.declared_capabilities)
{
switch (cap)
{
case CapabilityShaderNonUniformEXT:
if (!options.vulkan_semantics)
require_extension_internal("GL_NV_gpu_shader5");
else
require_extension_internal("GL_EXT_nonuniform_qualifier");
break;
case CapabilityRuntimeDescriptorArrayEXT:
if (!options.vulkan_semantics)
SPIRV_CROSS_THROW("GL_EXT_nonuniform_qualifier is only supported in Vulkan GLSL.");
require_extension_internal("GL_EXT_nonuniform_qualifier");
break;
case CapabilityGeometryShaderPassthroughNV:
if (execution.model == ExecutionModelGeometry)
{
require_extension_internal("GL_NV_geometry_shader_passthrough");
execution.geometry_passthrough = true;
}
break;
case CapabilityVariablePointers:
case CapabilityVariablePointersStorageBuffer:
SPIRV_CROSS_THROW("VariablePointers capability is not supported in GLSL.");
case CapabilityMultiView:
if (options.vulkan_semantics)
require_extension_internal("GL_EXT_multiview");
else
{
require_extension_internal("GL_OVR_multiview2");
if (options.ovr_multiview_view_count == 0)
SPIRV_CROSS_THROW("ovr_multiview_view_count must be non-zero when using GL_OVR_multiview2.");
if (get_execution_model() != ExecutionModelVertex)
SPIRV_CROSS_THROW("OVR_multiview2 can only be used with Vertex shaders.");
}
break;
case CapabilityRayQueryKHR:
if (options.es || options.version < 460 || !options.vulkan_semantics)
SPIRV_CROSS_THROW("RayQuery requires Vulkan GLSL 460.");
require_extension_internal("GL_EXT_ray_query");
ray_tracing_is_khr = true;
break;
case CapabilityRayTraversalPrimitiveCullingKHR:
if (options.es || options.version < 460 || !options.vulkan_semantics)
SPIRV_CROSS_THROW("RayQuery requires Vulkan GLSL 460.");
require_extension_internal("GL_EXT_ray_flags_primitive_culling");
ray_tracing_is_khr = true;
break;
default:
break;
}
}
if (options.ovr_multiview_view_count)
{
if (options.vulkan_semantics)
SPIRV_CROSS_THROW("OVR_multiview2 cannot be used with Vulkan semantics.");
if (get_execution_model() != ExecutionModelVertex)
SPIRV_CROSS_THROW("OVR_multiview2 can only be used with Vertex shaders.");
require_extension_internal("GL_OVR_multiview2");
}
// The KHR extension is likely to get promoted at some point, so if we don't see an explicit SPIR-V extension, assume KHR.
for (auto &ext : ir.declared_extensions)
if (ext == "SPV_NV_fragment_shader_barycentric")
barycentric_is_nv = true;
}
void CompilerGLSL::require_polyfill(Polyfill polyfill, bool relaxed)
{
uint32_t &polyfills = (relaxed && options.es) ? required_polyfills_relaxed : required_polyfills;
if ((polyfills & polyfill) == 0)
{
polyfills |= polyfill;
force_recompile();
}
}
void CompilerGLSL::ray_tracing_khr_fixup_locations()
{
uint32_t location = 0;
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
// Incoming payload storage can also be used for tracing.
if (var.storage != StorageClassRayPayloadKHR && var.storage != StorageClassCallableDataKHR &&
var.storage != StorageClassIncomingRayPayloadKHR && var.storage != StorageClassIncomingCallableDataKHR)
return;
if (is_hidden_variable(var))
return;
set_decoration(var.self, DecorationLocation, location++);
});
}
string CompilerGLSL::compile()
{
ir.fixup_reserved_names();
if (!options.vulkan_semantics)
{
// only NV_gpu_shader5 supports divergent indexing on OpenGL, and it does so without extra qualifiers
backend.nonuniform_qualifier = "";
backend.needs_row_major_load_workaround = options.enable_row_major_load_workaround;
}
backend.allow_precision_qualifiers = options.vulkan_semantics || options.es;
backend.force_gl_in_out_block = true;
backend.supports_extensions = true;
backend.use_array_constructor = true;
backend.workgroup_size_is_hidden = true;
backend.requires_relaxed_precision_analysis = options.es || options.vulkan_semantics;
backend.support_precise_qualifier =
(!options.es && options.version >= 400) || (options.es && options.version >= 320);
if (is_legacy_es())
backend.support_case_fallthrough = false;
// Scan the SPIR-V to find trivial uses of extensions.
fixup_anonymous_struct_names();
fixup_type_alias();
reorder_type_alias();
build_function_control_flow_graphs_and_analyze();
find_static_extensions();
fixup_image_load_store_access();
update_active_builtins();
analyze_image_and_sampler_usage();
analyze_interlocked_resource_usage();
if (!inout_color_attachments.empty())
emit_inout_fragment_outputs_copy_to_subpass_inputs();
// Shaders might cast unrelated data to pointers of non-block types.
// Find all such instances and make sure we can cast the pointers to a synthesized block type.
if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
analyze_non_block_pointer_types();
uint32_t pass_count = 0;
do
{
reset(pass_count);
buffer.reset();
emit_header();
emit_resources();
emit_extension_workarounds(get_execution_model());
if (required_polyfills != 0)
emit_polyfills(required_polyfills, false);
if (options.es && required_polyfills_relaxed != 0)
emit_polyfills(required_polyfills_relaxed, true);
emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
pass_count++;
} while (is_forcing_recompilation());
// Implement the interlocked wrapper function at the end.
// The body was implemented in lieu of main().
if (interlocked_is_complex)
{
statement("void main()");
begin_scope();
statement("// Interlocks were used in a way not compatible with GLSL, this is very slow.");
statement("SPIRV_Cross_beginInvocationInterlock();");
statement("spvMainInterlockedBody();");
statement("SPIRV_Cross_endInvocationInterlock();");
end_scope();
}
// Entry point in GLSL is always main().
get_entry_point().name = "main";
return buffer.str();
}
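// Minimal usage sketch of this entry point from client code (paraphrased from the SPIRV-Cross README;
// load_spirv_binary() is a hypothetical loader, and the option values are just an example):
//
//   std::vector<uint32_t> spirv = load_spirv_binary();
//   spirv_cross::CompilerGLSL glsl(std::move(spirv));
//   spirv_cross::CompilerGLSL::Options opts;
//   opts.version = 310;
//   opts.es = true;
//   glsl.set_common_options(opts);
//   std::string source = glsl.compile();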
std::string CompilerGLSL::get_partial_source()
{
return buffer.str();
}
void CompilerGLSL::build_workgroup_size(SmallVector<string> &arguments, const SpecializationConstant &wg_x,
const SpecializationConstant &wg_y, const SpecializationConstant &wg_z)
{
auto &execution = get_entry_point();
bool builtin_workgroup = execution.workgroup_size.constant != 0;
bool use_local_size_id = !builtin_workgroup && execution.flags.get(ExecutionModeLocalSizeId);
if (wg_x.id)
{
if (options.vulkan_semantics)
arguments.push_back(join("local_size_x_id = ", wg_x.constant_id));
else
arguments.push_back(join("local_size_x = ", get<SPIRConstant>(wg_x.id).specialization_constant_macro_name));
}
else if (use_local_size_id && execution.workgroup_size.id_x)
arguments.push_back(join("local_size_x = ", get<SPIRConstant>(execution.workgroup_size.id_x).scalar()));
else
arguments.push_back(join("local_size_x = ", execution.workgroup_size.x));
if (wg_y.id)
{
if (options.vulkan_semantics)
arguments.push_back(join("local_size_y_id = ", wg_y.constant_id));
else
arguments.push_back(join("local_size_y = ", get<SPIRConstant>(wg_y.id).specialization_constant_macro_name));
}
else if (use_local_size_id && execution.workgroup_size.id_y)
arguments.push_back(join("local_size_y = ", get<SPIRConstant>(execution.workgroup_size.id_y).scalar()));
else
arguments.push_back(join("local_size_y = ", execution.workgroup_size.y));
if (wg_z.id)
{
if (options.vulkan_semantics)
arguments.push_back(join("local_size_z_id = ", wg_z.constant_id));
else
arguments.push_back(join("local_size_z = ", get<SPIRConstant>(wg_z.id).specialization_constant_macro_name));
}
else if (use_local_size_id && execution.workgroup_size.id_z)
arguments.push_back(join("local_size_z = ", get<SPIRConstant>(execution.workgroup_size.id_z).scalar()));
else
arguments.push_back(join("local_size_z = ", execution.workgroup_size.z));
}
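// For illustration: the arguments built here are later merged into a single layout declaration
// by emit_header(), e.g.
//   layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
// or, with Vulkan semantics and a specialization constant on X,
//   layout(local_size_x_id = 1, local_size_y = 1, local_size_z = 1) in;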
void CompilerGLSL::request_subgroup_feature(ShaderSubgroupSupportHelper::Feature feature)
{
if (options.vulkan_semantics)
{
auto khr_extension = ShaderSubgroupSupportHelper::get_KHR_extension_for_feature(feature);
require_extension_internal(ShaderSubgroupSupportHelper::get_extension_name(khr_extension));
}
else
{
if (!shader_subgroup_supporter.is_feature_requested(feature))
force_recompile();
shader_subgroup_supporter.request_feature(feature);
}
}
void CompilerGLSL::emit_header()
{
auto &execution = get_entry_point();
statement("#version ", options.version, options.es && options.version > 100 ? " es" : "");
if (!options.es && options.version < 420)
{
// Needed for binding = # on UBOs, etc.
if (options.enable_420pack_extension)
{
statement("#ifdef GL_ARB_shading_language_420pack");
statement("#extension GL_ARB_shading_language_420pack : require");
statement("#endif");
}
// Needed for: layout(early_fragment_tests) in;
if (execution.flags.get(ExecutionModeEarlyFragmentTests))
require_extension_internal("GL_ARB_shader_image_load_store");
}
// Needed for: layout(post_depth_coverage) in;
if (execution.flags.get(ExecutionModePostDepthCoverage))
require_extension_internal("GL_ARB_post_depth_coverage");
// Needed for: layout({pixel,sample}_interlock_[un]ordered) in;
bool interlock_used = execution.flags.get(ExecutionModePixelInterlockOrderedEXT) ||
execution.flags.get(ExecutionModePixelInterlockUnorderedEXT) ||
execution.flags.get(ExecutionModeSampleInterlockOrderedEXT) ||
execution.flags.get(ExecutionModeSampleInterlockUnorderedEXT);
if (interlock_used)
{
if (options.es)
{
if (options.version < 310)
SPIRV_CROSS_THROW("At least ESSL 3.10 required for fragment shader interlock.");
require_extension_internal("GL_NV_fragment_shader_interlock");
}
else
{
if (options.version < 420)
require_extension_internal("GL_ARB_shader_image_load_store");
require_extension_internal("GL_ARB_fragment_shader_interlock");
}
}
for (auto &ext : forced_extensions)
{
if (ext == "GL_ARB_gpu_shader_int64")
{
statement("#if defined(GL_ARB_gpu_shader_int64)");
statement("#extension GL_ARB_gpu_shader_int64 : require");
if (!options.vulkan_semantics || options.es)
{
statement("#elif defined(GL_NV_gpu_shader5)");
statement("#extension GL_NV_gpu_shader5 : require");
}
statement("#else");
statement("#error No extension available for 64-bit integers.");
statement("#endif");
}
else if (ext == "GL_EXT_shader_explicit_arithmetic_types_float16")
{
// Special case, this extension has a potential fallback to another vendor extension in normal GLSL.
// GL_AMD_gpu_shader_half_float is a superset, so try that first.
statement("#if defined(GL_AMD_gpu_shader_half_float)");
statement("#extension GL_AMD_gpu_shader_half_float : require");
if (!options.vulkan_semantics)
{
statement("#elif defined(GL_NV_gpu_shader5)");
statement("#extension GL_NV_gpu_shader5 : require");
}
else
{
statement("#elif defined(GL_EXT_shader_explicit_arithmetic_types_float16)");
statement("#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require");
}
statement("#else");
statement("#error No extension available for FP16.");
statement("#endif");
}
else if (ext == "GL_EXT_shader_explicit_arithmetic_types_int8")
{
if (options.vulkan_semantics)
statement("#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require");
else
{
statement("#if defined(GL_EXT_shader_explicit_arithmetic_types_int8)");
statement("#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require");
statement("#elif defined(GL_NV_gpu_shader5)");
statement("#extension GL_NV_gpu_shader5 : require");
statement("#else");
statement("#error No extension available for Int8.");
statement("#endif");
}
}
else if (ext == "GL_EXT_shader_explicit_arithmetic_types_int16")
{
if (options.vulkan_semantics)
statement("#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require");
else
{
statement("#if defined(GL_EXT_shader_explicit_arithmetic_types_int16)");
statement("#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require");
statement("#elif defined(GL_AMD_gpu_shader_int16)");
statement("#extension GL_AMD_gpu_shader_int16 : require");
statement("#elif defined(GL_NV_gpu_shader5)");
statement("#extension GL_NV_gpu_shader5 : require");
statement("#else");
statement("#error No extension available for Int16.");
statement("#endif");
}
}
else if (ext == "GL_ARB_post_depth_coverage")
{
if (options.es)
statement("#extension GL_EXT_post_depth_coverage : require");
else
{
statement("#if defined(GL_ARB_post_depth_coverge)");
statement("#extension GL_ARB_post_depth_coverage : require");
statement("#else");
statement("#extension GL_EXT_post_depth_coverage : require");
statement("#endif");
}
}
else if (!options.vulkan_semantics && ext == "GL_ARB_shader_draw_parameters")
{
// Soft-enable this extension on plain GLSL.
statement("#ifdef ", ext);
statement("#extension ", ext, " : enable");
statement("#endif");
}
else if (ext == "GL_EXT_control_flow_attributes")
{
// These are just hints so we can conditionally enable and fallback in the shader.
statement("#if defined(GL_EXT_control_flow_attributes)");
statement("#extension GL_EXT_control_flow_attributes : require");
statement("#define SPIRV_CROSS_FLATTEN [[flatten]]");
statement("#define SPIRV_CROSS_BRANCH [[dont_flatten]]");
statement("#define SPIRV_CROSS_UNROLL [[unroll]]");
statement("#define SPIRV_CROSS_LOOP [[dont_unroll]]");
statement("#else");
statement("#define SPIRV_CROSS_FLATTEN");
statement("#define SPIRV_CROSS_BRANCH");
statement("#define SPIRV_CROSS_UNROLL");
statement("#define SPIRV_CROSS_LOOP");
statement("#endif");
}
else if (ext == "GL_NV_fragment_shader_interlock")
{
statement("#extension GL_NV_fragment_shader_interlock : require");
statement("#define SPIRV_Cross_beginInvocationInterlock() beginInvocationInterlockNV()");
statement("#define SPIRV_Cross_endInvocationInterlock() endInvocationInterlockNV()");
}
else if (ext == "GL_ARB_fragment_shader_interlock")
{
statement("#ifdef GL_ARB_fragment_shader_interlock");
statement("#extension GL_ARB_fragment_shader_interlock : enable");
statement("#define SPIRV_Cross_beginInvocationInterlock() beginInvocationInterlockARB()");
statement("#define SPIRV_Cross_endInvocationInterlock() endInvocationInterlockARB()");
statement("#elif defined(GL_INTEL_fragment_shader_ordering)");
statement("#extension GL_INTEL_fragment_shader_ordering : enable");
statement("#define SPIRV_Cross_beginInvocationInterlock() beginFragmentShaderOrderingINTEL()");
statement("#define SPIRV_Cross_endInvocationInterlock()");
statement("#endif");
}
else
statement("#extension ", ext, " : require");
}
if (!options.vulkan_semantics)
{
using Supp = ShaderSubgroupSupportHelper;
auto result = shader_subgroup_supporter.resolve();
for (uint32_t feature_index = 0; feature_index < Supp::FeatureCount; feature_index++)
{
auto feature = static_cast<Supp::Feature>(feature_index);
if (!shader_subgroup_supporter.is_feature_requested(feature))
continue;
auto exts = Supp::get_candidates_for_feature(feature, result);
if (exts.empty())
continue;
statement("");
for (auto &ext : exts)
{
const char *name = Supp::get_extension_name(ext);
const char *extra_predicate = Supp::get_extra_required_extension_predicate(ext);
auto extra_names = Supp::get_extra_required_extension_names(ext);
statement(&ext != &exts.front() ? "#elif" : "#if", " defined(", name, ")",
(*extra_predicate != '\0' ? " && " : ""), extra_predicate);
for (const auto &e : extra_names)
statement("#extension ", e, " : enable");
statement("#extension ", name, " : require");
}
if (!Supp::can_feature_be_implemented_without_extensions(feature))
{
statement("#else");
statement("#error No extensions available to emulate requested subgroup feature.");
}
statement("#endif");
}
}
for (auto &header : header_lines)
statement(header);
SmallVector<string> inputs;
SmallVector<string> outputs;
switch (execution.model)
{
case ExecutionModelVertex:
if (options.ovr_multiview_view_count)
inputs.push_back(join("num_views = ", options.ovr_multiview_view_count));
break;
case ExecutionModelGeometry:
if ((execution.flags.get(ExecutionModeInvocations)) && execution.invocations != 1)
inputs.push_back(join("invocations = ", execution.invocations));
if (execution.flags.get(ExecutionModeInputPoints))
inputs.push_back("points");
if (execution.flags.get(ExecutionModeInputLines))
inputs.push_back("lines");
if (execution.flags.get(ExecutionModeInputLinesAdjacency))
inputs.push_back("lines_adjacency");
if (execution.flags.get(ExecutionModeTriangles))
inputs.push_back("triangles");
if (execution.flags.get(ExecutionModeInputTrianglesAdjacency))
inputs.push_back("triangles_adjacency");
if (!execution.geometry_passthrough)
{
// For passthrough, these are implied and cannot be declared in the shader.
outputs.push_back(join("max_vertices = ", execution.output_vertices));
if (execution.flags.get(ExecutionModeOutputTriangleStrip))
outputs.push_back("triangle_strip");
if (execution.flags.get(ExecutionModeOutputPoints))
outputs.push_back("points");
if (execution.flags.get(ExecutionModeOutputLineStrip))
outputs.push_back("line_strip");
}
break;
case ExecutionModelTessellationControl:
if (execution.flags.get(ExecutionModeOutputVertices))
outputs.push_back(join("vertices = ", execution.output_vertices));
break;
case ExecutionModelTessellationEvaluation:
if (execution.flags.get(ExecutionModeQuads))
inputs.push_back("quads");
if (execution.flags.get(ExecutionModeTriangles))
inputs.push_back("triangles");
if (execution.flags.get(ExecutionModeIsolines))
inputs.push_back("isolines");
if (execution.flags.get(ExecutionModePointMode))
inputs.push_back("point_mode");
if (!execution.flags.get(ExecutionModeIsolines))
{
if (execution.flags.get(ExecutionModeVertexOrderCw))
inputs.push_back("cw");
if (execution.flags.get(ExecutionModeVertexOrderCcw))
inputs.push_back("ccw");
}
if (execution.flags.get(ExecutionModeSpacingFractionalEven))
inputs.push_back("fractional_even_spacing");
if (execution.flags.get(ExecutionModeSpacingFractionalOdd))
inputs.push_back("fractional_odd_spacing");
if (execution.flags.get(ExecutionModeSpacingEqual))
inputs.push_back("equal_spacing");
break;
case ExecutionModelGLCompute:
case ExecutionModelTaskEXT:
case ExecutionModelMeshEXT:
{
if (execution.workgroup_size.constant != 0 || execution.flags.get(ExecutionModeLocalSizeId))
{
SpecializationConstant wg_x, wg_y, wg_z;
get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
// If there are any spec constants on legacy GLSL, defer the declaration; we need to set up macro
// declarations before we can emit the work group size.
if (options.vulkan_semantics ||
((wg_x.id == ConstantID(0)) && (wg_y.id == ConstantID(0)) && (wg_z.id == ConstantID(0))))
build_workgroup_size(inputs, wg_x, wg_y, wg_z);
}
else
{
inputs.push_back(join("local_size_x = ", execution.workgroup_size.x));
inputs.push_back(join("local_size_y = ", execution.workgroup_size.y));
inputs.push_back(join("local_size_z = ", execution.workgroup_size.z));
}
if (execution.model == ExecutionModelMeshEXT)
{
outputs.push_back(join("max_vertices = ", execution.output_vertices));
outputs.push_back(join("max_primitives = ", execution.output_primitives));
if (execution.flags.get(ExecutionModeOutputTrianglesEXT))
outputs.push_back("triangles");
else if (execution.flags.get(ExecutionModeOutputLinesEXT))
outputs.push_back("lines");
else if (execution.flags.get(ExecutionModeOutputPoints))
outputs.push_back("points");
}
break;
}
case ExecutionModelFragment:
if (options.es)
{
switch (options.fragment.default_float_precision)
{
case Options::Lowp:
statement("precision lowp float;");
break;
case Options::Mediump:
statement("precision mediump float;");
break;
case Options::Highp:
statement("precision highp float;");
break;
default:
break;
}
switch (options.fragment.default_int_precision)
{
case Options::Lowp:
statement("precision lowp int;");
break;
case Options::Mediump:
statement("precision mediump int;");
break;
case Options::Highp:
statement("precision highp int;");
break;
default:
break;
}
}
if (execution.flags.get(ExecutionModeEarlyFragmentTests))
inputs.push_back("early_fragment_tests");
if (execution.flags.get(ExecutionModePostDepthCoverage))
inputs.push_back("post_depth_coverage");
if (interlock_used)
statement("#if defined(GL_ARB_fragment_shader_interlock)");
if (execution.flags.get(ExecutionModePixelInterlockOrderedEXT))
statement("layout(pixel_interlock_ordered) in;");
else if (execution.flags.get(ExecutionModePixelInterlockUnorderedEXT))
statement("layout(pixel_interlock_unordered) in;");
else if (execution.flags.get(ExecutionModeSampleInterlockOrderedEXT))
statement("layout(sample_interlock_ordered) in;");
else if (execution.flags.get(ExecutionModeSampleInterlockUnorderedEXT))
statement("layout(sample_interlock_unordered) in;");
if (interlock_used)
{
statement("#elif !defined(GL_INTEL_fragment_shader_ordering)");
statement("#error Fragment Shader Interlock/Ordering extension missing!");
statement("#endif");
}
if (!options.es && execution.flags.get(ExecutionModeDepthGreater))
statement("layout(depth_greater) out float gl_FragDepth;");
else if (!options.es && execution.flags.get(ExecutionModeDepthLess))
statement("layout(depth_less) out float gl_FragDepth;");
break;
default:
break;
}
for (auto &cap : ir.declared_capabilities)
if (cap == CapabilityRayTraversalPrimitiveCullingKHR)
statement("layout(primitive_culling);");
if (!inputs.empty())
statement("layout(", merge(inputs), ") in;");
if (!outputs.empty())
statement("layout(", merge(outputs), ") out;");
statement("");
}
bool CompilerGLSL::type_is_empty(const SPIRType &type)
{
return type.basetype == SPIRType::Struct && type.member_types.empty();
}
void CompilerGLSL::emit_struct(SPIRType &type)
{
// Struct types can be stamped out multiple times
// with just different offsets, matrix layouts, etc ...
// Type-punning with these types is legal, which complicates things
// when we are storing struct and array types in an SSBO for example.
// If the type master is packed however, we can no longer assume that the struct declaration will be redundant.
if (type.type_alias != TypeID(0) &&
!has_extended_decoration(type.type_alias, SPIRVCrossDecorationBufferBlockRepacked))
return;
add_resource_name(type.self);
auto name = type_to_glsl(type);
statement(!backend.explicit_struct_type ? "struct " : "", name);
begin_scope();
type.member_name_cache.clear();
uint32_t i = 0;
bool emitted = false;
for (auto &member : type.member_types)
{
add_member_name(type, i);
emit_struct_member(type, member, i);
i++;
emitted = true;
}
// Don't declare empty structs in GLSL; this is not allowed.
if (type_is_empty(type) && !backend.supports_empty_struct)
{
statement("int empty_struct_member;");
emitted = true;
}
if (has_extended_decoration(type.self, SPIRVCrossDecorationPaddingTarget))
emit_struct_padding_target(type);
end_scope_decl();
if (emitted)
statement("");
}
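// Example of the empty-struct workaround above: for a member-less SPIR-V struct named "Foo",
// the emitted GLSL is roughly
//   struct Foo
//   {
//       int empty_struct_member;
//   };
// since an empty struct declaration is not legal GLSL.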
string CompilerGLSL::to_interpolation_qualifiers(const Bitset &flags)
{
string res;
//if (flags & (1ull << DecorationSmooth))
// res += "smooth ";
if (flags.get(DecorationFlat))
res += "flat ";
if (flags.get(DecorationNoPerspective))
{
if (options.es)
{
if (options.version < 300)
SPIRV_CROSS_THROW("noperspective requires ESSL 300.");
require_extension_internal("GL_NV_shader_noperspective_interpolation");
}
else if (is_legacy_desktop())
require_extension_internal("GL_EXT_gpu_shader4");
res += "noperspective ";
}
if (flags.get(DecorationCentroid))
res += "centroid ";
if (flags.get(DecorationPatch))
res += "patch ";
if (flags.get(DecorationSample))
{
if (options.es)
{
if (options.version < 300)
SPIRV_CROSS_THROW("sample requires ESSL 300.");
else if (options.version < 320)
require_extension_internal("GL_OES_shader_multisample_interpolation");
}
res += "sample ";
}
if (flags.get(DecorationInvariant) && (options.es || options.version >= 120))
res += "invariant ";
if (flags.get(DecorationPerPrimitiveEXT))
res += "perprimitiveEXT ";
if (flags.get(DecorationExplicitInterpAMD))
{
require_extension_internal("GL_AMD_shader_explicit_vertex_parameter");
res += "__explicitInterpAMD ";
}
if (flags.get(DecorationPerVertexKHR))
{
if (options.es && options.version < 320)
SPIRV_CROSS_THROW("pervertexEXT requires ESSL 320.");
else if (!options.es && options.version < 450)
SPIRV_CROSS_THROW("pervertexEXT requires GLSL 450.");
if (barycentric_is_nv)
{
require_extension_internal("GL_NV_fragment_shader_barycentric");
res += "pervertexNV ";
}
else
{
require_extension_internal("GL_EXT_fragment_shader_barycentric");
res += "pervertexEXT ";
}
}
return res;
}
string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index)
{
if (is_legacy())
return "";
bool is_block = has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock);
if (!is_block)
return "";
auto &memb = ir.meta[type.self].members;
if (index >= memb.size())
return "";
auto &dec = memb[index];
SmallVector<string> attr;
if (has_member_decoration(type.self, index, DecorationPassthroughNV))
attr.push_back("passthrough");
// We can only apply layouts on members in block interfaces.
// This is a bit problematic because in SPIR-V decorations are applied on the struct types directly.
// This is not supported in GLSL, so we have to make the assumption that if a struct within our buffer block struct
// has a decoration, it was originally caused by a top-level layout() qualifier in GLSL.
//
// We would like to go from (SPIR-V style):
//
// struct Foo { layout(row_major) mat4 matrix; };
// buffer UBO { Foo foo; };
//
// to
//
// struct Foo { mat4 matrix; }; // GLSL doesn't support any layout shenanigans in raw struct declarations.
// buffer UBO { layout(row_major) Foo foo; }; // Apply the layout on top-level.
auto flags = combined_decoration_for_member(type, index);
if (flags.get(DecorationRowMajor))
attr.push_back("row_major");
// We don't emit any global layouts, so column_major is default.
//if (flags & (1ull << DecorationColMajor))
// attr.push_back("column_major");
if (dec.decoration_flags.get(DecorationLocation) && can_use_io_location(type.storage, true))
attr.push_back(join("location = ", dec.location));
// Can only declare component if we can declare location.
if (dec.decoration_flags.get(DecorationComponent) && can_use_io_location(type.storage, true))
{
if (!options.es)
{
if (options.version < 440 && options.version >= 140)
require_extension_internal("GL_ARB_enhanced_layouts");
else if (options.version < 140)
SPIRV_CROSS_THROW("Component decoration is not supported in targets below GLSL 1.40.");
attr.push_back(join("component = ", dec.component));
}
else
SPIRV_CROSS_THROW("Component decoration is not supported in ES targets.");
}
// SPIRVCrossDecorationExplicitOffset is set by layout_for_variable earlier to mark that we need to emit offset qualifiers.
// This is only done selectively in GLSL as needed.
if (has_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset) &&
dec.decoration_flags.get(DecorationOffset))
attr.push_back(join("offset = ", dec.offset));
else if (type.storage == StorageClassOutput && dec.decoration_flags.get(DecorationOffset))
attr.push_back(join("xfb_offset = ", dec.offset));
if (attr.empty())
return "";
string res = "layout(";
res += merge(attr);
res += ") ";
return res;
}
const char *CompilerGLSL::format_to_glsl(spv::ImageFormat format)
{
if (options.es && is_desktop_only_format(format))
SPIRV_CROSS_THROW("Attempting to use image format not supported in ES profile.");
switch (format)
{
case ImageFormatRgba32f:
return "rgba32f";
case ImageFormatRgba16f:
return "rgba16f";
case ImageFormatR32f:
return "r32f";
case ImageFormatRgba8:
return "rgba8";
case ImageFormatRgba8Snorm:
return "rgba8_snorm";
case ImageFormatRg32f:
return "rg32f";
case ImageFormatRg16f:
return "rg16f";
case ImageFormatRgba32i:
return "rgba32i";
case ImageFormatRgba16i:
return "rgba16i";
case ImageFormatR32i:
return "r32i";
case ImageFormatRgba8i:
return "rgba8i";
case ImageFormatRg32i:
return "rg32i";
case ImageFormatRg16i:
return "rg16i";
case ImageFormatRgba32ui:
return "rgba32ui";
case ImageFormatRgba16ui:
return "rgba16ui";
case ImageFormatR32ui:
return "r32ui";
case ImageFormatRgba8ui:
return "rgba8ui";
case ImageFormatRg32ui:
return "rg32ui";
case ImageFormatRg16ui:
return "rg16ui";
case ImageFormatR11fG11fB10f:
return "r11f_g11f_b10f";
case ImageFormatR16f:
return "r16f";
case ImageFormatRgb10A2:
return "rgb10_a2";
case ImageFormatR8:
return "r8";
case ImageFormatRg8:
return "rg8";
case ImageFormatR16:
return "r16";
case ImageFormatRg16:
return "rg16";
case ImageFormatRgba16:
return "rgba16";
case ImageFormatR16Snorm:
return "r16_snorm";
case ImageFormatRg16Snorm:
return "rg16_snorm";
case ImageFormatRgba16Snorm:
return "rgba16_snorm";
case ImageFormatR8Snorm:
return "r8_snorm";
case ImageFormatRg8Snorm:
return "rg8_snorm";
case ImageFormatR8ui:
return "r8ui";
case ImageFormatRg8ui:
return "rg8ui";
case ImageFormatR16ui:
return "r16ui";
case ImageFormatRgb10a2ui:
return "rgb10_a2ui";
case ImageFormatR8i:
return "r8i";
case ImageFormatRg8i:
return "rg8i";
case ImageFormatR16i:
return "r16i";
default:
case ImageFormatUnknown:
return nullptr;
}
}
uint32_t CompilerGLSL::type_to_packed_base_size(const SPIRType &type, BufferPackingStandard)
{
switch (type.basetype)
{
case SPIRType::Double:
case SPIRType::Int64:
case SPIRType::UInt64:
return 8;
case SPIRType::Float:
case SPIRType::Int:
case SPIRType::UInt:
return 4;
case SPIRType::Half:
case SPIRType::Short:
case SPIRType::UShort:
return 2;
case SPIRType::SByte:
case SPIRType::UByte:
return 1;
default:
SPIRV_CROSS_THROW("Unrecognized type in type_to_packed_base_size.");
}
}
uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bitset &flags,
BufferPackingStandard packing)
{
// If using PhysicalStorageBufferEXT storage class, this is a pointer,
// and is 64-bit.
if (type.storage == StorageClassPhysicalStorageBufferEXT)
{
if (!type.pointer)
SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers.");
if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
{
if (packing_is_vec4_padded(packing) && type_is_array_of_pointers(type))
return 16;
else
return 8;
}
else
SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT.");
}
if (!type.array.empty())
{
uint32_t minimum_alignment = 1;
if (packing_is_vec4_padded(packing))
minimum_alignment = 16;
auto *tmp = &get<SPIRType>(type.parent_type);
while (!tmp->array.empty())
tmp = &get<SPIRType>(tmp->parent_type);
// Get the alignment of the base type, then maybe round up.
return max(minimum_alignment, type_to_packed_alignment(*tmp, flags, packing));
}
if (type.basetype == SPIRType::Struct)
{
// Rule 9. A struct's alignment is the maximum alignment of its members.
uint32_t alignment = 1;
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto member_flags = ir.meta[type.self].members[i].decoration_flags;
alignment =
max(alignment, type_to_packed_alignment(get<SPIRType>(type.member_types[i]), member_flags, packing));
}
// In std140, struct alignment is rounded up to 16.
if (packing_is_vec4_padded(packing))
alignment = max<uint32_t>(alignment, 16u);
return alignment;
}
else
{
const uint32_t base_alignment = type_to_packed_base_size(type, packing);
// Alignment requirement for scalar block layout is always the alignment for the most basic component.
if (packing_is_scalar(packing))
return base_alignment;
// Vectors are *not* aligned in HLSL, but there's an extra rule where vectors cannot straddle
// a vec4 boundary; this is handled outside since that part knows our current offset.
if (type.columns == 1 && packing_is_hlsl(packing))
return base_alignment;
// From 7.6.2.2 in GL 4.5 core spec.
// Rule 1
if (type.vecsize == 1 && type.columns == 1)
return base_alignment;
// Rule 2
if ((type.vecsize == 2 || type.vecsize == 4) && type.columns == 1)
return type.vecsize * base_alignment;
// Rule 3
if (type.vecsize == 3 && type.columns == 1)
return 4 * base_alignment;
// Rule 4 implied. Alignment does not change in std430.
// Rule 5. Column-major matrices are stored as arrays of
// vectors.
if (flags.get(DecorationColMajor) && type.columns > 1)
{
if (packing_is_vec4_padded(packing))
return 4 * base_alignment;
else if (type.vecsize == 3)
return 4 * base_alignment;
else
return type.vecsize * base_alignment;
}
// Rule 6 implied.
// Rule 7.
if (flags.get(DecorationRowMajor) && type.vecsize > 1)
{
if (packing_is_vec4_padded(packing))
return 4 * base_alignment;
else if (type.columns == 3)
return 4 * base_alignment;
else
return type.columns * base_alignment;
}
// Rule 8 implied.
}
SPIRV_CROSS_THROW("Did not find suitable rule for type. Bogus decorations?");
}
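// Worked examples of the rules above for scalar float members (base_alignment = 4):
//   float             -> 4  (Rule 1)
//   vec2              -> 8  (Rule 2)
//   vec3 / vec4       -> 16 (Rules 3 and 2)
//   float[N]          -> 4 in std430, but 16 in std140/HLSL cbuffers (vec4-padded array minimum)
//   column-major mat3 -> 16 in both std140 and std430, since vec3 columns round up to 4 * base_alignment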
uint32_t CompilerGLSL::type_to_packed_array_stride(const SPIRType &type, const Bitset &flags,
BufferPackingStandard packing)
{
// Array stride is equal to aligned size of the underlying type.
uint32_t parent = type.parent_type;
assert(parent);
auto &tmp = get<SPIRType>(parent);
uint32_t size = type_to_packed_size(tmp, flags, packing);
uint32_t alignment = type_to_packed_alignment(type, flags, packing);
return (size + alignment - 1) & ~(alignment - 1);
}
uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing)
{
if (!type.array.empty())
{
uint32_t packed_size = to_array_size_literal(type) * type_to_packed_array_stride(type, flags, packing);
// For arrays of vectors and matrices in HLSL, the last element has a size which depends on its vector size,
// so that it is possible to pack other vectors into the last element.
if (packing_is_hlsl(packing) && type.basetype != SPIRType::Struct)
packed_size -= (4 - type.vecsize) * (type.width / 8);
return packed_size;
}
// If using PhysicalStorageBufferEXT storage class, this is a pointer,
// and is 64-bit.
if (type.storage == StorageClassPhysicalStorageBufferEXT)
{
if (!type.pointer)
SPIRV_CROSS_THROW("Types in PhysicalStorageBufferEXT must be pointers.");
if (ir.addressing_model == AddressingModelPhysicalStorageBuffer64EXT)
return 8;
else
SPIRV_CROSS_THROW("AddressingModelPhysicalStorageBuffer64EXT must be used for PhysicalStorageBufferEXT.");
}
uint32_t size = 0;
if (type.basetype == SPIRType::Struct)
{
uint32_t pad_alignment = 1;
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto member_flags = ir.meta[type.self].members[i].decoration_flags;
auto &member_type = get<SPIRType>(type.member_types[i]);
uint32_t packed_alignment = type_to_packed_alignment(member_type, member_flags, packing);
uint32_t alignment = max(packed_alignment, pad_alignment);
// The next member following a struct member is aligned to the base alignment of the struct that came before.
// GL 4.5 spec, 7.6.2.2.
if (member_type.basetype == SPIRType::Struct)
pad_alignment = packed_alignment;
else
pad_alignment = 1;
size = (size + alignment - 1) & ~(alignment - 1);
size += type_to_packed_size(member_type, member_flags, packing);
}
}
else
{
const uint32_t base_alignment = type_to_packed_base_size(type, packing);
if (packing_is_scalar(packing))
{
size = type.vecsize * type.columns * base_alignment;
}
else
{
if (type.columns == 1)
size = type.vecsize * base_alignment;
if (flags.get(DecorationColMajor) && type.columns > 1)
{
if (packing_is_vec4_padded(packing))
size = type.columns * 4 * base_alignment;
else if (type.vecsize == 3)
size = type.columns * 4 * base_alignment;
else
size = type.columns * type.vecsize * base_alignment;
}
if (flags.get(DecorationRowMajor) && type.vecsize > 1)
{
if (packing_is_vec4_padded(packing))
size = type.vecsize * 4 * base_alignment;
else if (type.columns == 3)
size = type.vecsize * 4 * base_alignment;
else
size = type.vecsize * type.columns * base_alignment;
}
// For matrices in HLSL, the last element has a size which depends on its vector size,
// so that it is possible to pack other vectors into the last element.
if (packing_is_hlsl(packing) && type.columns > 1)
size -= (4 - type.vecsize) * (type.width / 8);
}
}
return size;
}
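// Worked size examples for scalar float members (base size 4):
//   vec3                      -> 12 bytes (3 * 4); rounding up to 16 comes from alignment, not size
//   column-major mat3, std140 -> 48 bytes (3 columns * 4 * 4), matching the vec4-padded column stride
//   float[8]                  -> 128 bytes in std140 (stride 16 per element), 32 bytes in std430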
bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing,
uint32_t *failed_validation_index, uint32_t start_offset,
uint32_t end_offset)
{
// This is very tricky and error prone, but try to be exhaustive and correct here.
// SPIR-V doesn't directly say if we're using std430 or std140.
// SPIR-V communicates this using Offset and ArrayStride decorations (which is what really matters),
// so we have to try to infer whether or not the original GLSL source was std140 or std430 based on this information.
// We do not have to consider shared or packed since these layouts are not allowed in Vulkan SPIR-V (they are useless anyway, and custom offsets would do the same thing).
//
// It is almost certain that we're using std430, but it gets tricky with arrays in particular.
// We will assume std430, but infer std140 if we can prove the struct is not compliant with std430.
//
// The only two differences between std140 and std430 are related to padding alignment/array stride
// in arrays and structs. In std140 they take minimum vec4 alignment.
// std430 only removes the vec4 requirement.
uint32_t offset = 0;
uint32_t pad_alignment = 1;
bool is_top_level_block =
has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock);
for (uint32_t i = 0; i < type.member_types.size(); i++)
{
auto &memb_type = get<SPIRType>(type.member_types[i]);
auto member_flags = ir.meta[type.self].members[i].decoration_flags;
// Verify alignment rules.
uint32_t packed_alignment = type_to_packed_alignment(memb_type, member_flags, packing);
// This is a rather dirty workaround to deal with some cases of OpSpecConstantOp used as array size, e.g:
// layout(constant_id = 0) const int s = 10;
// const int S = s + 5; // SpecConstantOp
// buffer Foo { int data[S]; }; // <-- Very hard for us to deduce a fixed value here,
// we would need full implementation of compile-time constant folding. :(
// If we are the last member of a struct, there might be cases where the actual size of that member is irrelevant
// for our analysis (e.g. unsized arrays).
// This lets us simply ignore that there are spec constant op sized arrays in our buffers.
// Querying size of this member will fail, so just don't call it unless we have to.
//
// This is likely "best effort" we can support without going into unacceptably complicated workarounds.
bool member_can_be_unsized =
is_top_level_block && size_t(i + 1) == type.member_types.size() && !memb_type.array.empty();
uint32_t packed_size = 0;
if (!member_can_be_unsized || packing_is_hlsl(packing))
packed_size = type_to_packed_size(memb_type, member_flags, packing);
// We only need to care about this if we have non-array types which can straddle the vec4 boundary.
uint32_t actual_offset = type_struct_member_offset(type, i);
if (packing_is_hlsl(packing))
{
// If a member straddles across a vec4 boundary, alignment is actually vec4.
uint32_t begin_word = actual_offset / 16;
uint32_t end_word = (actual_offset + packed_size - 1) / 16;
if (begin_word != end_word)
packed_alignment = max<uint32_t>(packed_alignment, 16u);
}
// Field is not in the specified range anymore and we can ignore any further fields.
if (actual_offset >= end_offset)
break;
uint32_t alignment = max(packed_alignment, pad_alignment);
offset = (offset + alignment - 1) & ~(alignment - 1);
// The next member following a struct member is aligned to the base alignment of the struct that came before.
// GL 4.5 spec, 7.6.2.2.
if (memb_type.basetype == SPIRType::Struct && !memb_type.pointer)
pad_alignment = packed_alignment;
else
pad_alignment = 1;
// Only care about packing if we are in the given range
if (actual_offset >= start_offset)
{
// We only care about offsets in std140, std430, etc ...
// For EnhancedLayout variants, we have the flexibility to choose our own offsets.
if (!packing_has_flexible_offset(packing))
{
if (actual_offset != offset) // This cannot be the packing we're looking for.
{
if (failed_validation_index)
*failed_validation_index = i;
return false;
}
}
else if ((actual_offset & (alignment - 1)) != 0)
{
// We still need to verify that alignment rules are observed, even if we have explicit offset.
if (failed_validation_index)
*failed_validation_index = i;
return false;
}
// Verify array stride rules.
if (!memb_type.array.empty() && type_to_packed_array_stride(memb_type, member_flags, packing) !=
type_struct_member_array_stride(type, i))
{
if (failed_validation_index)
*failed_validation_index = i;
return false;
}
// Verify that sub-structs also follow packing rules.
// We cannot use enhanced layouts on substructs, so they better be up to spec.
auto substruct_packing = packing_to_substruct_packing(packing);
if (!memb_type.pointer && !memb_type.member_types.empty() &&
!buffer_is_packing_standard(memb_type, substruct_packing))
{
if (failed_validation_index)
*failed_validation_index = i;
return false;
}
}
// Bump size.
offset = actual_offset + packed_size;
}
return true;
}
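// Example of the std140/std430 inference this enables: a block member declared as a float runtime
// array whose SPIR-V ArrayStride is 16 cannot be std430 (where the stride would be 4), so it fails
// the array stride check here and the caller falls back to testing std140, which accepts that stride.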
bool CompilerGLSL::can_use_io_location(StorageClass storage, bool block)
{
// Location specifiers are mandatory in SPIR-V, but they aren't really supported in earlier versions of GLSL.
// Be very explicit here about how to solve the issue.
if ((get_execution_model() != ExecutionModelVertex && storage == StorageClassInput) ||
(get_execution_model() != ExecutionModelFragment && storage == StorageClassOutput))
{
uint32_t minimum_desktop_version = block ? 440 : 410;
// ARB_enhanced_layouts vs ARB_separate_shader_objects ...
if (!options.es && options.version < minimum_desktop_version && !options.separate_shader_objects)
return false;
else if (options.es && options.version < 310)
return false;
}
if ((get_execution_model() == ExecutionModelVertex && storage == StorageClassInput) ||
(get_execution_model() == ExecutionModelFragment && storage == StorageClassOutput))
{
if (options.es && options.version < 300)
return false;
else if (!options.es && options.version < 330)
return false;
}
if (storage == StorageClassUniform || storage == StorageClassUniformConstant || storage == StorageClassPushConstant)
{
if (options.es && options.version < 310)
return false;
else if (!options.es && options.version < 430)
return false;
}
return true;
}
string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
{
// FIXME: Come up with a better solution for when to disable layouts.
// Whether layouts can be used depends on extensions as well as which kinds
// of layouts are used. For now, the simple solution is to just disable
// layouts for legacy versions.
if (is_legacy())
return "";
if (subpass_input_is_framebuffer_fetch(var.self))
return "";
SmallVector<string> attr;
auto &type = get<SPIRType>(var.basetype);
auto &flags = get_decoration_bitset(var.self);
auto &typeflags = get_decoration_bitset(type.self);
if (flags.get(DecorationPassthroughNV))
attr.push_back("passthrough");
if (options.vulkan_semantics && var.storage == StorageClassPushConstant)
attr.push_back("push_constant");
else if (var.storage == StorageClassShaderRecordBufferKHR)
attr.push_back(ray_tracing_is_khr ? "shaderRecordEXT" : "shaderRecordNV");
if (flags.get(DecorationRowMajor))
attr.push_back("row_major");
if (flags.get(DecorationColMajor))
attr.push_back("column_major");
if (options.vulkan_semantics)
{
if (flags.get(DecorationInputAttachmentIndex))
attr.push_back(join("input_attachment_index = ", get_decoration(var.self, DecorationInputAttachmentIndex)));
}
bool is_block = has_decoration(type.self, DecorationBlock);
if (flags.get(DecorationLocation) && can_use_io_location(var.storage, is_block))
{
Bitset combined_decoration;
for (uint32_t i = 0; i < ir.meta[type.self].members.size(); i++)
combined_decoration.merge_or(combined_decoration_for_member(type, i));
// If our members have location decorations, we don't need to
// emit location decorations at the top as well (looks weird).
if (!combined_decoration.get(DecorationLocation))
attr.push_back(join("location = ", get_decoration(var.self, DecorationLocation)));
}
if (get_execution_model() == ExecutionModelFragment && var.storage == StorageClassOutput &&
location_is_non_coherent_framebuffer_fetch(get_decoration(var.self, DecorationLocation)))
{
attr.push_back("noncoherent");
}
// Transform feedback
bool uses_enhanced_layouts = false;
if (is_block && var.storage == StorageClassOutput)
{
// For blocks, there is a restriction where xfb_stride/xfb_buffer must only be declared on the block itself,
// since all members must match the same xfb_buffer. The only thing we will declare for members of the block
// is the xfb_offset.
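// Illustrative example of the resulting form (hypothetical names):
// layout(xfb_buffer = 0, xfb_stride = 32) out VertexOut { layout(xfb_offset = 0) vec4 pos; } vout;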
uint32_t member_count = uint32_t(type.member_types.size());
bool have_xfb_buffer_stride = false;
bool have_any_xfb_offset = false;
bool have_geom_stream = false;
uint32_t xfb_stride = 0, xfb_buffer = 0, geom_stream = 0;
if (flags.get(DecorationXfbBuffer) && flags.get(DecorationXfbStride))
{
have_xfb_buffer_stride = true;
xfb_buffer = get_decoration(var.self, DecorationXfbBuffer);
xfb_stride = get_decoration(var.self, DecorationXfbStride);
}
if (flags.get(DecorationStream))
{
have_geom_stream = true;
geom_stream = get_decoration(var.self, DecorationStream);
}
// Verify that none of the members violate our assumption.
for (uint32_t i = 0; i < member_count; i++)
{
if (has_member_decoration(type.self, i, DecorationStream))
{
uint32_t member_geom_stream = get_member_decoration(type.self, i, DecorationStream);
if (have_geom_stream && member_geom_stream != geom_stream)
SPIRV_CROSS_THROW("IO block member Stream mismatch.");
have_geom_stream = true;
geom_stream = member_geom_stream;
}
// Only members with an Offset decoration participate in XFB.
if (!has_member_decoration(type.self, i, DecorationOffset))
continue;
have_any_xfb_offset = true;
if (has_member_decoration(type.self, i, DecorationXfbBuffer))
{
uint32_t buffer_index = get_member_decoration(type.self, i, DecorationXfbBuffer);
if (have_xfb_buffer_stride && buffer_index != xfb_buffer)
SPIRV_CROSS_THROW("IO block member XfbBuffer mismatch.");
have_xfb_buffer_stride = true;
xfb_buffer = buffer_index;
}
if (has_member_decoration(type.self, i, DecorationXfbStride))
{
uint32_t stride = get_member_decoration(type.self, i, DecorationXfbStride);
if (have_xfb_buffer_stride && stride != xfb_stride)
SPIRV_CROSS_THROW("IO block member XfbStride mismatch.");
have_xfb_buffer_stride = true;
xfb_stride = stride;
}
}
if (have_xfb_buffer_stride && have_any_xfb_offset)
{
attr.push_back(join("xfb_buffer = ", xfb_buffer));
attr.push_back(join("xfb_stride = ", xfb_stride));
uses_enhanced_layouts = true;
}
if (have_geom_stream)
{
if (get_execution_model() != ExecutionModelGeometry)
SPIRV_CROSS_THROW("Geometry streams can only be used in geometry shaders.");
if (options.es)
SPIRV_CROSS_THROW("Multiple geometry streams not supported in ESSL.");
if (options.version < 400)
require_extension_internal("GL_ARB_transform_feedback3");
attr.push_back(join("stream = ", get_decoration(var.self, DecorationStream)));
}
}
else if (var.storage == StorageClassOutput)
{
if (flags.get(DecorationXfbBuffer) && flags.get(DecorationXfbStride) && flags.get(DecorationOffset))
{
// XFB for standalone variables, we can emit all decorations.
attr.push_back(join("xfb_buffer = ", get_decoration(var.self, DecorationXfbBuffer)));
attr.push_back(join("xfb_stride = ", get_decoration(var.self, DecorationXfbStride)));
attr.push_back(join("xfb_offset = ", get_decoration(var.self, DecorationOffset)));
uses_enhanced_layouts = true;
}
if (flags.get(DecorationStream))
{
if (get_execution_model() != ExecutionModelGeometry)
SPIRV_CROSS_THROW("Geometry streams can only be used in geometry shaders.");
if (options.es)
SPIRV_CROSS_THROW("Multiple geometry streams not supported in ESSL.");
if (options.version < 400)
require_extension_internal("GL_ARB_transform_feedback3");
attr.push_back(join("stream = ", get_decoration(var.self, DecorationStream)));
}
}
// Can only declare Component if we can declare location.
if (flags.get(DecorationComponent) && can_use_io_location(var.storage, is_block))
{
uses_enhanced_layouts = true;
attr.push_back(join("component = ", get_decoration(var.self, DecorationComponent)));
}
if (uses_enhanced_layouts)
{
if (!options.es)
{
if (options.version < 140)
SPIRV_CROSS_THROW("GL_ARB_enhanced_layouts is not supported in targets below GLSL 1.40.");
else if (options.version < 440)
require_extension_internal("GL_ARB_enhanced_layouts");
}
else
SPIRV_CROSS_THROW("GL_ARB_enhanced_layouts is not supported in ESSL.");
}
if (flags.get(DecorationIndex))
attr.push_back(join("index = ", get_decoration(var.self, DecorationIndex)));
// Do not emit set = decoration in regular GLSL output, but
// we need to preserve it in Vulkan GLSL mode.
if (var.storage != StorageClassPushConstant && var.storage != StorageClassShaderRecordBufferKHR)
{
if (flags.get(DecorationDescriptorSet) && options.vulkan_semantics)
attr.push_back(join("set = ", get_decoration(var.self, DecorationDescriptorSet)));
}
bool push_constant_block = options.vulkan_semantics && var.storage == StorageClassPushConstant;
bool ssbo_block = var.storage == StorageClassStorageBuffer || var.storage == StorageClassShaderRecordBufferKHR ||
(var.storage == StorageClassUniform && typeflags.get(DecorationBufferBlock));
bool emulated_ubo = var.storage == StorageClassPushConstant && options.emit_push_constant_as_uniform_buffer;
bool ubo_block = var.storage == StorageClassUniform && typeflags.get(DecorationBlock);
// GL 3.0/GLSL 1.30 is not considered legacy, but it doesn't have UBOs ...
bool can_use_buffer_blocks = (options.es && options.version >= 300) || (!options.es && options.version >= 140);
// Pretend there are no UBOs when the options say so.
if (ubo_block && options.emit_uniform_buffer_as_plain_uniforms)
can_use_buffer_blocks = false;
bool can_use_binding;
if (options.es)
can_use_binding = options.version >= 310;
else
can_use_binding = options.enable_420pack_extension || (options.version >= 420);
// Make sure we don't emit binding layout for a classic uniform on GLSL 1.30.
if (!can_use_buffer_blocks && var.storage == StorageClassUniform)
can_use_binding = false;
if (var.storage == StorageClassShaderRecordBufferKHR)
can_use_binding = false;
if (can_use_binding && flags.get(DecorationBinding))
attr.push_back(join("binding = ", get_decoration(var.self, DecorationBinding)));
if (var.storage != StorageClassOutput && flags.get(DecorationOffset))
attr.push_back(join("offset = ", get_decoration(var.self, DecorationOffset)));
// Instead of adding explicit offsets for every element here, just assume we're using std140 or std430.
// If SPIR-V does not comply with either layout, we cannot really work around it.
if (can_use_buffer_blocks && (ubo_block || emulated_ubo))
{
attr.push_back(buffer_to_packing_standard(type, false));
}
else if (can_use_buffer_blocks && (push_constant_block || ssbo_block))
{
attr.push_back(buffer_to_packing_standard(type, true));
}
// For images, the type itself adds a layout qualifier.
// Only emit the format for storage images.
if (type.basetype == SPIRType::Image && type.image.sampled == 2)
{
const char *fmt = format_to_glsl(type.image.format);
if (fmt)
attr.push_back(fmt);
}
if (attr.empty())
return "";
string res = "layout(";
res += merge(attr);
res += ") ";
return res;
}
string CompilerGLSL::buffer_to_packing_standard(const SPIRType &type, bool support_std430_without_scalar_layout)
{
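// Probe the candidate layouts roughly from most to least portable: plain std430/std140 first, then
// scalar and the EnhancedLayout variants, which need explicit member offsets and extra extensions.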
if (support_std430_without_scalar_layout && buffer_is_packing_standard(type, BufferPackingStd430))
return "std430";
else if (buffer_is_packing_standard(type, BufferPackingStd140))
return "std140";
else if (options.vulkan_semantics && buffer_is_packing_standard(type, BufferPackingScalar))
{
require_extension_internal("GL_EXT_scalar_block_layout");
return "scalar";
}
else if (support_std430_without_scalar_layout &&
buffer_is_packing_standard(type, BufferPackingStd430EnhancedLayout))
{
if (options.es && !options.vulkan_semantics)
SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do "
"not support GL_ARB_enhanced_layouts.");
if (!options.es && !options.vulkan_semantics && options.version < 440)
require_extension_internal("GL_ARB_enhanced_layouts");
set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset);
return "std430";
}
else if (buffer_is_packing_standard(type, BufferPackingStd140EnhancedLayout))
{
// Fallback time. We might be able to use ARB_enhanced_layouts to deal with this difference;
// however, we can only use layout(offset) on the block itself, not on substructs, so the substructs had better use the appropriate layout already.
// Enhanced layouts seem to always work in Vulkan GLSL, so no need for extensions there.
if (options.es && !options.vulkan_semantics)
SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do "
"not support GL_ARB_enhanced_layouts.");
if (!options.es && !options.vulkan_semantics && options.version < 440)
require_extension_internal("GL_ARB_enhanced_layouts");
set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset);
return "std140";
}
else if (options.vulkan_semantics && buffer_is_packing_standard(type, BufferPackingScalarEnhancedLayout))
{
set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset);
require_extension_internal("GL_EXT_scalar_block_layout");
return "scalar";
}
else if (!support_std430_without_scalar_layout && options.vulkan_semantics &&
buffer_is_packing_standard(type, BufferPackingStd430))
{
// UBOs can support std430 with GL_EXT_scalar_block_layout.
require_extension_internal("GL_EXT_scalar_block_layout");
return "std430";
}
else if (!support_std430_without_scalar_layout && options.vulkan_semantics &&
buffer_is_packing_standard(type, BufferPackingStd430EnhancedLayout))
{
// UBOs can support std430 with GL_EXT_scalar_block_layout.
set_extended_decoration(type.self, SPIRVCrossDecorationExplicitOffset);
require_extension_internal("GL_EXT_scalar_block_layout");
return "std430";
}
else
{
SPIRV_CROSS_THROW("Buffer block cannot be expressed as any of std430, std140, scalar, even with enhanced "
"layouts. You can try flattening this block to support a more flexible layout.");
}
}
void CompilerGLSL::emit_push_constant_block(const SPIRVariable &var)
{
if (flattened_buffer_blocks.count(var.self))
emit_buffer_block_flattened(var);
else if (options.vulkan_semantics)
emit_push_constant_block_vulkan(var);
else if (options.emit_push_constant_as_uniform_buffer)
emit_buffer_block_native(var);
else
emit_push_constant_block_glsl(var);
}
void CompilerGLSL::emit_push_constant_block_vulkan(const SPIRVariable &var)
{
emit_buffer_block(var);
}
void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var)
{
// OpenGL has no concept of push constant blocks; implement it as a uniform struct.
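// Illustrative example of the emitted fallback (hypothetical names):
// struct PushConsts { mat4 mvp; };
// uniform PushConsts push;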
auto &type = get<SPIRType>(var.basetype);
unset_decoration(var.self, DecorationBinding);
unset_decoration(var.self, DecorationDescriptorSet);
#if 0
if (flags & ((1ull << DecorationBinding) | (1ull << DecorationDescriptorSet)))
SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. "
"Remap to location with reflection API first or disable these decorations.");
#endif
// We're emitting the push constant block as a regular struct, so disable the block qualifier temporarily.
// Otherwise, we will end up emitting layout() qualifiers on naked structs, which is not allowed.
bool block_flag = has_decoration(type.self, DecorationBlock);
unset_decoration(type.self, DecorationBlock);
emit_struct(type);
if (block_flag)
set_decoration(type.self, DecorationBlock);
emit_uniform(var);
statement("");
}
void CompilerGLSL::emit_buffer_block(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
bool ubo_block = var.storage == StorageClassUniform && has_decoration(type.self, DecorationBlock);
if (flattened_buffer_blocks.count(var.self))
emit_buffer_block_flattened(var);
else if (is_legacy() || (!options.es && options.version == 130) ||
(ubo_block && options.emit_uniform_buffer_as_plain_uniforms))
emit_buffer_block_legacy(var);
else
emit_buffer_block_native(var);
}
void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
bool ssbo = var.storage == StorageClassStorageBuffer ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
if (ssbo)
SPIRV_CROSS_THROW("SSBOs not supported in legacy targets.");
// We're emitting the buffer block as a regular struct, so disable the block qualifier temporarily.
// Otherwise, we will end up emitting layout() qualifiers on naked structs, which is not allowed.
auto &block_flags = ir.meta[type.self].decoration.decoration_flags;
bool block_flag = block_flags.get(DecorationBlock);
block_flags.clear(DecorationBlock);
emit_struct(type);
if (block_flag)
block_flags.set(DecorationBlock);
emit_uniform(var);
statement("");
}
void CompilerGLSL::emit_buffer_reference_block(uint32_t type_id, bool forward_declaration)
{
auto &type = get<SPIRType>(type_id);
string buffer_name;
if (forward_declaration)
{
// Block names should never alias, but with HLSL input they kind of can, because block types are reused for UAVs ...
// Allow an aliased name here since we might be declaring the block twice:
// once as a forward-declared buffer reference and once as the proper declaration. The names must match up.
buffer_name = to_name(type.self, false);
// Shaders never use the block by interface name, so we don't
// have to track this other than updating name caches.
// If we have a collision for any reason, just fall back immediately.
if (ir.meta[type.self].decoration.alias.empty() ||
block_ssbo_names.find(buffer_name) != end(block_ssbo_names) ||
resource_names.find(buffer_name) != end(resource_names))
{
buffer_name = join("_", type.self);
}
// Make sure we get something unique for both global name scope and block name scope.
// See GLSL 4.5 spec: section 4.3.9 for details.
add_variable(block_ssbo_names, resource_names, buffer_name);
// If for some reason buffer_name is an illegal name, make a final fallback to a workaround name.
// This cannot conflict with anything else, so we're safe now.
// We cannot reuse this fallback name in either global scope (blocked by block_names) or block name scope.
if (buffer_name.empty())
buffer_name = join("_", type.self);
block_names.insert(buffer_name);
block_ssbo_names.insert(buffer_name);
// Ensure we emit the correct name when emitting non-forward pointer type.
ir.meta[type.self].decoration.alias = buffer_name;
}
else if (type.basetype != SPIRType::Struct)
buffer_name = type_to_glsl(type);
else
buffer_name = to_name(type.self, false);
if (!forward_declaration)
{
auto itr = physical_storage_type_to_alignment.find(type_id);
uint32_t alignment = 0;
if (itr != physical_storage_type_to_alignment.end())
alignment = itr->second.alignment;
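// Illustrative example of the declaration emitted below (hypothetical type name and alignment):
// layout(buffer_reference, buffer_reference_align = 16, std430) buffer Node { ... };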
if (type.basetype == SPIRType::Struct)
{
SmallVector<std::string> attributes;
attributes.push_back("buffer_reference");
if (alignment)
attributes.push_back(join("buffer_reference_align = ", alignment));
attributes.push_back(buffer_to_packing_standard(type, true));
auto flags = ir.get_buffer_block_type_flags(type);
string decorations;
if (flags.get(DecorationRestrict))
decorations += " restrict";
if (flags.get(DecorationCoherent))
decorations += " coherent";
if (flags.get(DecorationNonReadable))
decorations += " writeonly";
if (flags.get(DecorationNonWritable))
decorations += " readonly";
statement("layout(", merge(attributes), ")", decorations, " buffer ", buffer_name);
}
else if (alignment)
statement("layout(buffer_reference, buffer_reference_align = ", alignment, ") buffer ", buffer_name);
else
statement("layout(buffer_reference) buffer ", buffer_name);
begin_scope();
if (type.basetype == SPIRType::Struct)
{
type.member_name_cache.clear();
uint32_t i = 0;
for (auto &member : type.member_types)
{
add_member_name(type, i);
emit_struct_member(type, member, i);
i++;
}
}
else
{
auto &pointee_type = get_pointee_type(type);
statement(type_to_glsl(pointee_type), " value", type_to_array_glsl(pointee_type), ";");
}
end_scope_decl();
statement("");
}
else
{
statement("layout(buffer_reference) buffer ", buffer_name, ";");
}
}
void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
Bitset flags = ir.get_buffer_block_flags(var);
bool ssbo = var.storage == StorageClassStorageBuffer || var.storage == StorageClassShaderRecordBufferKHR ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock);
bool is_restrict = ssbo && flags.get(DecorationRestrict);
bool is_writeonly = ssbo && flags.get(DecorationNonReadable);
bool is_readonly = ssbo && flags.get(DecorationNonWritable);
bool is_coherent = ssbo && flags.get(DecorationCoherent);
// Block names should never alias, but from HLSL input they kind of can because block types are reused for UAVs ...
auto buffer_name = to_name(type.self, false);
auto &block_namespace = ssbo ? block_ssbo_names : block_ubo_names;
// Shaders never use the block by interface name, so we don't
// have to track this other than updating name caches.
// If we have a collision for any reason, just fall back immediately.
if (ir.meta[type.self].decoration.alias.empty() || block_namespace.find(buffer_name) != end(block_namespace) ||
resource_names.find(buffer_name) != end(resource_names))
{
buffer_name = get_block_fallback_name(var.self);
}
// Make sure we get something unique for both global name scope and block name scope.
// See GLSL 4.5 spec: section 4.3.9 for details.
add_variable(block_namespace, resource_names, buffer_name);
// If for some reason buffer_name is an illegal name, make a final fallback to a workaround name.
// This cannot conflict with anything else, so we're safe now.
// We cannot reuse this fallback name in either global scope (blocked by block_names) or block name scope.
if (buffer_name.empty())
buffer_name = join("_", get<SPIRType>(var.basetype).self, "_", var.self);
block_names.insert(buffer_name);
block_namespace.insert(buffer_name);
// Save for post-reflection later.
declared_block_names[var.self] = buffer_name;
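// Illustrative example of the emitted block (hypothetical names and layout):
// layout(std430, binding = 0) restrict buffer SSBO { vec4 data[]; } ssbo;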
statement(layout_for_variable(var), is_coherent ? "coherent " : "", is_restrict ? "restrict " : "",
is_writeonly ? "writeonly " : "", is_readonly ? "readonly " : "", ssbo ? "buffer " : "uniform ",
buffer_name);
begin_scope();
type.member_name_cache.clear();
uint32_t i = 0;
for (auto &member : type.member_types)
{
add_member_name(type, i);
emit_struct_member(type, member, i);
i++;
}
// var.self can be used as a backup name for the block name,
// so we need to make sure we don't disturb the name here on a recompile.
// It will need to be reset if we have to recompile.
preserve_alias_on_reset(var.self);
add_resource_name(var.self);
end_scope_decl(to_name(var.self) + type_to_array_glsl(type));
statement("");
}
void CompilerGLSL::emit_buffer_block_flattened(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
// Block names should never alias.
auto buffer_name = to_name(type.self, false);
size_t buffer_size = (get_declared_struct_size(type) + 15) / 16;
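// Illustrative example with hypothetical names: a 64-byte block flattens to "uniform vec4 UBO[4];",
// and members are later accessed by indexing into that array.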
SPIRType::BaseType basic_type;
if (get_common_basic_type(type, basic_type))
{
SPIRType tmp;
tmp.basetype = basic_type;
tmp.vecsize = 4;
if (basic_type != SPIRType::Float && basic_type != SPIRType::Int && basic_type != SPIRType::UInt)
SPIRV_CROSS_THROW("Basic types in a flattened UBO must be float, int or uint.");
auto flags = ir.get_buffer_block_flags(var);
statement("uniform ", flags_to_qualifiers_glsl(tmp, flags), type_to_glsl(tmp), " ", buffer_name, "[",
buffer_size, "];");
}
else
SPIRV_CROSS_THROW("All basic types in a flattened block must be the same.");
}
const char *CompilerGLSL::to_storage_qualifiers_glsl(const SPIRVariable &var)
{
auto &execution = get_entry_point();
if (subpass_input_is_framebuffer_fetch(var.self))
return "";
if (var.storage == StorageClassInput || var.storage == StorageClassOutput)
{
if (is_legacy() && execution.model == ExecutionModelVertex)
return var.storage == StorageClassInput ? "attribute " : "varying ";
else if (is_legacy() && execution.model == ExecutionModelFragment)
return "varying "; // Fragment outputs are renamed so they never hit this case.
else if (execution.model == ExecutionModelFragment && var.storage == StorageClassOutput)
{
uint32_t loc = get_decoration(var.self, DecorationLocation);
bool is_inout = location_is_framebuffer_fetch(loc);
if (is_inout)
return "inout ";
else
return "out ";
}
else
return var.storage == StorageClassInput ? "in " : "out ";
}
else if (var.storage == StorageClassUniformConstant || var.storage == StorageClassUniform ||
var.storage == StorageClassPushConstant)
{
return "uniform ";
}
else if (var.storage == StorageClassRayPayloadKHR)
{
return ray_tracing_is_khr ? "rayPayloadEXT " : "rayPayloadNV ";
}
else if (var.storage == StorageClassIncomingRayPayloadKHR)
{
return ray_tracing_is_khr ? "rayPayloadInEXT " : "rayPayloadInNV ";
}
else if (var.storage == StorageClassHitAttributeKHR)
{
return ray_tracing_is_khr ? "hitAttributeEXT " : "hitAttributeNV ";
}
else if (var.storage == StorageClassCallableDataKHR)
{
return ray_tracing_is_khr ? "callableDataEXT " : "callableDataNV ";
}
else if (var.storage == StorageClassIncomingCallableDataKHR)
{
return ray_tracing_is_khr ? "callableDataInEXT " : "callableDataInNV ";
}
return "";
}
void CompilerGLSL::emit_flattened_io_block_member(const std::string &basename, const SPIRType &type, const char *qual,
const SmallVector<uint32_t> &indices)
{
uint32_t member_type_id = type.self;
const SPIRType *member_type = &type;
const SPIRType *parent_type = nullptr;
auto flattened_name = basename;
for (auto &index : indices)
{
flattened_name += "_";
flattened_name += to_member_name(*member_type, index);
parent_type = member_type;
member_type_id = member_type->member_types[index];
member_type = &get<SPIRType>(member_type_id);
}
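// Illustrative example with hypothetical names: basename "vout" with nested members "data" and "uv"
// produces the flattened name "vout_data_uv".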
assert(member_type->basetype != SPIRType::Struct);
// We're overriding struct member names, so ensure we do so on the primary type.
if (parent_type->type_alias)
parent_type = &get<SPIRType>(parent_type->type_alias);
// Sanitize underscores because joining the two identifiers might create more than 1 underscore in a row,
// which is not allowed.
ParsedIR::sanitize_underscores(flattened_name);
uint32_t last_index = indices.back();
// Pass in the varying qualifier here so it will appear in the correct declaration order.
// Replace member name while emitting it so it encodes both struct name and member name.
auto backup_name = get_member_name(parent_type->self, last_index);
auto member_name = to_member_name(*parent_type, last_index);
set_member_name(parent_type->self, last_index, flattened_name);
emit_struct_member(*parent_type, member_type_id, last_index, qual);
// Restore member name.
set_member_name(parent_type->self, last_index, member_name);
}
void CompilerGLSL::emit_flattened_io_block_struct(const std::string &basename, const SPIRType &type, const char *qual,
const SmallVector<uint32_t> &indices)
{
auto sub_indices = indices;
sub_indices.push_back(0);
const SPIRType *member_type = &type;
for (auto &index : indices)
member_type = &get<SPIRType>(member_type->member_types[index]);
assert(member_type->basetype == SPIRType::Struct);
if (!member_type->array.empty())
SPIRV_CROSS_THROW("Cannot flatten array of structs in I/O blocks.");
for (uint32_t i = 0; i < uint32_t(member_type->member_types.size()); i++)
{
sub_indices.back() = i;
if (get<SPIRType>(member_type->member_types[i]).basetype == SPIRType::Struct)
emit_flattened_io_block_struct(basename, type, qual, sub_indices);
else
emit_flattened_io_block_member(basename, type, qual, sub_indices);
}
}
void CompilerGLSL::emit_flattened_io_block(const SPIRVariable &var, const char *qual)
{
auto &var_type = get<SPIRType>(var.basetype);
if (!var_type.array.empty())
SPIRV_CROSS_THROW("Array of varying structs cannot be flattened to legacy-compatible varyings.");
// Emit flattened types based on the type alias. Normally, we are never supposed to emit
// struct declarations for aliased types.
auto &type = var_type.type_alias ? get<SPIRType>(var_type.type_alias) : var_type;
auto old_flags = ir.meta[type.self].decoration.decoration_flags;
// Emit the members as if they are part of a block to get all qualifiers.
ir.meta[type.self].decoration.decoration_flags.set(DecorationBlock);
type.member_name_cache.clear();
SmallVector<uint32_t> member_indices;
member_indices.push_back(0);
auto basename = to_name(var.self);
uint32_t i = 0;
for (auto &member : type.member_types)
{
add_member_name(type, i);
auto &membertype = get<SPIRType>(member);
member_indices.back() = i;
if (membertype.basetype == SPIRType::Struct)
emit_flattened_io_block_struct(basename, type, qual, member_indices);
else
emit_flattened_io_block_member(basename, type, qual, member_indices);
i++;
}
ir.meta[type.self].decoration.decoration_flags = old_flags;
// Treat this variable as fully flattened from now on.
flattened_structs[var.self] = true;
}
void CompilerGLSL::emit_interface_block(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
if (var.storage == StorageClassInput && type.basetype == SPIRType::Double &&
!options.es && options.version < 410)
{
require_extension_internal("GL_ARB_vertex_attrib_64bit");
}
// Either make it plain in/out or in/out blocks depending on what shader is doing ...
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
const char *qual = to_storage_qualifiers_glsl(var);
if (block)
{
// ESSL earlier than 310 and GLSL earlier than 150 did not support
// I/O variables which are struct types.
// To support this, flatten the struct into separate varyings instead.
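// Illustrative example with hypothetical names: "out VertexData { vec2 uv; } vout;" is instead
// emitted as the plain varying "out vec2 vout_uv;".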
if (options.force_flattened_io_blocks || (options.es && options.version < 310) ||
(!options.es && options.version < 150))
{
// I/O blocks on ES require version 310 with Android Extension Pack extensions, or core version 320.
// On desktop, I/O blocks were introduced with geometry shaders in GL 3.2 (GLSL 150).
emit_flattened_io_block(var, qual);
}
else
{
if (options.es && options.version < 320)
{
// Geometry and tessellation extensions imply this extension.
if (!has_extension("GL_EXT_geometry_shader") && !has_extension("GL_EXT_tessellation_shader"))
require_extension_internal("GL_EXT_shader_io_blocks");
}
// Workaround to make sure we can emit "patch in/out" correctly.
fixup_io_block_patch_primitive_qualifiers(var);
// Block names should never alias.
auto block_name = to_name(type.self, false);
// The namespace for I/O blocks is separate from other variables in GLSL.
auto &block_namespace = type.storage == StorageClassInput ? block_input_names : block_output_names;
// Shaders never use the block by interface name, so we don't
// have to track this other than updating name caches.
if (block_name.empty() || block_namespace.find(block_name) != end(block_namespace))
block_name = get_fallback_name(type.self);
else
block_namespace.insert(block_name);
// If for some reason block_name is an illegal name, make a final fallback to a workaround name.
// This cannot conflict with anything else, so we're safe now.
if (block_name.empty())
block_name = join("_", get<SPIRType>(var.basetype).self, "_", var.self);
// Instance names cannot alias block names.
resource_names.insert(block_name);
const char *block_qualifier;
if (has_decoration(var.self, DecorationPatch))
block_qualifier = "patch ";
else if (has_decoration(var.self, DecorationPerPrimitiveEXT))
block_qualifier = "perprimitiveEXT ";
else
block_qualifier = "";
statement(layout_for_variable(var), block_qualifier, qual, block_name);
begin_scope();
type.member_name_cache.clear();
uint32_t i = 0;
for (auto &member : type.member_types)
{
add_member_name(type, i);
emit_struct_member(type, member, i);
i++;
}
add_resource_name(var.self);
end_scope_decl(join(to_name(var.self), type_to_array_glsl(type)));
statement("");
}
}
else
{
// ESSL earlier than 310 and GLSL earlier than 150 did not support
// I/O variables which are struct types.
// To support this, flatten the struct into separate varyings instead.
if (type.basetype == SPIRType::Struct &&
(options.force_flattened_io_blocks || (options.es && options.version < 310) ||
(!options.es && options.version < 150)))
{
emit_flattened_io_block(var, qual);
}
else
{
add_resource_name(var.self);
// Legacy GLSL did not support int attributes; we automatically
// declare them as float and cast them on load/store.
SPIRType newtype = type;
if (is_legacy() && var.storage == StorageClassInput && type.basetype == SPIRType::Int)
newtype.basetype = SPIRType::Float;
// Tessellation control and evaluation shaders must have either
// gl_MaxPatchVertices or unsized arrays for input arrays.
// Opt for unsized as it's the more "correct" variant to use.
if (type.storage == StorageClassInput && !type.array.empty() &&
!has_decoration(var.self, DecorationPatch) &&
(get_entry_point().model == ExecutionModelTessellationControl ||
get_entry_point().model == ExecutionModelTessellationEvaluation))
{
newtype.array.back() = 0;
newtype.array_size_literal.back() = true;
}
statement(layout_for_variable(var), to_qualifiers_glsl(var.self),
variable_decl(newtype, to_name(var.self), var.self), ";");
}
}
}
void CompilerGLSL::emit_uniform(const SPIRVariable &var)
{
auto &type = get<SPIRType>(var.basetype);
if (type.basetype == SPIRType::Image && type.image.sampled == 2 && type.image.dim != DimSubpassData)
{
if (!options.es && options.version < 420)
require_extension_internal("GL_ARB_shader_image_load_store");
else if (options.es && options.version < 310)
SPIRV_CROSS_THROW("At least ESSL 3.10 required for shader image load store.");
}
add_resource_name(var.self);
statement(layout_for_variable(var), variable_decl(var), ";");
}
string CompilerGLSL::constant_value_macro_name(uint32_t id)
{
return join("SPIRV_CROSS_CONSTANT_ID_", id);
}
void CompilerGLSL::emit_specialization_constant_op(const SPIRConstantOp &constant)
{
auto &type = get<SPIRType>(constant.basetype);
add_resource_name(constant.self);
auto name = to_name(constant.self);
statement("const ", variable_decl(type, name), " = ", constant_op_expression(constant), ";");
}
int CompilerGLSL::get_constant_mapping_to_workgroup_component(const SPIRConstant &c) const
{
auto &entry_point = get_entry_point();
int index = -1;
// Need to redirect specialization constants which are used as WorkGroupSize to the builtin,
// since the spec constants themselves are never explicitly declared.
if (entry_point.workgroup_size.constant == 0 && entry_point.flags.get(ExecutionModeLocalSizeId))
{
if (c.self == entry_point.workgroup_size.id_x)
index = 0;
else if (c.self == entry_point.workgroup_size.id_y)
index = 1;
else if (c.self == entry_point.workgroup_size.id_z)
index = 2;
}
return index;
}
void CompilerGLSL::emit_constant(const SPIRConstant &constant)
{
auto &type = get<SPIRType>(constant.constant_type);
SpecializationConstant wg_x, wg_y, wg_z;
ID workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z);
// This specialization constant is implicitly declared by emitting layout() in;
if (constant.self == workgroup_size_id)
return;
// These specialization constants are implicitly declared by emitting layout() in;
// In legacy GLSL, we will still need to emit macros for these, so a layout() in; declaration
// later can use macro overrides for work group size.
bool is_workgroup_size_constant = ConstantID(constant.self) == wg_x.id || ConstantID(constant.self) == wg_y.id ||
ConstantID(constant.self) == wg_z.id;
if (options.vulkan_semantics && is_workgroup_size_constant)
{
// Vulkan GLSL does not need to declare workgroup spec constants explicitly; it is handled in layout().
return;
}
else if (!options.vulkan_semantics && is_workgroup_size_constant &&
!has_decoration(constant.self, DecorationSpecId))
{
// Only bother declaring a workgroup size if it is actually a specialization constant, because we need macros.
return;
}
add_resource_name(constant.self);
auto name = to_name(constant.self);
// Only scalars have constant IDs.
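// Illustrative example of the plain-GLSL fallback below (hypothetical macro name and value):
// #ifndef SPIRV_CROSS_CONSTANT_ID_3
// #define SPIRV_CROSS_CONSTANT_ID_3 2
// #endif
// const int foo = SPIRV_CROSS_CONSTANT_ID_3;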
if (has_decoration(constant.self, DecorationSpecId))
{
if (options.vulkan_semantics)
{
statement("layout(constant_id = ", get_decoration(constant.self, DecorationSpecId), ") const ",
variable_decl(type, name), " = ", constant_expression(constant), ";");
}
else
{
const string &macro_name = constant.specialization_constant_macro_name;
statement("#ifndef ", macro_name);
statement("#define ", macro_name, " ", constant_expression(constant));
statement("#endif");
// For workgroup size constants, only emit the macros.
if (!is_workgroup_size_constant)
statement("const ", variable_decl(type, name), " = ", macro_name, ";");
}
}
else
{
statement("const ", variable_decl(type, name), " = ", constant_expression(constant), ";");
}
}
void CompilerGLSL::emit_entry_point_declarations()
{
}
void CompilerGLSL::replace_illegal_names