| // |
| // Copyright (C) 2002-2005 3Dlabs Inc. Ltd. |
| // Copyright (C) 2012-2016 LunarG, Inc. |
| // Copyright (C) 2015-2016 Google, Inc. |
| // Copyright (C) 2017 ARM Limited. |
| // |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // |
| // Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // |
| // Redistributions in binary form must reproduce the above |
| // copyright notice, this list of conditions and the following |
| // disclaimer in the documentation and/or other materials provided |
| // with the distribution. |
| // |
| // Neither the name of 3Dlabs Inc. Ltd. nor the names of its |
| // contributors may be used to endorse or promote products derived |
| // from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
| // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| // POSSIBILITY OF SUCH DAMAGE. |
| // |
| |
| // |
| // Create strings that declare built-in definitions, add built-ins programmatically |
| // that cannot be expressed in the strings, and establish mappings between |
| // built-in functions and operators. |
| // |
| // Where to put a built-in: |
| // TBuiltIns::initialize(version,profile) context-independent textual built-ins; add them to the right string |
| // TBuiltIns::initialize(resources,...) context-dependent textual built-ins; add them to the right string |
| // TBuiltIns::identifyBuiltIns(...,symbolTable) context-independent programmatic additions/mappings to the symbol table, |
| // including identifying what extensions are needed if a version does not allow a symbol |
| // TBuiltIns::identifyBuiltIns(...,symbolTable, resources) context-dependent programmatic additions/mappings to the symbol table, |
| // including identifying what extensions are needed if a version does not allow a symbol |
| // |
| |
| #include "../Include/intermediate.h" |
| #include "Initialize.h" |
| |
| namespace glslang { |
| |
// TODO: ARB_Compatibility: do full extension support
const bool ARBCompatibility = true;

// NOTE(review): presumably, when true, deprecated features are still accepted
// rather than rejected for the requested version/profile — confirm against callers.
const bool ForwardCompatibility = false;

// change this back to false if depending on textual spellings of texturing calls when consuming the AST
// Using PureOperatorBuiltins=false is deprecated.
bool PureOperatorBuiltins = true;
| |
| inline bool IncludeLegacy(int version, EProfile profile, const SpvVersion& spvVersion) |
| { |
| return profile != EEsProfile && (version <= 130 || (spvVersion.spv == 0 && ARBCompatibility) || profile == ECompatibilityProfile); |
| } |
| |
// Construct TBuiltInParseables base class. This can be used for language-common constructs.
// Intentionally empty: derived classes (e.g. TBuiltIns) do all per-language setup.
TBuiltInParseables::TBuiltInParseables()
{
}
| |
// Destroy TBuiltInParseables.
// Intentionally empty: this class owns no resources needing explicit release.
TBuiltInParseables::~TBuiltInParseables()
{
}
| |
| TBuiltIns::TBuiltIns() |
| { |
| // Set up textual representations for making all the permutations |
| // of texturing/imaging functions. |
| prefixes[EbtFloat] = ""; |
| prefixes[EbtInt8] = "i8"; |
| prefixes[EbtUint8] = "u8"; |
| prefixes[EbtInt16] = "i16"; |
| prefixes[EbtUint16] = "u16"; |
| prefixes[EbtInt] = "i"; |
| prefixes[EbtUint] = "u"; |
| postfixes[2] = "2"; |
| postfixes[3] = "3"; |
| postfixes[4] = "4"; |
| |
| // Map from symbolic class of texturing dimension to numeric dimensions. |
| dimMap[Esd1D] = 1; |
| dimMap[Esd2D] = 2; |
| dimMap[EsdRect] = 2; |
| dimMap[Esd3D] = 3; |
| dimMap[EsdCube] = 3; |
| dimMap[EsdBuffer] = 1; |
| dimMap[EsdSubpass] = 2; // potientially unused for now |
| } |
| |
// Destroy TBuiltIns.
// Intentionally empty: the string/map members clean themselves up.
TBuiltIns::~TBuiltIns()
{
}
| |
| |
| // |
| // Add all context-independent built-in functions and variables that are present |
| // for the given version and profile. Share common ones across stages, otherwise |
| // make stage-specific entries. |
| // |
| // Most built-ins variables can be added as simple text strings. Some need to |
| // be added programmatically, which is done later in IdentifyBuiltIns() below. |
| // |
| void TBuiltIns::initialize(int version, EProfile profile, const SpvVersion& spvVersion) |
| { |
| //============================================================================ |
| // |
| // Prototypes for built-in functions seen by both vertex and fragment shaders. |
| // |
| //============================================================================ |
| |
| // |
| // Angle and Trigonometric Functions. |
| // |
| commonBuiltins.append( |
| "float radians(float degrees);" |
| "vec2 radians(vec2 degrees);" |
| "vec3 radians(vec3 degrees);" |
| "vec4 radians(vec4 degrees);" |
| |
| "float degrees(float radians);" |
| "vec2 degrees(vec2 radians);" |
| "vec3 degrees(vec3 radians);" |
| "vec4 degrees(vec4 radians);" |
| |
| "float sin(float angle);" |
| "vec2 sin(vec2 angle);" |
| "vec3 sin(vec3 angle);" |
| "vec4 sin(vec4 angle);" |
| |
| "float cos(float angle);" |
| "vec2 cos(vec2 angle);" |
| "vec3 cos(vec3 angle);" |
| "vec4 cos(vec4 angle);" |
| |
| "float tan(float angle);" |
| "vec2 tan(vec2 angle);" |
| "vec3 tan(vec3 angle);" |
| "vec4 tan(vec4 angle);" |
| |
| "float asin(float x);" |
| "vec2 asin(vec2 x);" |
| "vec3 asin(vec3 x);" |
| "vec4 asin(vec4 x);" |
| |
| "float acos(float x);" |
| "vec2 acos(vec2 x);" |
| "vec3 acos(vec3 x);" |
| "vec4 acos(vec4 x);" |
| |
| "float atan(float y, float x);" |
| "vec2 atan(vec2 y, vec2 x);" |
| "vec3 atan(vec3 y, vec3 x);" |
| "vec4 atan(vec4 y, vec4 x);" |
| |
| "float atan(float y_over_x);" |
| "vec2 atan(vec2 y_over_x);" |
| "vec3 atan(vec3 y_over_x);" |
| "vec4 atan(vec4 y_over_x);" |
| |
| "\n"); |
| |
| if (version >= 130) { |
| commonBuiltins.append( |
| "float sinh(float angle);" |
| "vec2 sinh(vec2 angle);" |
| "vec3 sinh(vec3 angle);" |
| "vec4 sinh(vec4 angle);" |
| |
| "float cosh(float angle);" |
| "vec2 cosh(vec2 angle);" |
| "vec3 cosh(vec3 angle);" |
| "vec4 cosh(vec4 angle);" |
| |
| "float tanh(float angle);" |
| "vec2 tanh(vec2 angle);" |
| "vec3 tanh(vec3 angle);" |
| "vec4 tanh(vec4 angle);" |
| |
| "float asinh(float x);" |
| "vec2 asinh(vec2 x);" |
| "vec3 asinh(vec3 x);" |
| "vec4 asinh(vec4 x);" |
| |
| "float acosh(float x);" |
| "vec2 acosh(vec2 x);" |
| "vec3 acosh(vec3 x);" |
| "vec4 acosh(vec4 x);" |
| |
| "float atanh(float y_over_x);" |
| "vec2 atanh(vec2 y_over_x);" |
| "vec3 atanh(vec3 y_over_x);" |
| "vec4 atanh(vec4 y_over_x);" |
| |
| "\n"); |
| } |
| |
| // |
| // Exponential Functions. |
| // |
| commonBuiltins.append( |
| "float pow(float x, float y);" |
| "vec2 pow(vec2 x, vec2 y);" |
| "vec3 pow(vec3 x, vec3 y);" |
| "vec4 pow(vec4 x, vec4 y);" |
| |
| "float exp(float x);" |
| "vec2 exp(vec2 x);" |
| "vec3 exp(vec3 x);" |
| "vec4 exp(vec4 x);" |
| |
| "float log(float x);" |
| "vec2 log(vec2 x);" |
| "vec3 log(vec3 x);" |
| "vec4 log(vec4 x);" |
| |
| "float exp2(float x);" |
| "vec2 exp2(vec2 x);" |
| "vec3 exp2(vec3 x);" |
| "vec4 exp2(vec4 x);" |
| |
| "float log2(float x);" |
| "vec2 log2(vec2 x);" |
| "vec3 log2(vec3 x);" |
| "vec4 log2(vec4 x);" |
| |
| "float sqrt(float x);" |
| "vec2 sqrt(vec2 x);" |
| "vec3 sqrt(vec3 x);" |
| "vec4 sqrt(vec4 x);" |
| |
| "float inversesqrt(float x);" |
| "vec2 inversesqrt(vec2 x);" |
| "vec3 inversesqrt(vec3 x);" |
| "vec4 inversesqrt(vec4 x);" |
| |
| "\n"); |
| |
| // |
| // Common Functions. |
| // |
| commonBuiltins.append( |
| "float abs(float x);" |
| "vec2 abs(vec2 x);" |
| "vec3 abs(vec3 x);" |
| "vec4 abs(vec4 x);" |
| |
| "float sign(float x);" |
| "vec2 sign(vec2 x);" |
| "vec3 sign(vec3 x);" |
| "vec4 sign(vec4 x);" |
| |
| "float floor(float x);" |
| "vec2 floor(vec2 x);" |
| "vec3 floor(vec3 x);" |
| "vec4 floor(vec4 x);" |
| |
| "float ceil(float x);" |
| "vec2 ceil(vec2 x);" |
| "vec3 ceil(vec3 x);" |
| "vec4 ceil(vec4 x);" |
| |
| "float fract(float x);" |
| "vec2 fract(vec2 x);" |
| "vec3 fract(vec3 x);" |
| "vec4 fract(vec4 x);" |
| |
| "float mod(float x, float y);" |
| "vec2 mod(vec2 x, float y);" |
| "vec3 mod(vec3 x, float y);" |
| "vec4 mod(vec4 x, float y);" |
| "vec2 mod(vec2 x, vec2 y);" |
| "vec3 mod(vec3 x, vec3 y);" |
| "vec4 mod(vec4 x, vec4 y);" |
| |
| "float min(float x, float y);" |
| "vec2 min(vec2 x, float y);" |
| "vec3 min(vec3 x, float y);" |
| "vec4 min(vec4 x, float y);" |
| "vec2 min(vec2 x, vec2 y);" |
| "vec3 min(vec3 x, vec3 y);" |
| "vec4 min(vec4 x, vec4 y);" |
| |
| "float max(float x, float y);" |
| "vec2 max(vec2 x, float y);" |
| "vec3 max(vec3 x, float y);" |
| "vec4 max(vec4 x, float y);" |
| "vec2 max(vec2 x, vec2 y);" |
| "vec3 max(vec3 x, vec3 y);" |
| "vec4 max(vec4 x, vec4 y);" |
| |
| "float clamp(float x, float minVal, float maxVal);" |
| "vec2 clamp(vec2 x, float minVal, float maxVal);" |
| "vec3 clamp(vec3 x, float minVal, float maxVal);" |
| "vec4 clamp(vec4 x, float minVal, float maxVal);" |
| "vec2 clamp(vec2 x, vec2 minVal, vec2 maxVal);" |
| "vec3 clamp(vec3 x, vec3 minVal, vec3 maxVal);" |
| "vec4 clamp(vec4 x, vec4 minVal, vec4 maxVal);" |
| |
| "float mix(float x, float y, float a);" |
| "vec2 mix(vec2 x, vec2 y, float a);" |
| "vec3 mix(vec3 x, vec3 y, float a);" |
| "vec4 mix(vec4 x, vec4 y, float a);" |
| "vec2 mix(vec2 x, vec2 y, vec2 a);" |
| "vec3 mix(vec3 x, vec3 y, vec3 a);" |
| "vec4 mix(vec4 x, vec4 y, vec4 a);" |
| |
| "float step(float edge, float x);" |
| "vec2 step(vec2 edge, vec2 x);" |
| "vec3 step(vec3 edge, vec3 x);" |
| "vec4 step(vec4 edge, vec4 x);" |
| "vec2 step(float edge, vec2 x);" |
| "vec3 step(float edge, vec3 x);" |
| "vec4 step(float edge, vec4 x);" |
| |
| "float smoothstep(float edge0, float edge1, float x);" |
| "vec2 smoothstep(vec2 edge0, vec2 edge1, vec2 x);" |
| "vec3 smoothstep(vec3 edge0, vec3 edge1, vec3 x);" |
| "vec4 smoothstep(vec4 edge0, vec4 edge1, vec4 x);" |
| "vec2 smoothstep(float edge0, float edge1, vec2 x);" |
| "vec3 smoothstep(float edge0, float edge1, vec3 x);" |
| "vec4 smoothstep(float edge0, float edge1, vec4 x);" |
| |
| "\n"); |
| |
| if (version >= 130) { |
| commonBuiltins.append( |
| " int abs( int x);" |
| "ivec2 abs(ivec2 x);" |
| "ivec3 abs(ivec3 x);" |
| "ivec4 abs(ivec4 x);" |
| |
| " int sign( int x);" |
| "ivec2 sign(ivec2 x);" |
| "ivec3 sign(ivec3 x);" |
| "ivec4 sign(ivec4 x);" |
| |
| "float trunc(float x);" |
| "vec2 trunc(vec2 x);" |
| "vec3 trunc(vec3 x);" |
| "vec4 trunc(vec4 x);" |
| |
| "float round(float x);" |
| "vec2 round(vec2 x);" |
| "vec3 round(vec3 x);" |
| "vec4 round(vec4 x);" |
| |
| "float roundEven(float x);" |
| "vec2 roundEven(vec2 x);" |
| "vec3 roundEven(vec3 x);" |
| "vec4 roundEven(vec4 x);" |
| |
| "float modf(float, out float);" |
| "vec2 modf(vec2, out vec2 );" |
| "vec3 modf(vec3, out vec3 );" |
| "vec4 modf(vec4, out vec4 );" |
| |
| " int min(int x, int y);" |
| "ivec2 min(ivec2 x, int y);" |
| "ivec3 min(ivec3 x, int y);" |
| "ivec4 min(ivec4 x, int y);" |
| "ivec2 min(ivec2 x, ivec2 y);" |
| "ivec3 min(ivec3 x, ivec3 y);" |
| "ivec4 min(ivec4 x, ivec4 y);" |
| |
| " uint min(uint x, uint y);" |
| "uvec2 min(uvec2 x, uint y);" |
| "uvec3 min(uvec3 x, uint y);" |
| "uvec4 min(uvec4 x, uint y);" |
| "uvec2 min(uvec2 x, uvec2 y);" |
| "uvec3 min(uvec3 x, uvec3 y);" |
| "uvec4 min(uvec4 x, uvec4 y);" |
| |
| " int max(int x, int y);" |
| "ivec2 max(ivec2 x, int y);" |
| "ivec3 max(ivec3 x, int y);" |
| "ivec4 max(ivec4 x, int y);" |
| "ivec2 max(ivec2 x, ivec2 y);" |
| "ivec3 max(ivec3 x, ivec3 y);" |
| "ivec4 max(ivec4 x, ivec4 y);" |
| |
| " uint max(uint x, uint y);" |
| "uvec2 max(uvec2 x, uint y);" |
| "uvec3 max(uvec3 x, uint y);" |
| "uvec4 max(uvec4 x, uint y);" |
| "uvec2 max(uvec2 x, uvec2 y);" |
| "uvec3 max(uvec3 x, uvec3 y);" |
| "uvec4 max(uvec4 x, uvec4 y);" |
| |
| "int clamp(int x, int minVal, int maxVal);" |
| "ivec2 clamp(ivec2 x, int minVal, int maxVal);" |
| "ivec3 clamp(ivec3 x, int minVal, int maxVal);" |
| "ivec4 clamp(ivec4 x, int minVal, int maxVal);" |
| "ivec2 clamp(ivec2 x, ivec2 minVal, ivec2 maxVal);" |
| "ivec3 clamp(ivec3 x, ivec3 minVal, ivec3 maxVal);" |
| "ivec4 clamp(ivec4 x, ivec4 minVal, ivec4 maxVal);" |
| |
| "uint clamp(uint x, uint minVal, uint maxVal);" |
| "uvec2 clamp(uvec2 x, uint minVal, uint maxVal);" |
| "uvec3 clamp(uvec3 x, uint minVal, uint maxVal);" |
| "uvec4 clamp(uvec4 x, uint minVal, uint maxVal);" |
| "uvec2 clamp(uvec2 x, uvec2 minVal, uvec2 maxVal);" |
| "uvec3 clamp(uvec3 x, uvec3 minVal, uvec3 maxVal);" |
| "uvec4 clamp(uvec4 x, uvec4 minVal, uvec4 maxVal);" |
| |
| "float mix(float x, float y, bool a);" |
| "vec2 mix(vec2 x, vec2 y, bvec2 a);" |
| "vec3 mix(vec3 x, vec3 y, bvec3 a);" |
| "vec4 mix(vec4 x, vec4 y, bvec4 a);" |
| |
| "bool isnan(float x);" |
| "bvec2 isnan(vec2 x);" |
| "bvec3 isnan(vec3 x);" |
| "bvec4 isnan(vec4 x);" |
| |
| "bool isinf(float x);" |
| "bvec2 isinf(vec2 x);" |
| "bvec3 isinf(vec3 x);" |
| "bvec4 isinf(vec4 x);" |
| |
| "\n"); |
| } |
| |
| // |
| // double functions added to desktop 4.00, but not fma, frexp, ldexp, or pack/unpack |
| // |
| if (profile != EEsProfile && version >= 400) { |
| commonBuiltins.append( |
| |
| "double sqrt(double);" |
| "dvec2 sqrt(dvec2);" |
| "dvec3 sqrt(dvec3);" |
| "dvec4 sqrt(dvec4);" |
| |
| "double inversesqrt(double);" |
| "dvec2 inversesqrt(dvec2);" |
| "dvec3 inversesqrt(dvec3);" |
| "dvec4 inversesqrt(dvec4);" |
| |
| "double abs(double);" |
| "dvec2 abs(dvec2);" |
| "dvec3 abs(dvec3);" |
| "dvec4 abs(dvec4);" |
| |
| "double sign(double);" |
| "dvec2 sign(dvec2);" |
| "dvec3 sign(dvec3);" |
| "dvec4 sign(dvec4);" |
| |
| "double floor(double);" |
| "dvec2 floor(dvec2);" |
| "dvec3 floor(dvec3);" |
| "dvec4 floor(dvec4);" |
| |
| "double trunc(double);" |
| "dvec2 trunc(dvec2);" |
| "dvec3 trunc(dvec3);" |
| "dvec4 trunc(dvec4);" |
| |
| "double round(double);" |
| "dvec2 round(dvec2);" |
| "dvec3 round(dvec3);" |
| "dvec4 round(dvec4);" |
| |
| "double roundEven(double);" |
| "dvec2 roundEven(dvec2);" |
| "dvec3 roundEven(dvec3);" |
| "dvec4 roundEven(dvec4);" |
| |
| "double ceil(double);" |
| "dvec2 ceil(dvec2);" |
| "dvec3 ceil(dvec3);" |
| "dvec4 ceil(dvec4);" |
| |
| "double fract(double);" |
| "dvec2 fract(dvec2);" |
| "dvec3 fract(dvec3);" |
| "dvec4 fract(dvec4);" |
| |
| "double mod(double, double);" |
| "dvec2 mod(dvec2 , double);" |
| "dvec3 mod(dvec3 , double);" |
| "dvec4 mod(dvec4 , double);" |
| "dvec2 mod(dvec2 , dvec2);" |
| "dvec3 mod(dvec3 , dvec3);" |
| "dvec4 mod(dvec4 , dvec4);" |
| |
| "double modf(double, out double);" |
| "dvec2 modf(dvec2, out dvec2);" |
| "dvec3 modf(dvec3, out dvec3);" |
| "dvec4 modf(dvec4, out dvec4);" |
| |
| "double min(double, double);" |
| "dvec2 min(dvec2, double);" |
| "dvec3 min(dvec3, double);" |
| "dvec4 min(dvec4, double);" |
| "dvec2 min(dvec2, dvec2);" |
| "dvec3 min(dvec3, dvec3);" |
| "dvec4 min(dvec4, dvec4);" |
| |
| "double max(double, double);" |
| "dvec2 max(dvec2 , double);" |
| "dvec3 max(dvec3 , double);" |
| "dvec4 max(dvec4 , double);" |
| "dvec2 max(dvec2 , dvec2);" |
| "dvec3 max(dvec3 , dvec3);" |
| "dvec4 max(dvec4 , dvec4);" |
| |
| "double clamp(double, double, double);" |
| "dvec2 clamp(dvec2 , double, double);" |
| "dvec3 clamp(dvec3 , double, double);" |
| "dvec4 clamp(dvec4 , double, double);" |
| "dvec2 clamp(dvec2 , dvec2 , dvec2);" |
| "dvec3 clamp(dvec3 , dvec3 , dvec3);" |
| "dvec4 clamp(dvec4 , dvec4 , dvec4);" |
| |
| "double mix(double, double, double);" |
| "dvec2 mix(dvec2, dvec2, double);" |
| "dvec3 mix(dvec3, dvec3, double);" |
| "dvec4 mix(dvec4, dvec4, double);" |
| "dvec2 mix(dvec2, dvec2, dvec2);" |
| "dvec3 mix(dvec3, dvec3, dvec3);" |
| "dvec4 mix(dvec4, dvec4, dvec4);" |
| "double mix(double, double, bool);" |
| "dvec2 mix(dvec2, dvec2, bvec2);" |
| "dvec3 mix(dvec3, dvec3, bvec3);" |
| "dvec4 mix(dvec4, dvec4, bvec4);" |
| |
| "double step(double, double);" |
| "dvec2 step(dvec2 , dvec2);" |
| "dvec3 step(dvec3 , dvec3);" |
| "dvec4 step(dvec4 , dvec4);" |
| "dvec2 step(double, dvec2);" |
| "dvec3 step(double, dvec3);" |
| "dvec4 step(double, dvec4);" |
| |
| "double smoothstep(double, double, double);" |
| "dvec2 smoothstep(dvec2 , dvec2 , dvec2);" |
| "dvec3 smoothstep(dvec3 , dvec3 , dvec3);" |
| "dvec4 smoothstep(dvec4 , dvec4 , dvec4);" |
| "dvec2 smoothstep(double, double, dvec2);" |
| "dvec3 smoothstep(double, double, dvec3);" |
| "dvec4 smoothstep(double, double, dvec4);" |
| |
| "bool isnan(double);" |
| "bvec2 isnan(dvec2);" |
| "bvec3 isnan(dvec3);" |
| "bvec4 isnan(dvec4);" |
| |
| "bool isinf(double);" |
| "bvec2 isinf(dvec2);" |
| "bvec3 isinf(dvec3);" |
| "bvec4 isinf(dvec4);" |
| |
| "double length(double);" |
| "double length(dvec2);" |
| "double length(dvec3);" |
| "double length(dvec4);" |
| |
| "double distance(double, double);" |
| "double distance(dvec2 , dvec2);" |
| "double distance(dvec3 , dvec3);" |
| "double distance(dvec4 , dvec4);" |
| |
| "double dot(double, double);" |
| "double dot(dvec2 , dvec2);" |
| "double dot(dvec3 , dvec3);" |
| "double dot(dvec4 , dvec4);" |
| |
| "dvec3 cross(dvec3, dvec3);" |
| |
| "double normalize(double);" |
| "dvec2 normalize(dvec2);" |
| "dvec3 normalize(dvec3);" |
| "dvec4 normalize(dvec4);" |
| |
| "double faceforward(double, double, double);" |
| "dvec2 faceforward(dvec2, dvec2, dvec2);" |
| "dvec3 faceforward(dvec3, dvec3, dvec3);" |
| "dvec4 faceforward(dvec4, dvec4, dvec4);" |
| |
| "double reflect(double, double);" |
| "dvec2 reflect(dvec2 , dvec2 );" |
| "dvec3 reflect(dvec3 , dvec3 );" |
| "dvec4 reflect(dvec4 , dvec4 );" |
| |
| "double refract(double, double, double);" |
| "dvec2 refract(dvec2 , dvec2 , double);" |
| "dvec3 refract(dvec3 , dvec3 , double);" |
| "dvec4 refract(dvec4 , dvec4 , double);" |
| |
| "dmat2 matrixCompMult(dmat2, dmat2);" |
| "dmat3 matrixCompMult(dmat3, dmat3);" |
| "dmat4 matrixCompMult(dmat4, dmat4);" |
| "dmat2x3 matrixCompMult(dmat2x3, dmat2x3);" |
| "dmat2x4 matrixCompMult(dmat2x4, dmat2x4);" |
| "dmat3x2 matrixCompMult(dmat3x2, dmat3x2);" |
| "dmat3x4 matrixCompMult(dmat3x4, dmat3x4);" |
| "dmat4x2 matrixCompMult(dmat4x2, dmat4x2);" |
| "dmat4x3 matrixCompMult(dmat4x3, dmat4x3);" |
| |
| "dmat2 outerProduct(dvec2, dvec2);" |
| "dmat3 outerProduct(dvec3, dvec3);" |
| "dmat4 outerProduct(dvec4, dvec4);" |
| "dmat2x3 outerProduct(dvec3, dvec2);" |
| "dmat3x2 outerProduct(dvec2, dvec3);" |
| "dmat2x4 outerProduct(dvec4, dvec2);" |
| "dmat4x2 outerProduct(dvec2, dvec4);" |
| "dmat3x4 outerProduct(dvec4, dvec3);" |
| "dmat4x3 outerProduct(dvec3, dvec4);" |
| |
| "dmat2 transpose(dmat2);" |
| "dmat3 transpose(dmat3);" |
| "dmat4 transpose(dmat4);" |
| "dmat2x3 transpose(dmat3x2);" |
| "dmat3x2 transpose(dmat2x3);" |
| "dmat2x4 transpose(dmat4x2);" |
| "dmat4x2 transpose(dmat2x4);" |
| "dmat3x4 transpose(dmat4x3);" |
| "dmat4x3 transpose(dmat3x4);" |
| |
| "double determinant(dmat2);" |
| "double determinant(dmat3);" |
| "double determinant(dmat4);" |
| |
| "dmat2 inverse(dmat2);" |
| "dmat3 inverse(dmat3);" |
| "dmat4 inverse(dmat4);" |
| |
| "bvec2 lessThan(dvec2, dvec2);" |
| "bvec3 lessThan(dvec3, dvec3);" |
| "bvec4 lessThan(dvec4, dvec4);" |
| |
| "bvec2 lessThanEqual(dvec2, dvec2);" |
| "bvec3 lessThanEqual(dvec3, dvec3);" |
| "bvec4 lessThanEqual(dvec4, dvec4);" |
| |
| "bvec2 greaterThan(dvec2, dvec2);" |
| "bvec3 greaterThan(dvec3, dvec3);" |
| "bvec4 greaterThan(dvec4, dvec4);" |
| |
| "bvec2 greaterThanEqual(dvec2, dvec2);" |
| "bvec3 greaterThanEqual(dvec3, dvec3);" |
| "bvec4 greaterThanEqual(dvec4, dvec4);" |
| |
| "bvec2 equal(dvec2, dvec2);" |
| "bvec3 equal(dvec3, dvec3);" |
| "bvec4 equal(dvec4, dvec4);" |
| |
| "bvec2 notEqual(dvec2, dvec2);" |
| "bvec3 notEqual(dvec3, dvec3);" |
| "bvec4 notEqual(dvec4, dvec4);" |
| |
| "\n"); |
| } |
| |
| if (profile != EEsProfile && version >= 450) { |
| commonBuiltins.append( |
| |
| "int64_t abs(int64_t);" |
| "i64vec2 abs(i64vec2);" |
| "i64vec3 abs(i64vec3);" |
| "i64vec4 abs(i64vec4);" |
| |
| "int64_t sign(int64_t);" |
| "i64vec2 sign(i64vec2);" |
| "i64vec3 sign(i64vec3);" |
| "i64vec4 sign(i64vec4);" |
| |
| "int64_t min(int64_t, int64_t);" |
| "i64vec2 min(i64vec2, int64_t);" |
| "i64vec3 min(i64vec3, int64_t);" |
| "i64vec4 min(i64vec4, int64_t);" |
| "i64vec2 min(i64vec2, i64vec2);" |
| "i64vec3 min(i64vec3, i64vec3);" |
| "i64vec4 min(i64vec4, i64vec4);" |
| "uint64_t min(uint64_t, uint64_t);" |
| "u64vec2 min(u64vec2, uint64_t);" |
| "u64vec3 min(u64vec3, uint64_t);" |
| "u64vec4 min(u64vec4, uint64_t);" |
| "u64vec2 min(u64vec2, u64vec2);" |
| "u64vec3 min(u64vec3, u64vec3);" |
| "u64vec4 min(u64vec4, u64vec4);" |
| |
| "int64_t max(int64_t, int64_t);" |
| "i64vec2 max(i64vec2, int64_t);" |
| "i64vec3 max(i64vec3, int64_t);" |
| "i64vec4 max(i64vec4, int64_t);" |
| "i64vec2 max(i64vec2, i64vec2);" |
| "i64vec3 max(i64vec3, i64vec3);" |
| "i64vec4 max(i64vec4, i64vec4);" |
| "uint64_t max(uint64_t, uint64_t);" |
| "u64vec2 max(u64vec2, uint64_t);" |
| "u64vec3 max(u64vec3, uint64_t);" |
| "u64vec4 max(u64vec4, uint64_t);" |
| "u64vec2 max(u64vec2, u64vec2);" |
| "u64vec3 max(u64vec3, u64vec3);" |
| "u64vec4 max(u64vec4, u64vec4);" |
| |
| "int64_t clamp(int64_t, int64_t, int64_t);" |
| "i64vec2 clamp(i64vec2, int64_t, int64_t);" |
| "i64vec3 clamp(i64vec3, int64_t, int64_t);" |
| "i64vec4 clamp(i64vec4, int64_t, int64_t);" |
| "i64vec2 clamp(i64vec2, i64vec2, i64vec2);" |
| "i64vec3 clamp(i64vec3, i64vec3, i64vec3);" |
| "i64vec4 clamp(i64vec4, i64vec4, i64vec4);" |
| "uint64_t clamp(uint64_t, uint64_t, uint64_t);" |
| "u64vec2 clamp(u64vec2, uint64_t, uint64_t);" |
| "u64vec3 clamp(u64vec3, uint64_t, uint64_t);" |
| "u64vec4 clamp(u64vec4, uint64_t, uint64_t);" |
| "u64vec2 clamp(u64vec2, u64vec2, u64vec2);" |
| "u64vec3 clamp(u64vec3, u64vec3, u64vec3);" |
| "u64vec4 clamp(u64vec4, u64vec4, u64vec4);" |
| |
| "int64_t mix(int64_t, int64_t, bool);" |
| "i64vec2 mix(i64vec2, i64vec2, bvec2);" |
| "i64vec3 mix(i64vec3, i64vec3, bvec3);" |
| "i64vec4 mix(i64vec4, i64vec4, bvec4);" |
| "uint64_t mix(uint64_t, uint64_t, bool);" |
| "u64vec2 mix(u64vec2, u64vec2, bvec2);" |
| "u64vec3 mix(u64vec3, u64vec3, bvec3);" |
| "u64vec4 mix(u64vec4, u64vec4, bvec4);" |
| |
| "int64_t doubleBitsToInt64(double);" |
| "i64vec2 doubleBitsToInt64(dvec2);" |
| "i64vec3 doubleBitsToInt64(dvec3);" |
| "i64vec4 doubleBitsToInt64(dvec4);" |
| |
| "uint64_t doubleBitsToUint64(double);" |
| "u64vec2 doubleBitsToUint64(dvec2);" |
| "u64vec3 doubleBitsToUint64(dvec3);" |
| "u64vec4 doubleBitsToUint64(dvec4);" |
| |
| "double int64BitsToDouble(int64_t);" |
| "dvec2 int64BitsToDouble(i64vec2);" |
| "dvec3 int64BitsToDouble(i64vec3);" |
| "dvec4 int64BitsToDouble(i64vec4);" |
| |
| "double uint64BitsToDouble(uint64_t);" |
| "dvec2 uint64BitsToDouble(u64vec2);" |
| "dvec3 uint64BitsToDouble(u64vec3);" |
| "dvec4 uint64BitsToDouble(u64vec4);" |
| |
| "int64_t packInt2x32(ivec2);" |
| "uint64_t packUint2x32(uvec2);" |
| "ivec2 unpackInt2x32(int64_t);" |
| "uvec2 unpackUint2x32(uint64_t);" |
| |
| "bvec2 lessThan(i64vec2, i64vec2);" |
| "bvec3 lessThan(i64vec3, i64vec3);" |
| "bvec4 lessThan(i64vec4, i64vec4);" |
| "bvec2 lessThan(u64vec2, u64vec2);" |
| "bvec3 lessThan(u64vec3, u64vec3);" |
| "bvec4 lessThan(u64vec4, u64vec4);" |
| |
| "bvec2 lessThanEqual(i64vec2, i64vec2);" |
| "bvec3 lessThanEqual(i64vec3, i64vec3);" |
| "bvec4 lessThanEqual(i64vec4, i64vec4);" |
| "bvec2 lessThanEqual(u64vec2, u64vec2);" |
| "bvec3 lessThanEqual(u64vec3, u64vec3);" |
| "bvec4 lessThanEqual(u64vec4, u64vec4);" |
| |
| "bvec2 greaterThan(i64vec2, i64vec2);" |
| "bvec3 greaterThan(i64vec3, i64vec3);" |
| "bvec4 greaterThan(i64vec4, i64vec4);" |
| "bvec2 greaterThan(u64vec2, u64vec2);" |
| "bvec3 greaterThan(u64vec3, u64vec3);" |
| "bvec4 greaterThan(u64vec4, u64vec4);" |
| |
| "bvec2 greaterThanEqual(i64vec2, i64vec2);" |
| "bvec3 greaterThanEqual(i64vec3, i64vec3);" |
| "bvec4 greaterThanEqual(i64vec4, i64vec4);" |
| "bvec2 greaterThanEqual(u64vec2, u64vec2);" |
| "bvec3 greaterThanEqual(u64vec3, u64vec3);" |
| "bvec4 greaterThanEqual(u64vec4, u64vec4);" |
| |
| "bvec2 equal(i64vec2, i64vec2);" |
| "bvec3 equal(i64vec3, i64vec3);" |
| "bvec4 equal(i64vec4, i64vec4);" |
| "bvec2 equal(u64vec2, u64vec2);" |
| "bvec3 equal(u64vec3, u64vec3);" |
| "bvec4 equal(u64vec4, u64vec4);" |
| |
| "bvec2 notEqual(i64vec2, i64vec2);" |
| "bvec3 notEqual(i64vec3, i64vec3);" |
| "bvec4 notEqual(i64vec4, i64vec4);" |
| "bvec2 notEqual(u64vec2, u64vec2);" |
| "bvec3 notEqual(u64vec3, u64vec3);" |
| "bvec4 notEqual(u64vec4, u64vec4);" |
| |
| "int findLSB(int64_t);" |
| "ivec2 findLSB(i64vec2);" |
| "ivec3 findLSB(i64vec3);" |
| "ivec4 findLSB(i64vec4);" |
| |
| "int findLSB(uint64_t);" |
| "ivec2 findLSB(u64vec2);" |
| "ivec3 findLSB(u64vec3);" |
| "ivec4 findLSB(u64vec4);" |
| |
| "int findMSB(int64_t);" |
| "ivec2 findMSB(i64vec2);" |
| "ivec3 findMSB(i64vec3);" |
| "ivec4 findMSB(i64vec4);" |
| |
| "int findMSB(uint64_t);" |
| "ivec2 findMSB(u64vec2);" |
| "ivec3 findMSB(u64vec3);" |
| "ivec4 findMSB(u64vec4);" |
| |
| "\n" |
| ); |
| } |
| |
| #ifdef AMD_EXTENSIONS |
| // GL_AMD_shader_trinary_minmax |
| if (profile != EEsProfile && version >= 430) { |
| commonBuiltins.append( |
| "float min3(float, float, float);" |
| "vec2 min3(vec2, vec2, vec2);" |
| "vec3 min3(vec3, vec3, vec3);" |
| "vec4 min3(vec4, vec4, vec4);" |
| |
| "int min3(int, int, int);" |
| "ivec2 min3(ivec2, ivec2, ivec2);" |
| "ivec3 min3(ivec3, ivec3, ivec3);" |
| "ivec4 min3(ivec4, ivec4, ivec4);" |
| |
| "uint min3(uint, uint, uint);" |
| "uvec2 min3(uvec2, uvec2, uvec2);" |
| "uvec3 min3(uvec3, uvec3, uvec3);" |
| "uvec4 min3(uvec4, uvec4, uvec4);" |
| |
| "float max3(float, float, float);" |
| "vec2 max3(vec2, vec2, vec2);" |
| "vec3 max3(vec3, vec3, vec3);" |
| "vec4 max3(vec4, vec4, vec4);" |
| |
| "int max3(int, int, int);" |
| "ivec2 max3(ivec2, ivec2, ivec2);" |
| "ivec3 max3(ivec3, ivec3, ivec3);" |
| "ivec4 max3(ivec4, ivec4, ivec4);" |
| |
| "uint max3(uint, uint, uint);" |
| "uvec2 max3(uvec2, uvec2, uvec2);" |
| "uvec3 max3(uvec3, uvec3, uvec3);" |
| "uvec4 max3(uvec4, uvec4, uvec4);" |
| |
| "float mid3(float, float, float);" |
| "vec2 mid3(vec2, vec2, vec2);" |
| "vec3 mid3(vec3, vec3, vec3);" |
| "vec4 mid3(vec4, vec4, vec4);" |
| |
| "int mid3(int, int, int);" |
| "ivec2 mid3(ivec2, ivec2, ivec2);" |
| "ivec3 mid3(ivec3, ivec3, ivec3);" |
| "ivec4 mid3(ivec4, ivec4, ivec4);" |
| |
| "uint mid3(uint, uint, uint);" |
| "uvec2 mid3(uvec2, uvec2, uvec2);" |
| "uvec3 mid3(uvec3, uvec3, uvec3);" |
| "uvec4 mid3(uvec4, uvec4, uvec4);" |
| |
| "float16_t min3(float16_t, float16_t, float16_t);" |
| "f16vec2 min3(f16vec2, f16vec2, f16vec2);" |
| "f16vec3 min3(f16vec3, f16vec3, f16vec3);" |
| "f16vec4 min3(f16vec4, f16vec4, f16vec4);" |
| |
| "float16_t max3(float16_t, float16_t, float16_t);" |
| "f16vec2 max3(f16vec2, f16vec2, f16vec2);" |
| "f16vec3 max3(f16vec3, f16vec3, f16vec3);" |
| "f16vec4 max3(f16vec4, f16vec4, f16vec4);" |
| |
| "float16_t mid3(float16_t, float16_t, float16_t);" |
| "f16vec2 mid3(f16vec2, f16vec2, f16vec2);" |
| "f16vec3 mid3(f16vec3, f16vec3, f16vec3);" |
| "f16vec4 mid3(f16vec4, f16vec4, f16vec4);" |
| |
| "\n" |
| ); |
| } |
| #endif |
| |
| if ((profile == EEsProfile && version >= 310) || |
| (profile != EEsProfile && version >= 430)) { |
| commonBuiltins.append( |
| "uint atomicAdd(coherent volatile inout uint, uint);" |
| " int atomicAdd(coherent volatile inout int, int);" |
| |
| "uint atomicMin(coherent volatile inout uint, uint);" |
| " int atomicMin(coherent volatile inout int, int);" |
| |
| "uint atomicMax(coherent volatile inout uint, uint);" |
| " int atomicMax(coherent volatile inout int, int);" |
| |
| "uint atomicAnd(coherent volatile inout uint, uint);" |
| " int atomicAnd(coherent volatile inout int, int);" |
| |
| "uint atomicOr (coherent volatile inout uint, uint);" |
| " int atomicOr (coherent volatile inout int, int);" |
| |
| "uint atomicXor(coherent volatile inout uint, uint);" |
| " int atomicXor(coherent volatile inout int, int);" |
| |
| "uint atomicExchange(coherent volatile inout uint, uint);" |
| " int atomicExchange(coherent volatile inout int, int);" |
| |
| "uint atomicCompSwap(coherent volatile inout uint, uint, uint);" |
| " int atomicCompSwap(coherent volatile inout int, int, int);" |
| |
| "\n"); |
| } |
| |
| #ifdef NV_EXTENSIONS |
| if (profile != EEsProfile && version >= 440) { |
| commonBuiltins.append( |
| "uint64_t atomicMin(coherent volatile inout uint64_t, uint64_t);" |
| " int64_t atomicMin(coherent volatile inout int64_t, int64_t);" |
| |
| "uint64_t atomicMax(coherent volatile inout uint64_t, uint64_t);" |
| " int64_t atomicMax(coherent volatile inout int64_t, int64_t);" |
| |
| "uint64_t atomicAnd(coherent volatile inout uint64_t, uint64_t);" |
| " int64_t atomicAnd(coherent volatile inout int64_t, int64_t);" |
| |
| "uint64_t atomicOr (coherent volatile inout uint64_t, uint64_t);" |
| " int64_t atomicOr (coherent volatile inout int64_t, int64_t);" |
| |
| "uint64_t atomicXor(coherent volatile inout uint64_t, uint64_t);" |
| " int64_t atomicXor(coherent volatile inout int64_t, int64_t);" |
| |
| " int64_t atomicAdd(coherent volatile inout int64_t, int64_t);" |
| " int64_t atomicExchange(coherent volatile inout int64_t, int64_t);" |
| " int64_t atomicCompSwap(coherent volatile inout int64_t, int64_t, int64_t);" |
| |
| "\n"); |
| } |
| #endif |
| |
| if ((profile == EEsProfile && version >= 310) || |
| (profile != EEsProfile && version >= 450)) { |
| commonBuiltins.append( |
| "int mix(int x, int y, bool a);" |
| "ivec2 mix(ivec2 x, ivec2 y, bvec2 a);" |
| "ivec3 mix(ivec3 x, ivec3 y, bvec3 a);" |
| "ivec4 mix(ivec4 x, ivec4 y, bvec4 a);" |
| |
| "uint mix(uint x, uint y, bool a);" |
| "uvec2 mix(uvec2 x, uvec2 y, bvec2 a);" |
| "uvec3 mix(uvec3 x, uvec3 y, bvec3 a);" |
| "uvec4 mix(uvec4 x, uvec4 y, bvec4 a);" |
| |
| "bool mix(bool x, bool y, bool a);" |
| "bvec2 mix(bvec2 x, bvec2 y, bvec2 a);" |
| "bvec3 mix(bvec3 x, bvec3 y, bvec3 a);" |
| "bvec4 mix(bvec4 x, bvec4 y, bvec4 a);" |
| |
| "\n"); |
| } |
| |
| if ((profile == EEsProfile && version >= 300) || |
| (profile != EEsProfile && version >= 330)) { |
| commonBuiltins.append( |
| "int floatBitsToInt(highp float value);" |
| "ivec2 floatBitsToInt(highp vec2 value);" |
| "ivec3 floatBitsToInt(highp vec3 value);" |
| "ivec4 floatBitsToInt(highp vec4 value);" |
| |
| "uint floatBitsToUint(highp float value);" |
| "uvec2 floatBitsToUint(highp vec2 value);" |
| "uvec3 floatBitsToUint(highp vec3 value);" |
| "uvec4 floatBitsToUint(highp vec4 value);" |
| |
| "float intBitsToFloat(highp int value);" |
| "vec2 intBitsToFloat(highp ivec2 value);" |
| "vec3 intBitsToFloat(highp ivec3 value);" |
| "vec4 intBitsToFloat(highp ivec4 value);" |
| |
| "float uintBitsToFloat(highp uint value);" |
| "vec2 uintBitsToFloat(highp uvec2 value);" |
| "vec3 uintBitsToFloat(highp uvec3 value);" |
| "vec4 uintBitsToFloat(highp uvec4 value);" |
| |
| "\n"); |
| } |
| |
    // Fused multiply-add: desktop GLSL 4.00 or ESSL 3.10 (GL_OES_gpu_shader5).
    if ((profile != EEsProfile && version >= 400) ||
        (profile == EEsProfile && version >= 310)) { // GL_OES_gpu_shader5

        commonBuiltins.append(
            "float fma(float, float, float );"
            "vec2 fma(vec2, vec2, vec2 );"
            "vec3 fma(vec3, vec3, vec3 );"
            "vec4 fma(vec4, vec4, vec4 );"
            "\n");

        // Double-precision fma exists only on desktop profiles.
        if (profile != EEsProfile) {
            commonBuiltins.append(
                "double fma(double, double, double);"
                "dvec2 fma(dvec2, dvec2, dvec2 );"
                "dvec3 fma(dvec3, dvec3, dvec3 );"
                "dvec4 fma(dvec4, dvec4, dvec4 );"
                "\n");
        }
    }
| |
    // Split/assemble a float from significand and exponent.
    // Single-precision frexp/ldexp: ESSL 3.10 or desktop GLSL 4.00.
    if ((profile == EEsProfile && version >= 310) ||
        (profile != EEsProfile && version >= 400)) {
        commonBuiltins.append(
            "float frexp(highp float, out highp int);"
            "vec2 frexp(highp vec2, out highp ivec2);"
            "vec3 frexp(highp vec3, out highp ivec3);"
            "vec4 frexp(highp vec4, out highp ivec4);"

            "float ldexp(highp float, highp int);"
            "vec2 ldexp(highp vec2, highp ivec2);"
            "vec3 ldexp(highp vec3, highp ivec3);"
            "vec4 ldexp(highp vec4, highp ivec4);"

            "\n");
    }

    // Double-precision frexp/ldexp and double<->uvec2 bit packing:
    // desktop GLSL 4.00+ only.
    if (profile != EEsProfile && version >= 400) {
        commonBuiltins.append(
            "double frexp(double, out int);"
            "dvec2 frexp( dvec2, out ivec2);"
            "dvec3 frexp( dvec3, out ivec3);"
            "dvec4 frexp( dvec4, out ivec4);"

            "double ldexp(double, int);"
            "dvec2 ldexp( dvec2, ivec2);"
            "dvec3 ldexp( dvec3, ivec3);"
            "dvec4 ldexp( dvec4, ivec4);"

            "double packDouble2x32(uvec2);"
            "uvec2 unpackDouble2x32(double);"

            "\n");
    }
| |
    // Packing/unpacking of normalized and half-float vector components into a
    // single uint. Each group is gated on the version that introduced it; the
    // ES and desktop declarations differ in the precision qualifier of the
    // unpack results (ES returns mediump, desktop uses the default precision).
    if ((profile == EEsProfile && version >= 300) ||
        (profile != EEsProfile && version >= 400)) {
        commonBuiltins.append(
            "highp uint packUnorm2x16(vec2);"
            "vec2 unpackUnorm2x16(highp uint);"
            "\n");
    }

    if ((profile == EEsProfile && version >= 300) ||
        (profile != EEsProfile && version >= 420)) {
        commonBuiltins.append(
            "highp uint packSnorm2x16(vec2);"
            " vec2 unpackSnorm2x16(highp uint);"
            "highp uint packHalf2x16(vec2);"
            "\n");
    }

    // unpackHalf2x16: mediump result on ES, default precision on desktop.
    if (profile == EEsProfile && version >= 300) {
        commonBuiltins.append(
            "mediump vec2 unpackHalf2x16(highp uint);"
            "\n");
    } else if (profile != EEsProfile && version >= 420) {
        commonBuiltins.append(
            " vec2 unpackHalf2x16(highp uint);"
            "\n");
    }

    if ((profile == EEsProfile && version >= 310) ||
        (profile != EEsProfile && version >= 400)) {
        commonBuiltins.append(
            "highp uint packSnorm4x8(vec4);"
            "highp uint packUnorm4x8(vec4);"
            "\n");
    }

    // unpackSnorm4x8/unpackUnorm4x8: mediump result on ES, default on desktop.
    if (profile == EEsProfile && version >= 310) {
        commonBuiltins.append(
            "mediump vec4 unpackSnorm4x8(highp uint);"
            "mediump vec4 unpackUnorm4x8(highp uint);"
            "\n");
    } else if (profile != EEsProfile && version >= 400) {
        commonBuiltins.append(
            "vec4 unpackSnorm4x8(highp uint);"
            "vec4 unpackUnorm4x8(highp uint);"
            "\n");
    }
| |
| // |
| // Geometric Functions. |
| // |
| commonBuiltins.append( |
| "float length(float x);" |
| "float length(vec2 x);" |
| "float length(vec3 x);" |
| "float length(vec4 x);" |
| |
| "float distance(float p0, float p1);" |
| "float distance(vec2 p0, vec2 p1);" |
| "float distance(vec3 p0, vec3 p1);" |
| "float distance(vec4 p0, vec4 p1);" |
| |
| "float dot(float x, float y);" |
| "float dot(vec2 x, vec2 y);" |
| "float dot(vec3 x, vec3 y);" |
| "float dot(vec4 x, vec4 y);" |
| |
| "vec3 cross(vec3 x, vec3 y);" |
| "float normalize(float x);" |
| "vec2 normalize(vec2 x);" |
| "vec3 normalize(vec3 x);" |
| "vec4 normalize(vec4 x);" |
| |
| "float faceforward(float N, float I, float Nref);" |
| "vec2 faceforward(vec2 N, vec2 I, vec2 Nref);" |
| "vec3 faceforward(vec3 N, vec3 I, vec3 Nref);" |
| "vec4 faceforward(vec4 N, vec4 I, vec4 Nref);" |
| |
| "float reflect(float I, float N);" |
| "vec2 reflect(vec2 I, vec2 N);" |
| "vec3 reflect(vec3 I, vec3 N);" |
| "vec4 reflect(vec4 I, vec4 N);" |
| |
| "float refract(float I, float N, float eta);" |
| "vec2 refract(vec2 I, vec2 N, float eta);" |
| "vec3 refract(vec3 I, vec3 N, float eta);" |
| "vec4 refract(vec4 I, vec4 N, float eta);" |
| |
| "\n"); |
| |
| // |
| // Matrix Functions. |
| // |
| commonBuiltins.append( |
| "mat2 matrixCompMult(mat2 x, mat2 y);" |
| "mat3 matrixCompMult(mat3 x, mat3 y);" |
| "mat4 matrixCompMult(mat4 x, mat4 y);" |
| |
| "\n"); |
| |
| // 120 is correct for both ES and desktop |
| if (version >= 120) { |
| commonBuiltins.append( |
| "mat2 outerProduct(vec2 c, vec2 r);" |
| "mat3 outerProduct(vec3 c, vec3 r);" |
| "mat4 outerProduct(vec4 c, vec4 r);" |
| "mat2x3 outerProduct(vec3 c, vec2 r);" |
| "mat3x2 outerProduct(vec2 c, vec3 r);" |
| "mat2x4 outerProduct(vec4 c, vec2 r);" |
| "mat4x2 outerProduct(vec2 c, vec4 r);" |
| "mat3x4 outerProduct(vec4 c, vec3 r);" |
| "mat4x3 outerProduct(vec3 c, vec4 r);" |
| |
| "mat2 transpose(mat2 m);" |
| "mat3 transpose(mat3 m);" |
| "mat4 transpose(mat4 m);" |
| "mat2x3 transpose(mat3x2 m);" |
| "mat3x2 transpose(mat2x3 m);" |
| "mat2x4 transpose(mat4x2 m);" |
| "mat4x2 transpose(mat2x4 m);" |
| "mat3x4 transpose(mat4x3 m);" |
| "mat4x3 transpose(mat3x4 m);" |
| |
| "mat2x3 matrixCompMult(mat2x3, mat2x3);" |
| "mat2x4 matrixCompMult(mat2x4, mat2x4);" |
| "mat3x2 matrixCompMult(mat3x2, mat3x2);" |
| "mat3x4 matrixCompMult(mat3x4, mat3x4);" |
| "mat4x2 matrixCompMult(mat4x2, mat4x2);" |
| "mat4x3 matrixCompMult(mat4x3, mat4x3);" |
| |
| "\n"); |
| |
| // 150 is correct for both ES and desktop |
| if (version >= 150) { |
| commonBuiltins.append( |
| "float determinant(mat2 m);" |
| "float determinant(mat3 m);" |
| "float determinant(mat4 m);" |
| |
| "mat2 inverse(mat2 m);" |
| "mat3 inverse(mat3 m);" |
| "mat4 inverse(mat4 m);" |
| |
| "\n"); |
| } |
| } |
| |
| // |
| // Vector relational functions. |
| // |
| commonBuiltins.append( |
| "bvec2 lessThan(vec2 x, vec2 y);" |
| "bvec3 lessThan(vec3 x, vec3 y);" |
| "bvec4 lessThan(vec4 x, vec4 y);" |
| |
| "bvec2 lessThan(ivec2 x, ivec2 y);" |
| "bvec3 lessThan(ivec3 x, ivec3 y);" |
| "bvec4 lessThan(ivec4 x, ivec4 y);" |
| |
| "bvec2 lessThanEqual(vec2 x, vec2 y);" |
| "bvec3 lessThanEqual(vec3 x, vec3 y);" |
| "bvec4 lessThanEqual(vec4 x, vec4 y);" |
| |
| "bvec2 lessThanEqual(ivec2 x, ivec2 y);" |
| "bvec3 lessThanEqual(ivec3 x, ivec3 y);" |
| "bvec4 lessThanEqual(ivec4 x, ivec4 y);" |
| |
| "bvec2 greaterThan(vec2 x, vec2 y);" |
| "bvec3 greaterThan(vec3 x, vec3 y);" |
| "bvec4 greaterThan(vec4 x, vec4 y);" |
| |
| "bvec2 greaterThan(ivec2 x, ivec2 y);" |
| "bvec3 greaterThan(ivec3 x, ivec3 y);" |
| "bvec4 greaterThan(ivec4 x, ivec4 y);" |
| |
| "bvec2 greaterThanEqual(vec2 x, vec2 y);" |
| "bvec3 greaterThanEqual(vec3 x, vec3 y);" |
| "bvec4 greaterThanEqual(vec4 x, vec4 y);" |
| |
| "bvec2 greaterThanEqual(ivec2 x, ivec2 y);" |
| "bvec3 greaterThanEqual(ivec3 x, ivec3 y);" |
| "bvec4 greaterThanEqual(ivec4 x, ivec4 y);" |
| |
| "bvec2 equal(vec2 x, vec2 y);" |
| "bvec3 equal(vec3 x, vec3 y);" |
| "bvec4 equal(vec4 x, vec4 y);" |
| |
| "bvec2 equal(ivec2 x, ivec2 y);" |
| "bvec3 equal(ivec3 x, ivec3 y);" |
| "bvec4 equal(ivec4 x, ivec4 y);" |
| |
| "bvec2 equal(bvec2 x, bvec2 y);" |
| "bvec3 equal(bvec3 x, bvec3 y);" |
| "bvec4 equal(bvec4 x, bvec4 y);" |
| |
| "bvec2 notEqual(vec2 x, vec2 y);" |
| "bvec3 notEqual(vec3 x, vec3 y);" |
| "bvec4 notEqual(vec4 x, vec4 y);" |
| |
| "bvec2 notEqual(ivec2 x, ivec2 y);" |
| "bvec3 notEqual(ivec3 x, ivec3 y);" |
| "bvec4 notEqual(ivec4 x, ivec4 y);" |
| |
| "bvec2 notEqual(bvec2 x, bvec2 y);" |
| "bvec3 notEqual(bvec3 x, bvec3 y);" |
| "bvec4 notEqual(bvec4 x, bvec4 y);" |
| |
| "bool any(bvec2 x);" |
| "bool any(bvec3 x);" |
| "bool any(bvec4 x);" |
| |
| "bool all(bvec2 x);" |
| "bool all(bvec3 x);" |
| "bool all(bvec4 x);" |
| |
| "bvec2 not(bvec2 x);" |
| "bvec3 not(bvec3 x);" |
| "bvec4 not(bvec4 x);" |
| |
| "\n"); |
| |
| if (version >= 130) { |
| commonBuiltins.append( |
| "bvec2 lessThan(uvec2 x, uvec2 y);" |
| "bvec3 lessThan(uvec3 x, uvec3 y);" |
| "bvec4 lessThan(uvec4 x, uvec4 y);" |
| |
| "bvec2 lessThanEqual(uvec2 x, uvec2 y);" |
| "bvec3 lessThanEqual(uvec3 x, uvec3 y);" |
| "bvec4 lessThanEqual(uvec4 x, uvec4 y);" |
| |
| "bvec2 greaterThan(uvec2 x, uvec2 y);" |
| "bvec3 greaterThan(uvec3 x, uvec3 y);" |
| "bvec4 greaterThan(uvec4 x, uvec4 y);" |
| |
| "bvec2 greaterThanEqual(uvec2 x, uvec2 y);" |
| "bvec3 greaterThanEqual(uvec3 x, uvec3 y);" |
| "bvec4 greaterThanEqual(uvec4 x, uvec4 y);" |
| |
| "bvec2 equal(uvec2 x, uvec2 y);" |
| "bvec3 equal(uvec3 x, uvec3 y);" |
| "bvec4 equal(uvec4 x, uvec4 y);" |
| |
| "bvec2 notEqual(uvec2 x, uvec2 y);" |
| "bvec3 notEqual(uvec3 x, uvec3 y);" |
| "bvec4 notEqual(uvec4 x, uvec4 y);" |
| |
| "\n"); |
| } |
| |
| // |
| // Original-style texture functions existing in all stages. |
| // (Per-stage functions below.) |
| // |
| if ((profile == EEsProfile && version == 100) || |
| profile == ECompatibilityProfile || |
| (profile == ECoreProfile && version < 420) || |
| profile == ENoProfile) { |
| if (spvVersion.spv == 0) { |
| commonBuiltins.append( |
| "vec4 texture2D(sampler2D, vec2);" |
| |
| "vec4 texture2DProj(sampler2D, vec3);" |
| "vec4 texture2DProj(sampler2D, vec4);" |
| |
| "vec4 texture3D(sampler3D, vec3);" // OES_texture_3D, but caught by keyword check |
| "vec4 texture3DProj(sampler3D, vec4);" // OES_texture_3D, but caught by keyword check |
| |
| "vec4 textureCube(samplerCube, vec3);" |
| |
| "\n"); |
| } |
| } |
| |
| if ( profile == ECompatibilityProfile || |
| (profile == ECoreProfile && version < 420) || |
| profile == ENoProfile) { |
| if (spvVersion.spv == 0) { |
| commonBuiltins.append( |
| "vec4 texture1D(sampler1D, float);" |
| |
| "vec4 texture1DProj(sampler1D, vec2);" |
| "vec4 texture1DProj(sampler1D, vec4);" |
| |
| "vec4 shadow1D(sampler1DShadow, vec3);" |
| "vec4 shadow2D(sampler2DShadow, vec3);" |
| "vec4 shadow1DProj(sampler1DShadow, vec4);" |
| "vec4 shadow2DProj(sampler2DShadow, vec4);" |
| |
| "vec4 texture2DRect(sampler2DRect, vec2);" // GL_ARB_texture_rectangle, caught by keyword check |
| "vec4 texture2DRectProj(sampler2DRect, vec3);" // GL_ARB_texture_rectangle, caught by keyword check |
| "vec4 texture2DRectProj(sampler2DRect, vec4);" // GL_ARB_texture_rectangle, caught by keyword check |
| "vec4 shadow2DRect(sampler2DRectShadow, vec3);" // GL_ARB_texture_rectangle, caught by keyword check |
| "vec4 shadow2DRectProj(sampler2DRectShadow, vec4);" // GL_ARB_texture_rectangle, caught by keyword check |
| |
| "\n"); |
| } |
| } |
| |
| if (profile == EEsProfile) { |
| if (spvVersion.spv == 0) { |
| if (version < 300) { |
| commonBuiltins.append( |
| "vec4 texture2D(samplerExternalOES, vec2 coord);" // GL_OES_EGL_image_external |
| "vec4 texture2DProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external |
| "vec4 texture2DProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external |
| "\n"); |
| } else { |
| commonBuiltins.append( |
| "highp ivec2 textureSize(samplerExternalOES, int lod);" // GL_OES_EGL_image_external_essl3 |
| "vec4 texture(samplerExternalOES, vec2);" // GL_OES_EGL_image_external_essl3 |
| "vec4 texture(samplerExternalOES, vec2, float bias);" // GL_OES_EGL_image_external_essl3 |
| "vec4 textureProj(samplerExternalOES, vec3);" // GL_OES_EGL_image_external_essl3 |
| "vec4 textureProj(samplerExternalOES, vec3, float bias);" // GL_OES_EGL_image_external_essl3 |
| "vec4 textureProj(samplerExternalOES, vec4);" // GL_OES_EGL_image_external_essl3 |
| "vec4 textureProj(samplerExternalOES, vec4, float bias);" // GL_OES_EGL_image_external_essl3 |
| "vec4 texelFetch(samplerExternalOES, ivec2, int lod);" // GL_OES_EGL_image_external_essl3 |
| "\n"); |
| } |
| commonBuiltins.append( |
| "vec4 texture2DGradEXT(sampler2D, vec2, vec2, vec2);" // GL_EXT_shader_texture_lod |
| "vec4 texture2DProjGradEXT(sampler2D, vec3, vec2, vec2);" // GL_EXT_shader_texture_lod |
| "vec4 texture2DProjGradEXT(sampler2D, vec4, vec2, vec2);" // GL_EXT_shader_texture_lod |
| "vec4 textureCubeGradEXT(samplerCube, vec3, vec3, vec3);" // GL_EXT_shader_texture_lod |
| |
| "float shadow2DEXT(sampler2DShadow, vec3);" // GL_EXT_shadow_samplers |
| "float shadow2DProjEXT(sampler2DShadow, vec4);" // GL_EXT_shadow_samplers |
| |
| "\n"); |
| } |
| } |
| |
| // |
| // Noise functions. |
| // |
| if (profile != EEsProfile) { |
| commonBuiltins.append( |
| "float noise1(float x);" |
| "float noise1(vec2 x);" |
| "float noise1(vec3 x);" |
| "float noise1(vec4 x);" |
| |
| "vec2 noise2(float x);" |
| "vec2 noise2(vec2 x);" |
| "vec2 noise2(vec3 x);" |
| "vec2 noise2(vec4 x);" |
| |
| "vec3 noise3(float x);" |
| "vec3 noise3(vec2 x);" |
| "vec3 noise3(vec3 x);" |
| "vec3 noise3(vec4 x);" |
| |
| "vec4 noise4(float x);" |
| "vec4 noise4(vec2 x);" |
| "vec4 noise4(vec3 x);" |
| "vec4 noise4(vec4 x);" |
| |
| "\n"); |
| } |
| |
    // Atomic counters are not declared when targeting Vulkan
    // (spvVersion.vulkan != 0); they have no Vulkan SPIR-V mapping.
    if (spvVersion.vulkan == 0) {
        //
        // Atomic counter functions.
        //
        if ((profile != EEsProfile && version >= 300) ||
            (profile == EEsProfile && version >= 310)) {
            commonBuiltins.append(
                "uint atomicCounterIncrement(atomic_uint);"
                "uint atomicCounterDecrement(atomic_uint);"
                "uint atomicCounter(atomic_uint);"

                "\n");
        }
        // Extended atomic counter ops, desktop GLSL 4.60+.
        if (profile != EEsProfile && version >= 460) {
            commonBuiltins.append(
                "uint atomicCounterAdd(atomic_uint, uint);"
                "uint atomicCounterSubtract(atomic_uint, uint);"
                "uint atomicCounterMin(atomic_uint, uint);"
                "uint atomicCounterMax(atomic_uint, uint);"
                "uint atomicCounterAnd(atomic_uint, uint);"
                "uint atomicCounterOr(atomic_uint, uint);"
                "uint atomicCounterXor(atomic_uint, uint);"
                "uint atomicCounterExchange(atomic_uint, uint);"
                "uint atomicCounterCompSwap(atomic_uint, uint, uint);"

                "\n");
        }
    }
| |
    // Bitfield
    // bitfieldExtract/bitfieldInsert: ESSL 3.10 or desktop GLSL 4.00.
    if ((profile == EEsProfile && version >= 310) ||
        (profile != EEsProfile && version >= 400)) {
        commonBuiltins.append(
            " int bitfieldExtract( int, int, int);"
            "ivec2 bitfieldExtract(ivec2, int, int);"
            "ivec3 bitfieldExtract(ivec3, int, int);"
            "ivec4 bitfieldExtract(ivec4, int, int);"

            " uint bitfieldExtract( uint, int, int);"
            "uvec2 bitfieldExtract(uvec2, int, int);"
            "uvec3 bitfieldExtract(uvec3, int, int);"
            "uvec4 bitfieldExtract(uvec4, int, int);"

            " int bitfieldInsert( int base, int, int, int);"
            "ivec2 bitfieldInsert(ivec2 base, ivec2, int, int);"
            "ivec3 bitfieldInsert(ivec3 base, ivec3, int, int);"
            "ivec4 bitfieldInsert(ivec4 base, ivec4, int, int);"

            " uint bitfieldInsert( uint base, uint, int, int);"
            "uvec2 bitfieldInsert(uvec2 base, uvec2, int, int);"
            "uvec3 bitfieldInsert(uvec3 base, uvec3, int, int);"
            "uvec4 bitfieldInsert(uvec4 base, uvec4, int, int);"

            "\n");
    }

    // findLSB: default precision result on desktop, lowp result on ES
    // (the two branches declare the same signatures apart from precision).
    if (profile != EEsProfile && version >= 400) {
        commonBuiltins.append(
            " int findLSB( int);"
            "ivec2 findLSB(ivec2);"
            "ivec3 findLSB(ivec3);"
            "ivec4 findLSB(ivec4);"

            " int findLSB( uint);"
            "ivec2 findLSB(uvec2);"
            "ivec3 findLSB(uvec3);"
            "ivec4 findLSB(uvec4);"

            "\n");
    } else if (profile == EEsProfile && version >= 310) {
        commonBuiltins.append(
            "lowp int findLSB( int);"
            "lowp ivec2 findLSB(ivec2);"
            "lowp ivec3 findLSB(ivec3);"
            "lowp ivec4 findLSB(ivec4);"

            "lowp int findLSB( uint);"
            "lowp ivec2 findLSB(uvec2);"
            "lowp ivec3 findLSB(uvec3);"
            "lowp ivec4 findLSB(uvec4);"

            "\n");
    }

    // bitCount/findMSB for desktop GLSL 4.00+ (the ES lowp forms are declared
    // in a separate block further down).
    if (profile != EEsProfile && version >= 400) {
        commonBuiltins.append(
            " int bitCount( int);"
            "ivec2 bitCount(ivec2);"
            "ivec3 bitCount(ivec3);"
            "ivec4 bitCount(ivec4);"

            " int bitCount( uint);"
            "ivec2 bitCount(uvec2);"
            "ivec3 bitCount(uvec3);"
            "ivec4 bitCount(uvec4);"

            " int findMSB(highp int);"
            "ivec2 findMSB(highp ivec2);"
            "ivec3 findMSB(highp ivec3);"
            "ivec4 findMSB(highp ivec4);"

            " int findMSB(highp uint);"
            "ivec2 findMSB(highp uvec2);"
            "ivec3 findMSB(highp uvec3);"
            "ivec4 findMSB(highp uvec4);"

            "\n");
    }
| |
    // Extended-precision integer arithmetic and bit reversal
    // (ESSL 3.10 or desktop GLSL 4.00): add-with-carry, subtract-with-borrow,
    // 32x32->64 multiplies split into msb/lsb outputs, and bitfieldReverse.
    if ((profile == EEsProfile && version >= 310) ||
        (profile != EEsProfile && version >= 400)) {
        commonBuiltins.append(
            " uint uaddCarry(highp uint, highp uint, out lowp uint carry);"
            "uvec2 uaddCarry(highp uvec2, highp uvec2, out lowp uvec2 carry);"
            "uvec3 uaddCarry(highp uvec3, highp uvec3, out lowp uvec3 carry);"
            "uvec4 uaddCarry(highp uvec4, highp uvec4, out lowp uvec4 carry);"

            " uint usubBorrow(highp uint, highp uint, out lowp uint borrow);"
            "uvec2 usubBorrow(highp uvec2, highp uvec2, out lowp uvec2 borrow);"
            "uvec3 usubBorrow(highp uvec3, highp uvec3, out lowp uvec3 borrow);"
            "uvec4 usubBorrow(highp uvec4, highp uvec4, out lowp uvec4 borrow);"

            "void umulExtended(highp uint, highp uint, out highp uint, out highp uint lsb);"
            "void umulExtended(highp uvec2, highp uvec2, out highp uvec2, out highp uvec2 lsb);"
            "void umulExtended(highp uvec3, highp uvec3, out highp uvec3, out highp uvec3 lsb);"
            "void umulExtended(highp uvec4, highp uvec4, out highp uvec4, out highp uvec4 lsb);"

            "void imulExtended(highp int, highp int, out highp int, out highp int lsb);"
            "void imulExtended(highp ivec2, highp ivec2, out highp ivec2, out highp ivec2 lsb);"
            "void imulExtended(highp ivec3, highp ivec3, out highp ivec3, out highp ivec3 lsb);"
            "void imulExtended(highp ivec4, highp ivec4, out highp ivec4, out highp ivec4 lsb);"

            " int bitfieldReverse(highp int);"
            "ivec2 bitfieldReverse(highp ivec2);"
            "ivec3 bitfieldReverse(highp ivec3);"
            "ivec4 bitfieldReverse(highp ivec4);"

            " uint bitfieldReverse(highp uint);"
            "uvec2 bitfieldReverse(highp uvec2);"
            "uvec3 bitfieldReverse(highp uvec3);"
            "uvec4 bitfieldReverse(highp uvec4);"

            "\n");
    }
| |
    // ES-profile bitCount/findMSB (ESSL 3.10+): same signatures as the desktop
    // block above, but with lowp results as the ESSL specification requires.
    if (profile == EEsProfile && version >= 310) {
        commonBuiltins.append(
            "lowp int bitCount( int);"
            "lowp ivec2 bitCount(ivec2);"
            "lowp ivec3 bitCount(ivec3);"
            "lowp ivec4 bitCount(ivec4);"

            "lowp int bitCount( uint);"
            "lowp ivec2 bitCount(uvec2);"
            "lowp ivec3 bitCount(uvec3);"
            "lowp ivec4 bitCount(uvec4);"

            "lowp int findMSB(highp int);"
            "lowp ivec2 findMSB(highp ivec2);"
            "lowp ivec3 findMSB(highp ivec3);"
            "lowp ivec4 findMSB(highp ivec4);"

            "lowp int findMSB(highp uint);"
            "lowp ivec2 findMSB(highp uvec2);"
            "lowp ivec3 findMSB(highp uvec3);"
            "lowp ivec4 findMSB(highp uvec4);"

            "\n");
    }
| |
    // GL_ARB_shader_ballot
    // Invocation ballot plus readInvocation/readFirstInvocation for
    // float/int/uint scalars and vectors (desktop GLSL 4.50+).
    if (profile != EEsProfile && version >= 450) {
        commonBuiltins.append(
            "uint64_t ballotARB(bool);"

            "float readInvocationARB(float, uint);"
            "vec2 readInvocationARB(vec2, uint);"
            "vec3 readInvocationARB(vec3, uint);"
            "vec4 readInvocationARB(vec4, uint);"

            "int readInvocationARB(int, uint);"
            "ivec2 readInvocationARB(ivec2, uint);"
            "ivec3 readInvocationARB(ivec3, uint);"
            "ivec4 readInvocationARB(ivec4, uint);"

            "uint readInvocationARB(uint, uint);"
            "uvec2 readInvocationARB(uvec2, uint);"
            "uvec3 readInvocationARB(uvec3, uint);"
            "uvec4 readInvocationARB(uvec4, uint);"

            "float readFirstInvocationARB(float);"
            "vec2 readFirstInvocationARB(vec2);"
            "vec3 readFirstInvocationARB(vec3);"
            "vec4 readFirstInvocationARB(vec4);"

            "int readFirstInvocationARB(int);"
            "ivec2 readFirstInvocationARB(ivec2);"
            "ivec3 readFirstInvocationARB(ivec3);"
            "ivec4 readFirstInvocationARB(ivec4);"

            "uint readFirstInvocationARB(uint);"
            "uvec2 readFirstInvocationARB(uvec2);"
            "uvec3 readFirstInvocationARB(uvec3);"
            "uvec4 readFirstInvocationARB(uvec4);"

            "\n");
    }
| |
    // GL_ARB_shader_group_vote
    // Cross-invocation boolean votes (desktop GLSL 4.30+).
    if (profile != EEsProfile && version >= 430) {
        commonBuiltins.append(
            "bool anyInvocationARB(bool);"
            "bool allInvocationsARB(bool);"
            "bool allInvocationsEqualARB(bool);"

            "\n");
    }
| |
| // GL_KHR_shader_subgroup |
| if (spvVersion.vulkan >= 100) { |
| commonBuiltins.append( |
| "void subgroupBarrier();" |
| "void subgroupMemoryBarrier();" |
| "void subgroupMemoryBarrierBuffer();" |
| "void subgroupMemoryBarrierImage();" |
| "bool subgroupElect();" |
| |
| "bool subgroupAll(bool);\n" |
| "bool subgroupAny(bool);\n" |
| |
| "bool subgroupAllEqual(float);\n" |
| "bool subgroupAllEqual(vec2);\n" |
| "bool subgroupAllEqual(vec3);\n" |
| "bool subgroupAllEqual(vec4);\n" |
| "bool subgroupAllEqual(int);\n" |
| "bool subgroupAllEqual(ivec2);\n" |
| "bool subgroupAllEqual(ivec3);\n" |
| "bool subgroupAllEqual(ivec4);\n" |
| "bool subgroupAllEqual(uint);\n" |
| "bool subgroupAllEqual(uvec2);\n" |
| "bool subgroupAllEqual(uvec3);\n" |
| "bool subgroupAllEqual(uvec4);\n" |
| "bool subgroupAllEqual(bool);\n" |
| "bool subgroupAllEqual(bvec2);\n" |
| "bool subgroupAllEqual(bvec3);\n" |
| "bool subgroupAllEqual(bvec4);\n" |
| |
| "float subgroupBroadcast(float, uint);\n" |
| "vec2 subgroupBroadcast(vec2, uint);\n" |
| "vec3 subgroupBroadcast(vec3, uint);\n" |
| "vec4 subgroupBroadcast(vec4, uint);\n" |
| "int subgroupBroadcast(int, uint);\n" |
| "ivec2 subgroupBroadcast(ivec2, uint);\n" |
| "ivec3 subgroupBroadcast(ivec3, uint);\n" |
| "ivec4 subgroupBroadcast(ivec4, uint);\n" |
| "uint subgroupBroadcast(uint, uint);\n" |
| "uvec2 subgroupBroadcast(uvec2, uint);\n" |
| "uvec3 subgroupBroadcast(uvec3, uint);\n" |
| "uvec4 subgroupBroadcast(uvec4, uint);\n" |
| "bool subgroupBroadcast(bool, uint);\n" |
| "bvec2 subgroupBroadcast(bvec2, uint);\n" |
| "bvec3 subgroupBroadcast(bvec3, uint);\n" |
| "bvec4 subgroupBroadcast(bvec4, uint);\n" |
| |
| "float subgroupBroadcastFirst(float);\n" |
| "vec2 subgroupBroadcastFirst(vec2);\n" |
| "vec3 subgroupBroadcastFirst(vec3);\n" |
| "vec4 subgroupBroadcastFirst(vec4);\n" |
| "int subgroupBroadcastFirst(int);\n" |
| "ivec2 subgroupBroadcastFirst(ivec2);\n" |
| "ivec3 subgroupBroadcastFirst(ivec3);\n" |
| "ivec4 subgroupBroadcastFirst(ivec4);\n" |
| "uint subgroupBroadcastFirst(uint);\n" |
| "uvec2 subgroupBroadcastFirst(uvec2);\n" |
| "uvec3 subgroupBroadcastFirst(uvec3);\n" |
| "uvec4 subgroupBroadcastFirst(uvec4);\n" |
| "bool subgroupBroadcastFirst(bool);\n" |
| "bvec2 subgroupBroadcastFirst(bvec2);\n" |
| "bvec3 subgroupBroadcastFirst(bvec3);\n" |
| "bvec4 subgroupBroadcastFirst(bvec4);\n" |
| |
| "uvec4 subgroupBallot(bool);\n" |
| "bool subgroupInverseBallot(uvec4);\n" |
| "bool subgroupBallotBitExtract(uvec4, uint);\n" |
| "uint subgroupBallotBitCount(uvec4);\n" |
| "uint subgroupBallotInclusiveBitCount(uvec4);\n" |
| "uint subgroupBallotExclusiveBitCount(uvec4);\n" |
| "uint subgroupBallotFindLSB(uvec4);\n" |
| "uint subgroupBallotFindMSB(uvec4);\n" |
| |
| "float subgroupShuffle(float, uint);\n" |
| "vec2 subgroupShuffle(vec2, uint);\n" |
| "vec3 subgroupShuffle(vec3, uint);\n" |
| "vec4 subgroupShuffle(vec4, uint);\n" |
| "int subgroupShuffle(int, uint);\n" |
| "ivec2 subgroupShuffle(ivec2, uint);\n" |
| "ivec3 subgroupShuffle(ivec3, uint);\n" |
| "ivec4 subgroupShuffle(ivec4, uint);\n" |
| "uint subgroupShuffle(uint, uint);\n" |
| "uvec2 subgroupShuffle(uvec2, uint);\n" |
| "uvec3 subgroupShuffle(uvec3, uint);\n" |
| "uvec4 subgroupShuffle(uvec4, uint);\n" |
| "bool subgroupShuffle(bool, uint);\n" |
| "bvec2 subgroupShuffle(bvec2, uint);\n" |
| "bvec3 subgroupShuffle(bvec3, uint);\n" |
| "bvec4 subgroupShuffle(bvec4, uint);\n" |
| |
| "float subgroupShuffleXor(float, uint);\n" |
| "vec2 subgroupShuffleXor(vec2, uint);\n" |
| "vec3 subgroupShuffleXor(vec3, uint);\n" |
| "vec4 subgroupShuffleXor(vec4, uint);\n" |
| "int subgroupShuffleXor(int, uint);\n" |
| "ivec2 subgroupShuffleXor(ivec2, uint);\n" |
| "ivec3 subgroupShuffleXor(ivec3, uint);\n" |
| "ivec4 subgroupShuffleXor(ivec4, uint);\n" |
| "uint subgroupShuffleXor(uint, uint);\n" |
| "uvec2 subgroupShuffleXor(uvec2, uint);\n" |
| "uvec3 subgroupShuffleXor(uvec3, uint);\n" |
| "uvec4 subgroupShuffleXor(uvec4, uint);\n" |
| "bool subgroupShuffleXor(bool, uint);\n" |
| "bvec2 subgroupShuffleXor(bvec2, uint);\n" |
| "bvec3 subgroupShuffleXor(bvec3, uint);\n" |
| "bvec4 subgroupShuffleXor(bvec4, uint);\n" |
| |
| "float subgroupShuffleUp(float, uint delta);\n" |
| "vec2 subgroupShuffleUp(vec2, uint delta);\n" |
| "vec3 subgroupShuffleUp(vec3, uint delta);\n" |
| "vec4 subgroupShuffleUp(vec4, uint delta);\n" |
| "int subgroupShuffleUp(int, uint delta);\n" |
| "ivec2 subgroupShuffleUp(ivec2, uint delta);\n" |
| "ivec3 subgroupShuffleUp(ivec3, uint delta);\n" |
| "ivec4 subgroupShuffleUp(ivec4, uint delta);\n" |
| "uint subgroupShuffleUp(uint, uint delta);\n" |
| "uvec2 subgroupShuffleUp(uvec2, uint delta);\n" |
| "uvec3 subgroupShuffleUp(uvec3, uint delta);\n" |
| "uvec4 subgroupShuffleUp(uvec4, uint delta);\n" |
| "bool subgroupShuffleUp(bool, uint delta);\n" |
| "bvec2 subgroupShuffleUp(bvec2, uint delta);\n" |
| "bvec3 subgroupShuffleUp(bvec3, uint delta);\n" |
| "bvec4 subgroupShuffleUp(bvec4, uint delta);\n" |
| |
| "float subgroupShuffleDown(float, uint delta);\n" |
| "vec2 subgroupShuffleDown(vec2, uint delta);\n" |
| "vec3 subgroupShuffleDown(vec3, uint delta);\n" |
| "vec4 subgroupShuffleDown(vec4, uint delta);\n" |
| "int subgroupShuffleDown(int, uint delta);\n" |
| "ivec2 subgroupShuffleDown(ivec2, uint delta);\n" |
| "ivec3 subgroupShuffleDown(ivec3, uint delta);\n" |
| "ivec4 subgroupShuffleDown(ivec4, uint delta);\n" |
| "uint subgroupShuffleDown(uint, uint delta);\n" |
| "uvec2 subgroupShuffleDown(uvec2, uint delta);\n" |
| "uvec3 subgroupShuffleDown(uvec3, uint delta);\n" |
| "uvec4 subgroupShuffleDown(uvec4, uint delta);\n" |
| "bool subgroupShuffleDown(bool, uint delta);\n" |
| "bvec2 subgroupShuffleDown(bvec2, uint delta);\n" |
| "bvec3 subgroupShuffleDown(bvec3, uint delta);\n" |
| "bvec4 subgroupShuffleDown(bvec4, uint delta);\n" |
| |
| "float subgroupAdd(float);\n" |
| "vec2 subgroupAdd(vec2);\n" |
| "vec3 subgroupAdd(vec3);\n" |
| "vec4 subgroupAdd(vec4);\n" |
| "int subgroupAdd(int);\n" |
| "ivec2 subgroupAdd(ivec2);\n" |
| "ivec3 subgroupAdd(ivec3);\n" |
| "ivec4 subgroupAdd(ivec4);\n" |
| "uint subgroupAdd(uint);\n" |
| "uvec2 subgroupAdd(uvec2);\n" |
| "uvec3 subgroupAdd(uvec3);\n" |
| "uvec4 subgroupAdd(uvec4);\n" |
| |
| "float subgroupMul(float);\n" |
| "vec2 subgroupMul(vec2);\n" |
| "vec3 subgroupMul(vec3);\n" |
| "vec4 subgroupMul(vec4);\n" |
| "int subgroupMul(int);\n" |
| "ivec2 subgroupMul(ivec2);\n" |
| "ivec3 subgroupMul(ivec3);\n" |
| "ivec4 subgroupMul(ivec4);\n" |
| "uint subgroupMul(uint);\n" |
| "uvec2 subgroupMul(uvec2);\n" |
| "uvec3 subgroupMul(uvec3);\n" |
| "uvec4 subgroupMul(uvec4);\n" |
| |
| "float subgroupMin(float);\n" |
| "vec2 subgroupMin(vec2);\n" |
| "vec3 subgroupMin(vec3);\n" |
| "vec4 subgroupMin(vec4);\n" |
| "int subgroupMin(int);\n" |
| "ivec2 subgroupMin(ivec2);\n" |
| "ivec3 subgroupMin(ivec3);\n" |
| "ivec4 subgroupMin(ivec4);\n" |
| "uint subgroupMin(uint);\n" |
| "uvec2 subgroupMin(uvec2);\n" |
| "uvec3 subgroupMin(uvec3);\n" |
| "uvec4 subgroupMin(uvec4);\n" |
| |
| "float subgroupMax(float);\n" |
| "vec2 subgroupMax(vec2);\n" |
| "vec3 subgroupMax(vec3);\n" |
| "vec4 subgroupMax(vec4);\n" |
| "int subgroupMax(int);\n" |
| "ivec2 subgroupMax(ivec2);\n" |
| "ivec3 subgroupMax(ivec3);\n" |
| "ivec4 subgroupMax(ivec4);\n" |
| "uint subgroupMax(uint);\n" |
| "uvec2 subgroupMax(uvec2);\n" |
| "uvec3 subgroupMax(uvec3);\n" |
| "uvec4 subgroupMax(uvec4);\n" |
| |
| "int subgroupAnd(int);\n" |
| "ivec2 subgroupAnd(ivec2);\n" |
| "ivec3 subgroupAnd(ivec3);\n" |
| "ivec4 subgroupAnd(ivec4);\n" |
| "uint subgroupAnd(uint);\n" |
| "uvec2 subgroupAnd(uvec2);\n" |
| "uvec3 subgroupAnd(uvec3);\n" |
| "uvec4 subgroupAnd(uvec4);\n" |
| "bool subgroupAnd(bool);\n" |
| "bvec2 subgroupAnd(bvec2);\n" |
| "bvec3 subgroupAnd(bvec3);\n" |
| "bvec4 subgroupAnd(bvec4);\n" |
| |
| "int subgroupOr(int);\n" |
| "ivec2 subgroupOr(ivec2);\n" |
| "ivec3 subgroupOr(ivec3);\n" |
| "ivec4 subgroupOr(ivec4);\n" |
| "uint subgroupOr(uint);\n" |
| "uvec2 subgroupOr(uvec2);\n" |
| "uvec3 subgroupOr(uvec3);\n" |
| "uvec4 subgroupOr(uvec4);\n" |
| "bool subgroupOr(bool);\n" |
| "bvec2 subgroupOr(bvec2);\n" |
| "bvec3 subgroupOr(bvec3);\n" |
| "bvec4 subgroupOr(bvec4);\n" |
| |
| "int subgroupXor(int);\n" |
| "ivec2 subgroupXor(ivec2);\n" |
| "ivec3 subgroupXor(ivec3);\n" |
| "ivec4 subgroupXor(ivec4);\n" |
| "uint subgroupXor(uint);\n" |
| "uvec2 subgroupXor(uvec2);\n" |
| "uvec3 subgroupXor(uvec3);\n" |
| "uvec4 subgroupXor(uvec4);\n" |
| "bool subgroupXor(bool);\n" |
| "bvec2 subgroupXor(bvec2);\n" |
| "bvec3 subgroupXor(bvec3);\n" |
| "bvec4 subgroupXor(bvec4);\n" |
| |
| "float subgroupInclusiveAdd(float);\n" |
| "vec2 subgroupInclusiveAdd(vec2);\n" |
| "vec3 subgroupInclusiveAdd(vec3);\n" |
| "vec4 subgroupInclusiveAdd(vec4);\n" |
| "int subgroupInclusiveAdd(int);\n" |
| "ivec2 subgroupInclusiveAdd(ivec2);\n" |
| "ivec3 subgroupInclusiveAdd(ivec3);\n" |
| "ivec4 subgroupInclusiveAdd(ivec4);\n" |
| "uint subgroupInclusiveAdd(uint);\n" |
| "uvec2 subgroupInclusiveAdd(uvec2);\n" |
| "uvec3 subgroupInclusiveAdd(uvec3);\n" |
| "uvec4 subgroupInclusiveAdd(uvec4);\n" |
| |
| "float subgroupInclusiveMul(float);\n" |
| "vec2 subgroupInclusiveMul(vec2);\n" |
| "vec3 subgroupInclusiveMul(vec3);\n" |
| "vec4 subgroupInclusiveMul(vec4);\n" |
| "int subgroupInclusiveMul(int);\n" |
| "ivec2 subgroupInclusiveMul(ivec2);\n" |
| "ivec3 subgroupInclusiveMul(ivec3);\n" |
| "ivec4 subgroupInclusiveMul(ivec4);\n" |
| "uint subgroupInclusiveMul(uint);\n" |
| "uvec2 subgroupInclusiveMul(uvec2);\n" |
| "uvec3 subgroupInclusiveMul(uvec3);\n" |
| "uvec4 subgroupInclusiveMul(uvec4);\n" |
| |
| "float subgroupInclusiveMin(float);\n" |
| "vec2 subgroupInclusiveMin(vec2);\n" |
| "vec3 subgroupInclusiveMin(vec3);\n" |
| "vec4 subgroupInclusiveMin(vec4);\n" |
| "int subgroupInclusiveMin(int);\n" |
| "ivec2 subgroupInclusiveMin(ivec2);\n" |
| "ivec3 subgroupInclusiveMin(ivec3);\n" |
| "ivec4 subgroupInclusiveMin(ivec4);\n" |
| "uint subgroupInclusiveMin(uint);\n" |
| "uvec2 subgroupInclusiveMin(uvec2);\n" |
| "uvec3 subgroupInclusiveMin(uvec3);\n" |
| "uvec4 subgroupInclusiveMin(uvec4);\n" |
| |
| "float subgroupInclusiveMax(float);\n" |
| "vec2 subgroupInclusiveMax(vec2);\n" |
| "vec3 subgroupInclusiveMax(vec3);\n" |
| "vec4 subgroupInclusiveMax(vec4);\n" |
| "int subgroupInclusiveMax(int);\n" |
| "ivec2 subgroupInclusiveMax(ivec2);\n" |
| "ivec3 subgroupInclusiveMax(ivec3);\n" |
| "ivec4 subgroupInclusiveMax(ivec4);\n" |
| "uint subgroupInclusiveMax(uint);\n" |
| "uvec2 subgroupInclusiveMax(uvec2);\n" |
| "uvec3 subgroupInclusiveMax(uvec3);\n" |
| "uvec4 subgroupInclusiveMax(uvec4);\n" |
| |
| "int subgroupInclusiveAnd(int);\n" |
| "ivec2 subgroupInclusiveAnd(ivec2);\n" |
| "ivec3 subgroupInclusiveAnd(ivec3);\n" |
| "ivec4 subgroupInclusiveAnd(ivec4);\n" |
| "uint subgroupInclusiveAnd(uint);\n" |
| "uvec2 subgroupInclusiveAnd(uvec2);\n" |
| "uvec3 subgroupInclusiveAnd(uvec3);\n" |
| "uvec4 subgroupInclusiveAnd(uvec4);\n" |
| "bool subgroupInclusiveAnd(bool);\n" |
| "bvec2 subgroupInclusiveAnd(bvec2);\n" |
| "bvec3 subgroupInclusiveAnd(bvec3);\n" |
| "bvec4 subgroupInclusiveAnd(bvec4);\n" |
| |
| "int subgroupInclusiveOr(int);\n" |
| "ivec2 subgroupInclusiveOr(ivec2);\n" |
| "ivec3 subgroupInclusiveOr(ivec3);\n" |
| "ivec4 subgroupInclusiveOr(ivec4);\n" |
| "uint subgroupInclusiveOr(uint);\n" |
| "uvec2 subgroupInclusiveOr(uvec2);\n" |
| "uvec3 subgroupInclusiveOr(uvec3);\n" |
| "uvec4 subgroupInclusiveOr(uvec4);\n" |
| "bool subgroupInclusiveOr(bool);\n" |
| "bvec2 subgroupInclusiveOr(bvec2);\n" |
| "bvec3 subgroupInclusiveOr(bvec3);\n" |
| "bvec4 subgroupInclusiveOr(bvec4);\n" |
| |
| "int subgroupInclusiveXor(int);\n" |
| "ivec2 subgroupInclusiveXor(ivec2);\n" |
| "ivec3 subgroupInclusiveXor(ivec3);\n" |
| "ivec4 subgroupInclusiveXor(ivec4);\n" |
| "uint subgroupInclusiveXor(uint);\n" |
| "uvec2 subgroupInclusiveXor(uvec2);\n" |
| "uvec3 subgroupInclusiveXor(uvec3);\n" |
| "uvec4 subgroupInclusiveXor(uvec4);\n" |
| "bool subgroupInclusiveXor(bool);\n" |
| "bvec2 subgroupInclusiveXor(bvec2);\n" |
| "bvec3 subgroupInclusiveXor(bvec3);\n" |
| "bvec4 subgroupInclusiveXor(bvec4);\n" |
| |
| "float subgroupExclusiveAdd(float);\n" |
| "vec2 subgroupExclusiveAdd(vec2);\n" |
| "vec3 subgroupExclusiveAdd(vec3);\n" |
| "vec4 subgroupExclusiveAdd(vec4);\n" |
| "int subgroupExclusiveAdd(int);\n" |
| "ivec2 subgroupExclusiveAdd(ivec2);\n" |
| "ivec3 subgroupExclusiveAdd(ivec3);\n" |
| "ivec4 subgroupExclusiveAdd(ivec4);\n" |
| "uint subgroupExclusiveAdd(uint);\n" |
| "uvec2 subgroupExclusiveAdd(uvec2);\n" |
| "uvec3 subgroupExclusiveAdd(uvec3);\n" |
| "uvec4 subgroupExclusiveAdd(uvec4);\n" |
| |
| "float subgroupExclusiveMul(float);\n" |
| "vec2 subgroupExclusiveMul(vec2);\n" |
| "vec3 subgroupExclusiveMul(vec3);\n" |
| "vec4 subgroupExclusiveMul(vec4);\n" |
| "int subgroupExclusiveMul(int);\n" |
| "ivec2 subgroupExclusiveMul(ivec2);\n" |
| "ivec3 subgroupExclusiveMul(ivec3);\n" |
| "ivec4 subgroupExclusiveMul(ivec4);\n" |
| "uint subgroupExclusiveMul(uint);\n" |
| "uvec2 subgroupExclusiveMul(uvec2);\n" |
| "uvec3 subgroupExclusiveMul(uvec3);\n" |
| "uvec4 subgroupExclusiveMul(uvec4);\n" |
| |
| "float subgroupExclusiveMin(float);\n" |
| "vec2 subgroupExclusiveMin(vec2);\n" |
| "vec3 subgroupExclusiveMin(vec3);\n" |
| "vec4 subgroupExclusiveMin(vec4);\n" |
| "int subgroupExclusiveMin(int);\n" |
| "ivec2 subgroupExclusiveMin(ivec2);\n" |
| "ivec3 subgroupExclusiveMin(ivec3);\n" |
| "ivec4 subgroupExclusiveMin(ivec4);\n" |
| "uint subgroupExclusiveMin(uint);\n" |
| "uvec2 subgroupExclusiveMin(uvec2);\n" |
| "uvec3 subgroupExclusiveMin(uvec3);\n" |
| "uvec4 subgroupExclusiveMin(uvec4);\n" |
| |
| "float subgroupExclusiveMax(float);\n" |
| "vec2 subgroupExclusiveMax(vec2);\n" |
| "vec3 subgroupExclusiveMax(vec3);\n" |
| "vec4 subgroupExclusiveMax(vec4);\n" |
| "int subgroupExclusiveMax(int);\n" |
| "ivec2 subgroupExclusiveMax(ivec2);\n" |
| "ivec3 subgroupExclusiveMax(ivec3);\n" |
| "ivec4 subgroupExclusiveMax(ivec4);\n" |
| "uint subgroupExclusiveMax(uint);\n" |
| "uvec2 subgroupExclusiveMax(uvec2);\n" |
| "uvec3 subgroupExclusiveMax(uvec3);\n" |
| "uvec4 subgroupExclusiveMax(uvec4);\n" |
| |
| "int subgroupExclusiveAnd(int);\n" |
| "ivec2 subgroupExclusiveAnd(ivec2);\n" |
| "ivec3 subgroupExclusiveAnd(ivec3);\n" |
| "ivec4 subgroupExclusiveAnd(ivec4);\n" |
| "uint subgroupExclusiveAnd(uint);\n" |
| "uvec2 subgroupExclusiveAnd(uvec2);\n" |
| "uvec3 subgroupExclusiveAnd(uvec3);\n" |
| "uvec4 subgroupExclusiveAnd(uvec4);\n" |
| "bool subgroupExclusiveAnd(bool);\n" |
| "bvec2 subgroupExclusiveAnd(bvec2);\n" |
| "bvec3 subgroupExclusiveAnd(bvec3);\n" |
| "bvec4 subgroupExclusiveAnd(bvec4);\n" |
| |
| "int subgroupExclusiveOr(int);\n" |
| "ivec2 subgroupExclusiveOr(ivec2);\n" |
| "ivec3 subgroupExclusiveOr(ivec3);\n" |
| "ivec4 subgroupExclusiveOr(ivec4);\n" |
| "uint subgroupExclusiveOr(uint);\n" |
| "uvec2 subgroupExclusiveOr(uvec2);\n" |
| "uvec3 subgroupExclusiveOr(uvec3);\n" |
| "uvec4 subgroupExclusiveOr(uvec4);\n" |
| "bool subgroupExclusiveOr(bool);\n" |
| "bvec2 subgroupExclusiveOr(bvec2);\n" |
| "bvec3 subgroupExclusiveOr(bvec3);\n" |
| "bvec4 subgroupExclusiveOr(bvec4);\n" |
| |
| "int subgroupExclusiveXor(int);\n" |
| "ivec2 subgroupExclusiveXor(ivec2);\n" |
| "ivec3 subgroupExclusiveXor(ivec3);\n" |
| "ivec4 subgroupExclusiveXor(ivec4);\n" |
| "uint subgroupExclusiveXor(uint);\n" |
| "uvec2 subgroupExclusiveXor(uvec2);\n" |
| "uvec3 subgroupExclusiveXor(uvec3);\n" |
| "uvec4 subgroupExclusiveXor(uvec4);\n" |
| "bool subgroupExclusiveXor(bool);\n" |
| "bvec2 subgroupExclusiveXor(bvec2);\n" |
| "bvec3 subgroupExclusiveXor(bvec3);\n" |
| "bvec4 subgroupExclusiveXor(bvec4);\n" |
| |
| "float subgroupClusteredAdd(float, uint);\n" |
| "vec2 subgroupClusteredAdd(vec2, uint);\n" |
| "vec3 subgroupClusteredAdd(vec3, uint);\n" |
| "vec4 subgroupClusteredAdd(vec4, uint);\n" |
| "int subgroupClusteredAdd(int, uint);\n" |
| "ivec2 subgroupClusteredAdd(ivec2, uint);\n" |
| "ivec3 subgroupClusteredAdd(ivec3, uint);\n" |
| "ivec4 subgroupClusteredAdd(ivec4, uint);\n" |
| "uint subgroupClusteredAdd(uint, uint);\n" |
| "uvec2 subgroupClusteredAdd(uvec2, uint);\n" |
| "uvec3 subgroupClusteredAdd(uvec3, uint);\n" |
| "uvec4 subgroupClusteredAdd(uvec4, uint);\n" |
| |
| "float subgroupClusteredMul(float, uint);\n" |
| "vec2 subgroupClusteredMul(vec2, uint);\n" |
| "vec3 subgroupClusteredMul(vec3, uint);\n" |
| "vec4 subgroupClusteredMul(vec4, uint);\n" |
| "int subgroupClusteredMul(int, uint);\n" |
| "ivec2 subgroupClusteredMul(ivec2, uint);\n" |
| "ivec3 subgroupClusteredMul(ivec3, uint);\n" |
| "ivec4 subgroupClusteredMul(ivec4, uint);\n" |
| "uint subgroupClusteredMul(uint, uint);\n" |
| "uvec2 subgroupClusteredMul(uvec2, uint);\n" |
| "uvec3 subgroupClusteredMul(uvec3, uint);\n" |
| "uvec4 subgroupClusteredMul(uvec4, uint);\n" |
| |
| "float subgroupClusteredMin(float, uint);\n" |
| "vec2 subgroupClusteredMin(vec2, uint);\n" |
| "vec3 subgroupClusteredMin(vec3, uint);\n" |
| "vec4 subgroupClusteredMin(vec4, uint);\n" |
| "int subgroupClusteredMin(int, uint);\n" |
| "ivec2 subgroupClusteredMin(ivec2, uint);\n" |
| "ivec3 subgroupClusteredMin(ivec3, uint);\n" |
| "ivec4 subgroupClusteredMin(ivec4, uint);\n" |
| "uint subgroupClusteredMin(uint, uint);\n" |
| "uvec2 subgroupClusteredMin(uvec2, uint);\n" |
| "uvec3 subgroupClusteredMin(uvec3, uint);\n" |
| "uvec4 subgroupClusteredMin(uvec4, uint);\n" |
| |
| "float subgroupClusteredMax(float, uint);\n" |
| "vec2 subgroupClusteredMax(vec2, uint);\n" |
| "vec3 subgroupClusteredMax(vec3, uint);\n" |
| "vec4 subgroupClusteredMax(vec4, uint);\n" |
| "int subgroupClusteredMax(int, uint);\n" |
| "ivec2 subgroupClusteredMax(ivec2, uint);\n" |
| "ivec3 subgroupClusteredMax(ivec3, uint);\n" |
| "ivec4 subgroupClusteredMax(ivec4, uint);\n" |
| "uint subgroupClusteredMax(uint, uint);\n" |
| "uvec2 subgroupClusteredMax(uvec2, uint);\n" |
| "uvec3 subgroupClusteredMax(uvec3, uint);\n" |
| "uvec4 subgroupClusteredMax(uvec4, uint);\n" |
| |
| "int subgroupClusteredAnd(int, uint);\n" |
| "ivec2 subgroupClusteredAnd(ivec2, uint);\n" |
| "ivec3 subgroupClusteredAnd(ivec3, uint);\n" |
| "ivec4 subgroupClusteredAnd(ivec4, uint);\n" |
| "uint subgroupClusteredAnd(uint, uint);\n" |
| "uvec2 subgroupClusteredAnd(uvec2, uint);\n" |
| "uvec3 subgroupClusteredAnd(uvec3, uint);\n" |
| "uvec4 subgroupClusteredAnd(uvec4, uint);\n" |
| "bool subgroupClusteredAnd(bool, uint);\n" |
| "bvec2 subgroupClusteredAnd(bvec2, uint);\n" |
| "bvec3 subgroupClusteredAnd(bvec3, uint);\n" |
| "bvec4 subgroupClusteredAnd(bvec4, uint);\n" |
| |
| "int subgroupClusteredOr(int, uint);\n" |
| "ivec2 subgroupClusteredOr(ivec2, uint);\n" |
| "ivec3 subgroupClusteredOr(ivec3, uint);\n" |
| "ivec4 subgroupClusteredOr(ivec4, uint);\n" |
| "uint subgroupClusteredOr(uint, uint);\n" |
| "uvec2 subgroupClusteredOr(uvec2, uint);\n" |
| "uvec3 subgroupClusteredOr(uvec3, uint);\n" |
| "uvec4 subgroupClusteredOr(uvec4, uint);\n" |
| "bool subgroupClusteredOr(bool, uint);\n" |
| "bvec2 subgroupClusteredOr(bvec2, uint);\n" |
| "bvec3 subgroupClusteredOr(bvec3, uint);\n" |
| "bvec4 subgroupClusteredOr(bvec4, uint);\n" |
| |
| "int subgroupClusteredXor(int, uint);\n" |
| "ivec2 subgroupClusteredXor(ivec2, uint);\n" |
| "ivec3 subgroupClusteredXor(ivec3, uint);\n" |
| "ivec4 subgroupClusteredXor(ivec4, uint);\n" |
| "uint subgroupClusteredXor(uint, uint);\n" |
| "uvec2 subgroupClusteredXor(uvec2, uint);\n" |
| "uvec3 subgroupClusteredXor(uvec3, uint);\n" |
| "uvec4 subgroupClusteredXor(uvec4, uint);\n" |
| "bool subgroupClusteredXor(bool, uint);\n" |
| "bvec2 subgroupClusteredXor(bvec2, uint);\n" |
| "bvec3 subgroupClusteredXor(bvec3, uint);\n" |
| "bvec4 subgroupClusteredXor(bvec4, uint);\n" |
| |
| "float subgroupQuadBroadcast(float, uint);\n" |
| "vec2 subgroupQuadBroadcast(vec2, uint);\n" |
| "vec3 subgroupQuadBroadcast(vec3, uint);\n" |
| "vec4 subgroupQuadBroadcast(vec4, uint);\n" |
| "int subgroupQuadBroadcast(int, uint);\n" |
| "ivec2 subgroupQuadBroadcast(ivec2, uint);\n" |
| "ivec3 subgroupQuadBroadcast(ivec3, uint);\n" |
| "ivec4 subgroupQuadBroadcast(ivec4, uint);\n" |
| "uint subgroupQuadBroadcast(uint, uint);\n" |
| "uvec2 subgroupQuadBroadcast(uvec2, uint);\n" |
| "uvec3 subgroupQuadBroadcast(uvec3, uint);\n" |
| "uvec4 subgroupQuadBroadcast(uvec4, uint);\n" |
| "bool subgroupQuadBroadcast(bool, uint);\n" |
| "bvec2 subgroupQuadBroadcast(bvec2, uint);\n" |
| "bvec3 subgroupQuadBroadcast(bvec3, uint);\n" |
| "bvec4 subgroupQuadBroadcast(bvec4, uint);\n" |
| |
| "float subgroupQuadSwapHorizontal(float);\n" |
| "vec2 subgroupQuadSwapHorizontal(vec2);\n" |
| "vec3 subgroupQuadSwapHorizontal(vec3);\n" |
| "vec4 subgroupQuadSwapHorizontal(vec4);\n" |
| "int subgroupQuadSwapHorizontal(int);\n" |
| "ivec2 subgroupQuadSwapHorizontal(ivec2);\n" |
| "ivec3 subgroupQuadSwapHorizontal(ivec3);\n" |
| "ivec4 subgroupQuadSwapHorizontal(ivec4);\n" |
| "uint subgroupQuadSwapHorizontal(uint);\n" |
| "uvec2 subgroupQuadSwapHorizontal(uvec2);\n" |
| "uvec3 subgroupQuadSwapHorizontal(uvec3);\n" |
| "uvec4 subgroupQuadSwapHorizontal(uvec4);\n" |
| "bool subgroupQuadSwapHorizontal(bool);\n" |
| "bvec2 subgroupQuadSwapHorizontal(bvec2);\n" |
| "bvec3 subgroupQuadSwapHorizontal(bvec3);\n" |
| "bvec4 subgroupQuadSwapHorizontal(bvec4);\n" |
| |
| "float subgroupQuadSwapVertical(float);\n" |
| "vec2 subgroupQuadSwapVertical(vec2);\n" |
| "vec3 subgroupQuadSwapVertical(vec3);\n" |
| "vec4 subgroupQuadSwapVertical(vec4);\n" |
| "int subgroupQuadSwapVertical(int);\n" |
| "ivec2 subgroupQuadSwapVertical(ivec2);\n" |
| "ivec3 subgroupQuadSwapVertical(ivec3);\n" |
| "ivec4 subgroupQuadSwapVertical(ivec4);\n" |
| "uint subgroupQuadSwapVertical(uint);\n" |
| "uvec2 subgroupQuadSwapVertical(uvec2);\n" |
| "uvec3 subgroupQuadSwapVertical(uvec3);\n" |
| "uvec4 subgroupQuadSwapVertical(uvec4);\n" |
| "bool subgroupQuadSwapVertical(bool);\n" |
| "bvec2 subgroupQuadSwapVertical(bvec2);\n" |
| "bvec3 subgroupQuadSwapVertical(bvec3);\n" |
| "bvec4 subgroupQuadSwapVertical(bvec4);\n" |
| |
| "float subgroupQuadSwapDiagonal(float);\n" |
| "vec2 subgroupQuadSwapDiagonal(vec2);\n" |
| "vec3 subgroupQuadSwapDiagonal(vec3);\n" |
| "vec4 subgroupQuadSwapDiagonal(vec4);\n" |
| "int subgroupQuadSwapDiagonal(int);\n" |
| "ivec2 subgroupQuadSwapDiagonal(ivec2);\n" |
| "ivec3 subgroupQuadSwapDiagonal(ivec3);\n" |
| "ivec4 subgroupQuadSwapDiagonal(ivec4);\n" |
| "uint subgroupQuadSwapDiagonal(uint);\n" |
| "uvec2 subgroupQuadSwapDiagonal(uvec2);\n" |
| "uvec3 subgroupQuadSwapDiagonal(uvec3);\n" |
| "uvec4 subgroupQuadSwapDiagonal(uvec4);\n" |
| "bool subgroupQuadSwapDiagonal(bool);\n" |
| "bvec2 subgroupQuadSwapDiagonal(bvec2);\n" |
| "bvec3 subgroupQuadSwapDiagonal(bvec3);\n" |
| "bvec4 subgroupQuadSwapDiagonal(bvec4);\n" |
| |
| "\n"); |
| |
// Double-precision (double / dvec2..4) overloads of the subgroup built-ins
// declared above.  These are gated separately because the 'double' type only
// exists in the desktop profile starting with GLSL 4.00 — it is never
// available in any ES profile, so the fp64 overloads must not be declared
// there.
if (profile != EEsProfile && version >= 400) {
commonBuiltins.append(
// vote
"bool subgroupAllEqual(double);\n"
"bool subgroupAllEqual(dvec2);\n"
"bool subgroupAllEqual(dvec3);\n"
"bool subgroupAllEqual(dvec4);\n"

// ballot / broadcast
"double subgroupBroadcast(double, uint);\n"
"dvec2 subgroupBroadcast(dvec2, uint);\n"
"dvec3 subgroupBroadcast(dvec3, uint);\n"
"dvec4 subgroupBroadcast(dvec4, uint);\n"

"double subgroupBroadcastFirst(double);\n"
"dvec2 subgroupBroadcastFirst(dvec2);\n"
"dvec3 subgroupBroadcastFirst(dvec3);\n"
"dvec4 subgroupBroadcastFirst(dvec4);\n"

// shuffle / shuffle-relative
"double subgroupShuffle(double, uint);\n"
"dvec2 subgroupShuffle(dvec2, uint);\n"
"dvec3 subgroupShuffle(dvec3, uint);\n"
"dvec4 subgroupShuffle(dvec4, uint);\n"

"double subgroupShuffleXor(double, uint);\n"
"dvec2 subgroupShuffleXor(dvec2, uint);\n"
"dvec3 subgroupShuffleXor(dvec3, uint);\n"
"dvec4 subgroupShuffleXor(dvec4, uint);\n"

"double subgroupShuffleUp(double, uint delta);\n"
"dvec2 subgroupShuffleUp(dvec2, uint delta);\n"
"dvec3 subgroupShuffleUp(dvec3, uint delta);\n"
"dvec4 subgroupShuffleUp(dvec4, uint delta);\n"

"double subgroupShuffleDown(double, uint delta);\n"
"dvec2 subgroupShuffleDown(dvec2, uint delta);\n"
"dvec3 subgroupShuffleDown(dvec3, uint delta);\n"
"dvec4 subgroupShuffleDown(dvec4, uint delta);\n"

// arithmetic reductions (And/Or/Xor intentionally absent: bitwise ops are
// not defined for floating-point types)
"double subgroupAdd(double);\n"
"dvec2 subgroupAdd(dvec2);\n"
"dvec3 subgroupAdd(dvec3);\n"
"dvec4 subgroupAdd(dvec4);\n"

"double subgroupMul(double);\n"
"dvec2 subgroupMul(dvec2);\n"
"dvec3 subgroupMul(dvec3);\n"
"dvec4 subgroupMul(dvec4);\n"

"double subgroupMin(double);\n"
"dvec2 subgroupMin(dvec2);\n"
"dvec3 subgroupMin(dvec3);\n"
"dvec4 subgroupMin(dvec4);\n"

"double subgroupMax(double);\n"
"dvec2 subgroupMax(dvec2);\n"
"dvec3 subgroupMax(dvec3);\n"
"dvec4 subgroupMax(dvec4);\n"

// inclusive scans
"double subgroupInclusiveAdd(double);\n"
"dvec2 subgroupInclusiveAdd(dvec2);\n"
"dvec3 subgroupInclusiveAdd(dvec3);\n"
"dvec4 subgroupInclusiveAdd(dvec4);\n"

"double subgroupInclusiveMul(double);\n"
"dvec2 subgroupInclusiveMul(dvec2);\n"
"dvec3 subgroupInclusiveMul(dvec3);\n"
"dvec4 subgroupInclusiveMul(dvec4);\n"

"double subgroupInclusiveMin(double);\n"
"dvec2 subgroupInclusiveMin(dvec2);\n"
"dvec3 subgroupInclusiveMin(dvec3);\n"
"dvec4 subgroupInclusiveMin(dvec4);\n"

"double subgroupInclusiveMax(double);\n"
"dvec2 subgroupInclusiveMax(dvec2);\n"
"dvec3 subgroupInclusiveMax(dvec3);\n"
"dvec4 subgroupInclusiveMax(dvec4);\n"

// exclusive scans
"double subgroupExclusiveAdd(double);\n"
"dvec2 subgroupExclusiveAdd(dvec2);\n"
"dvec3 subgroupExclusiveAdd(dvec3);\n"
"dvec4 subgroupExclusiveAdd(dvec4);\n"

"double subgroupExclusiveMul(double);\n"
"dvec2 subgroupExclusiveMul(dvec2);\n"
"dvec3 subgroupExclusiveMul(dvec3);\n"
"dvec4 subgroupExclusiveMul(dvec4);\n"

"double subgroupExclusiveMin(double);\n"
"dvec2 subgroupExclusiveMin(dvec2);\n"
"dvec3 subgroupExclusiveMin(dvec3);\n"
"dvec4 subgroupExclusiveMin(dvec4);\n"

"double subgroupExclusiveMax(double);\n"
"dvec2 subgroupExclusiveMax(dvec2);\n"
"dvec3 subgroupExclusiveMax(dvec3);\n"
"dvec4 subgroupExclusiveMax(dvec4);\n"

// clustered reductions
"double subgroupClusteredAdd(double, uint);\n"
"dvec2 subgroupClusteredAdd(dvec2, uint);\n"
"dvec3 subgroupClusteredAdd(dvec3, uint);\n"
"dvec4 subgroupClusteredAdd(dvec4, uint);\n"

"double subgroupClusteredMul(double, uint);\n"
"dvec2 subgroupClusteredMul(dvec2, uint);\n"
"dvec3 subgroupClusteredMul(dvec3, uint);\n"
"dvec4 subgroupClusteredMul(dvec4, uint);\n"

"double subgroupClusteredMin(double, uint);\n"
"dvec2 subgroupClusteredMin(dvec2, uint);\n"
"dvec3 subgroupClusteredMin(dvec3, uint);\n"
"dvec4 subgroupClusteredMin(dvec4, uint);\n"

"double subgroupClusteredMax(double, uint);\n"
"dvec2 subgroupClusteredMax(dvec2, uint);\n"
"dvec3 subgroupClusteredMax(dvec3, uint);\n"
"dvec4 subgroupClusteredMax(dvec4, uint);\n"

// quad operations
"double subgroupQuadBroadcast(double, uint);\n"
"dvec2 subgroupQuadBroadcast(dvec2, uint);\n"
"dvec3 subgroupQuadBroadcast(dvec3, uint);\n"
"dvec4 subgroupQuadBroadcast(dvec4, uint);\n"

"double subgroupQuadSwapHorizontal(double);\n"
"dvec2 subgroupQuadSwapHorizontal(dvec2);\n"
"dvec3 subgroupQuadSwapHorizontal(dvec3);\n"
"dvec4 subgroupQuadSwapHorizontal(dvec4);\n"

"double subgroupQuadSwapVertical(double);\n"
"dvec2 subgroupQuadSwapVertical(dvec2);\n"
"dvec3 subgroupQuadSwapVertical(dvec3);\n"
"dvec4 subgroupQuadSwapVertical(dvec4);\n"

"double subgroupQuadSwapDiagonal(double);\n"
"dvec2 subgroupQuadSwapDiagonal(dvec2);\n"
"dvec3 subgroupQuadSwapDiagonal(dvec3);\n"
"dvec4 subgroupQuadSwapDiagonal(dvec4);\n"

"\n");
}
| |
// subgroupMemoryBarrierShared() is declared only for the compute stage,
// since it orders accesses to 'shared' variables, which only exist in
// compute-style stages.
stageBuiltins[EShLangCompute].append(
"void subgroupMemoryBarrierShared();"

"\n"
);
| } |
| |
// Shader invocation group vote functions: anyInvocation(), allInvocations(),
// and allInvocationsEqual() were promoted to core in desktop GLSL 4.60
// (previously exposed with an ARB suffix by GL_ARB_shader_group_vote), hence
// the version >= 460 gate with no extension check.
if (profile != EEsProfile && version >= 460) {
commonBuiltins.append(
"bool anyInvocation(bool);"
"bool allInvocations(bool);"
"bool allInvocationsEqual(bool);"

"\n");
}
| |
| #ifdef AMD_EXTENSIONS |
| // GL_AMD_shader_ballot |
| if (profile != EEsProfile && version >= 450) { |
| commonBuiltins.append( |
| "float minInvocationsAMD(float);" |
| "vec2 minInvocationsAMD(vec2);" |
| "vec3 minInvocationsAMD(vec3);" |
| "vec4 minInvocationsAMD(vec4);" |
| |
| "int minInvocationsAMD(int);" |
| "ivec2 minInvocationsAMD(ivec2);" |
| "ivec3 minInvocationsAMD(ivec3);" |
| "ivec4 minInvocationsAMD(ivec4);" |
| |
| "uint minInvocationsAMD(uint);" |
| "uvec2 minInvocationsAMD(uvec2);" |
| "uvec3 minInvocationsAMD(uvec3);" |
| "uvec4 minInvocationsAMD(uvec4);" |
| |
| "double minInvocationsAMD(double);" |
| "dvec2 minInvocationsAMD(dvec2);" |
| "dvec3 minInvocationsAMD(dvec3);" |
| "dvec4 minInvocationsAMD(dvec4);" |
| |
| "int64_t minInvocationsAMD(int64_t);" |
| "i64vec2 minInvocationsAMD(i64vec2);" |
| "i64vec3 minInvocationsAMD(i64vec3);" |
| "i64vec4 minInvocationsAMD(i64vec4);" |
| |
| "uint64_t minInvocationsAMD(uint64_t);" |
| "u64vec2 minInvocationsAMD(u64vec2);" |
| "u64vec3 minInvocationsAMD(u64vec3);" |
| "u64vec4 minInvocationsAMD(u64vec4);" |
| |
| "float16_t minInvocationsAMD(float16_t);" |
| "f16vec2 minInvocationsAMD(f16vec2);" |
| "f16vec3 minInvocationsAMD(f16vec3);" |
| "f16vec4 minInvocationsAMD(f16vec4);" |
| |
| "float minInvocationsInclusiveScanAMD(float);" |
| "vec2 minInvocationsInclusiveScanAMD(vec2);" |
| "vec3 minInvocationsInclusiveScanAMD(vec3);" |
| "vec4 minInvocationsInclusiveScanAMD(vec4);" |
| |
| "int minInvocationsInclusiveScanAMD(int);" |
| "ivec2 minInvocationsInclusiveScanAMD(ivec2);" |
| "ivec3 minInvocationsInclusiveScanAMD(ivec3);" |
| "ivec4 minInvocationsInclusiveScanAMD(ivec4);" |
| |
| "uint minInvocationsInclusiveScanAMD(uint);" |
| "uvec2 minInvocationsInclusiveScanAMD(uvec2);" |
| "uvec3 minInvocationsInclusiveScanAMD(uvec3);" |
| "uvec4 minInvocationsInclusiveScanAMD(uvec4);" |
| |
| "double minInvocationsInclusiveScanAMD(double);" |
| "dvec2 minInvocationsInclusiveScanAMD(dvec2);" |
| "dvec3 minInvocationsInclusiveScanAMD(dvec3);" |
| "dvec4 minInvocationsInclusiveScanAMD(dvec4);" |
| |
| "int64_t minInvocationsInclusiveScanAMD(int64_t);" |
| "i64vec2 minInvocationsInclusiveScanAMD(i64vec2);" |
| "i64vec3 minInvocationsInclusiveScanAMD(i64vec3);" |
| "i64vec4 minInvocationsInclusiveScanAMD(i64vec4);" |
| |
| "uint64_t minInvocationsInclusiveScanAMD(uint64_t);" |
| "u64vec2 minInvocationsInclusiveScanAMD(u64vec2);" |
| "u64vec3 minInvocationsInclusiveScanAMD(u64vec3);" |
| "u64vec4 minInvocationsInclusiveScanAMD(u64vec4);" |
| |
| "float16_t minInvocationsInclusiveScanAMD(float16_t);" |
| "f16vec2 minInvocationsInclusiveScanAMD(f16vec2);" |
| "f16vec3 minInvocationsInclusiveScanAMD(f16vec3);" |
| "f16vec4 minInvocationsInclusiveScanAMD(f16vec4);" |
| |
| "float minInvocationsExclusiveScanAMD(float);" |
| "vec2 minInvocationsExclusiveScanAMD(vec2);" |
| "vec3 minInvocationsExclusiveScanAMD(vec3);" |
| "vec4 minInvocationsExclusiveScanAMD(vec4);" |
| |
| "int minInvocationsExclusiveScanAMD(int);" |
| "ivec2 minInvocationsExclusiveScanAMD(ivec2);" |
| "ivec3 minInvocationsExclusiveScanAMD(ivec3);" |
| "ivec4 minInvocationsExclusiveScanAMD(ivec4);" |
| |
| "uint minInvocationsExclusiveScanAMD(uint);" |
| "uvec2 minInvocationsExclusiveScanAMD(uvec2);" |
| "uvec3 minInvocationsExclusiveScanAMD(uvec3);" |
| "uvec4 minInvocationsExclusiveScanAMD(uvec4);" |
| |
| "double minInvocationsExclusiveScanAMD(double);" |
| "dvec2 minInvocationsExclusiveScanAMD(dvec2);" |
| "dvec3 minInvocationsExclusiveScanAMD(dvec3);" |
| "dvec4 minInvocationsExclusiveScanAMD(dvec4);" |
| |
| "int64_t minInvocationsExclusiveScanAMD(int64_t);" |
| "i64vec2 minInvocationsExclusiveScanAMD(i64vec2);" |
| "i64vec3 minInvocationsExclusiveScanAMD(i64vec3);" |
| "i64vec4 minInvocationsExclusiveScanAMD(i64vec4);" |
| |
| "uint64_t minInvocationsExclusiveScanAMD(uint64_t);" |
| "u64vec2 minInvocationsExclusiveScanAMD(u64vec2);" |
| "u64vec3 minInvocationsExclusiveScanAMD(u64vec3);" |
| "u64vec4 minInvocationsExclusiveScanAMD(u64vec4);" |
| |
| "float16_t minInvocationsExclusiveScanAMD(float16_t);" |
| "f16vec2 minInvocationsExclusiveScanAMD(f16vec2);" |
| "f16vec3 minInvocationsExclusiveScanAMD(f16vec3);" |
| "f16vec4 minInvocationsExclusiveScanAMD(f16vec4);" |
| |
| "float maxInvocationsAMD(float);" |
| "vec2 maxInvocationsAMD(vec2);" |
| "vec3 maxInvocationsAMD(vec3);" |
| "vec4 maxInvocationsAMD(vec4);" |
| |
| "int maxInvocationsAMD(int);" |
| "ivec2 maxInvocationsAMD(ivec2);" |
| "ivec3 maxInvocationsAMD(ivec3);" |
| "ivec4 maxInvocationsAMD(ivec4);" |
| |
| "uint maxInvocationsAMD(uint);" |
| "uvec2 maxInvocationsAMD(uvec2);" |
| "uvec3 maxInvocationsAMD(uvec3);" |
| "uvec4 maxInvocationsAMD(uvec4);" |
| |
| "double maxInvocationsAMD(double);" |
| "dvec2 maxInvocationsAMD(dvec2);" |
| "dvec3 maxInvocationsAMD(dvec3);" |
| "dvec4 maxInvocationsAMD(dvec4);" |
| |
| "int64_t maxInvocationsAMD(int64_t);" |
| "i64vec2 maxInvocationsAMD(i64vec2);" |
| "i64vec3 maxInvocationsAMD(i64vec3);" |
| "i64vec4 maxInvocationsAMD(i64vec4);" |
| |
| "uint64_t maxInvocationsAMD(uint64_t);" |
| "u64vec2 maxInvocationsAMD(u64vec2);" |
| "u64vec3 maxInvocationsAMD(u64vec3);" |
| "u64vec4 maxInvocationsAMD(u64vec4);" |
| |
| "float16_t maxInvocationsAMD(float16_t);" |
| "f16vec2 maxInvocationsAMD(f16vec2);" |
| "f16vec3 maxInvocationsAMD(f16vec3);" |
| "f16vec4 maxInvocationsAMD(f16vec4);" |
| |
| "float maxInvocationsInclusiveScanAMD(float);" |
| "vec2 maxInvocationsInclusiveScanAMD(vec2);" |
| "vec3 maxInvocationsInclusiveScanAMD(vec3);" |
| "vec4 maxInvocationsInclusiveScanAMD(vec4);" |
| |
| "int maxInvocationsInclusiveScanAMD(int);" |
| "ivec2 maxInvocationsInclusiveScanAMD(ivec2);" |
| "ivec3 maxInvocationsInclusiveScanAMD(ivec3);" |
| "ivec4 maxInvocationsInclusiveScanAMD(ivec4);" |
| |
| "uint maxInvocationsInclusiveScanAMD(uint);" |
| "uvec2 maxInvocationsInclusiveScanAMD(uvec2);" |
| "uvec3 maxInvocationsInclusiveScanAMD(uvec3);" |
| "uvec4 maxInvocationsInclusiveScanAMD(uvec4);" |
| |
| "double maxInvocationsInclusiveScanAMD(double);" |
| "dvec2 maxInvocationsInclusiveScanAMD(dvec2);" |
| "dvec3 maxInvocationsInclusiveScanAMD(dvec3);" |
| "dvec4 maxInvocationsInclusiveScanAMD(dvec4);" |
| |
| "int64_t maxInvocationsInclusiveScanAMD(int64_t);" |
| "i64vec2 maxInvocationsInclusiveScanAMD(i64vec2);" |
| "i64vec3 maxInvocationsInclusiveScanAMD(i64vec3);" |
| "i64vec4 maxInvocationsInclusiveScanAMD(i64vec4);" |
| |
| "uint64_t maxInvocationsInclusiveScanAMD(uint64_t);" |
| "u64vec2 maxInvocationsInclusiveScanAMD(u64vec2);" |
| "u64vec3 maxInvocationsInclusiveScanAMD(u64vec3);" |
| "u64vec4 maxInvocationsInclusiveScanAMD(u64vec4);" |
| |
| "float16_t maxInvocationsInclusiveScanAMD(float16_t);" |
| "f16vec2 maxInvocationsInclusiveScanAMD(f16vec2);" |
| "f16vec3 maxInvocationsInclusiveScanAMD(f16vec3);" |
| "f16vec4 maxInvocationsInclusiveScanAMD(f16vec4);" |
| |
| "float maxInvocationsExclusiveScanAMD(float);" |
| "vec2 maxInvocationsExclusiveScanAMD(vec2);" |
| "vec3 maxInvocationsExclusiveScanAMD(vec3);" |
| "vec4 maxInvocationsExclusiveScanAMD(vec4);" |
| |
| "int maxInvocationsExclusiveScanAMD(int);" |
| "ivec2 maxInvocationsExclusiveScanAMD(ivec2);" |
| "ivec3 maxInvocationsExclusiveScanAMD(ivec3);" |
| "ivec4 maxInvocationsExclusiveScanAMD(ivec4);" |
| |
| "uint maxInvocationsExclusiveScanAMD(uint);" |
| "uvec2 maxInvocationsExclusiveScanAMD(uvec2);" |
| "uvec3 maxInvocationsExclusiveScanAMD(uvec3);" |
| "uvec4 maxInvocationsExclusiveScanAMD(uvec4);" |
| |
| "double maxInvocationsExclusiveScanAMD(double);" |
| "dvec2 maxInvocationsExclusiveScanAMD(dvec2);" |
| "dvec3 maxInvocationsExclusiveScanAMD(dvec3);" |
| "dvec4 maxInvocationsExclusiveScanAMD(dvec4);" |
| |
| "int64_t maxInvocationsExclusiveScanAMD(int64_t);" |
| "i64vec2 maxInvocationsExclusiveScanAMD(i64vec2);" |
| "i64vec3 maxInvocationsExclusiveScanAMD(i64vec3);" |
| "i64vec4 maxInvocationsExclusiveScanAMD(i64vec4);" |
| |
| "uint64_t maxInvocationsExclusiveScanAMD(uint64_t);" |
| "u64vec2 maxInvocationsExclusiveScanAMD(u64vec2);" |
| "u64vec3 maxInvocationsExclusiveScanAMD(u64vec3);" |
| "u64vec4 maxInvocationsExclusiveScanAMD(u64vec4);" |
| |
| "float16_t maxInvocationsExclusiveScanAMD(float16_t);" |
| "f16vec2 maxInvocationsExclusiveScanAMD(f16vec2);" |
| "f16vec3 maxInvocationsExclusiveScanAMD(f16vec3);" |
| "f16vec4 maxInvocationsExclusiveScanAMD(f16vec4);" |
| |
| "float addInvocationsAMD(float);" |
| "vec2 addInvocationsAMD(vec2);" |
| "vec3 addInvocationsAMD(vec3);" |
| "vec4 addInvocationsAMD(vec4);" |
| |
| "int addInvocationsAMD(int);" |
| "ivec2 addInvocationsAMD(ivec2);" |
| "ivec3 addInvocationsAMD(ivec3);" |
| "ivec4 addInvocationsAMD(ivec4);" |
| |
| "uint addInvocationsAMD(uint);" |
| "uvec2 addInvocationsAMD(uvec2);" |
| "uvec3 addInvocationsAMD(uvec3);" |
| "uvec4 addInvocationsAMD(uvec4);" |
| |
| "double addInvocationsAMD(double);" |
| "dvec2 addInvocationsAMD(dvec2);" |
| "dvec3 addInvocationsAMD(dvec3);" |
| "dvec4 addInvocationsAMD(dvec4);" |
| |
| "int64_t addInvocationsAMD(int64_t);" |
| "i64vec2 addInvocationsAMD(i64vec2);" |
| "i64vec3 addInvocationsAMD(i64vec3);" |
| "i64vec4 addInvocationsAMD(i64vec4);" |
| |
| "uint64_t addInvocationsAMD(uint64_t);" |
| "u64vec2 addInvocationsAMD(u64vec2);" |
| "u64vec3 addInvocationsAMD(u64vec3);" |
| "u64vec4 addInvocationsAMD(u64vec4);" |
| |
| "float16_t addInvocationsAMD(float16_t);" |
| "f16vec2 addInvocationsAMD(f16vec2);" |
| "f16vec3 addInvocationsAMD(f16vec3);" |
| "f16vec4 addInvocationsAMD(f16vec4);" |
| |
| "float addInvocationsInclusiveScanAMD(float);" |
| "vec2 addInvocationsInclusiveScanAMD(vec2);" |
| "vec3 addInvocationsInclusiveScanAMD(vec3);" |
| "vec4 addInvocationsInclusiveScanAMD(vec4);" |
| |
| "int addInvocationsInclusiveScanAMD(int);" |
| "ivec2 addInvocationsInclusiveScanAMD(ivec2);" |
| "ivec3 addInvocationsInclusiveScanAMD(ivec3);" |
| "ivec4 addInvocationsInclusiveScanAMD(ivec4);" |
| |
| "uint addInvocationsInclusiveScanAMD(uint);" |
| "uvec2 addInvocationsInclusiveScanAMD(uvec2);" |
| "uvec3 addInvocationsInclusiveScanAMD(uvec3);" |
| "uvec4 addInvocationsInclusiveScanAMD(uvec4);" |
| |
| "double addInvocationsInclusiveScanAMD(double);" |
| "dvec2 addInvocationsInclusiveScanAMD(dvec2);" |
| "dvec3 addInvocationsInclusiveScanAMD(dvec3);" |
| "dvec4 addInvocationsInclusiveScanAMD(dvec4);" |
| |
| "int64_t addInvocationsInclusiveScanAMD(int64_t);" |
| "i64vec2 addInvocationsInclusiveScanAMD(i64vec2);" |
| "i64vec3 addInvocationsInclusiveScanAMD(i64vec3);" |
| "i64vec4 addInvocationsInclusiveScanAMD(i64vec4);" |
| |
| "uint64_t addInvocationsInclusiveScanAMD(uint64_t);" |
| "u64vec2 addInvocationsInclusiveScanAMD(u64vec2);" |
| "u64vec3 addInvocationsInclusiveScanAMD(u64vec3);" |
| "u64vec4 addInvocationsInclusiveScanAMD(u64vec4);" |
| |
| "float16_t addInvocationsInclusiveScanAMD(float16_t);" |
| "f16vec2 addInvocationsInclusiveScanAMD(f16vec2);" |
| "f16vec3 addInvocationsInclusiveScanAMD(f16vec3);" |
| "f16vec4 addInvocationsInclusiveScanAMD(f16vec4);" |
| |
| "float addInvocationsExclusiveScanAMD(float);" |
| "vec2 addInvocationsExclusiveScanAMD(vec2);" |
| "vec3 addInvocationsExclusiveScanAMD(vec3);" |
| "vec4 addInvocationsExclusiveScanAMD(vec4);" |
| |
| "int addInvocationsExclusiveScanAMD(int);" |
| "ivec2 addInvocationsExclusiveScanAMD(ivec2);" |
| "ivec3 addInvocationsExclusiveScanAMD(ivec3);" |
| "ivec4 addInvocationsExclusiveScanAMD(ivec4);" |
| |
| "uint addInvocationsExclusiveScanAMD(uint);" |
| "uvec2 addInvocationsExclusiveScanAMD(uvec2);" |
| "uvec3 addInvocationsExclusiveScanAMD(uvec3);" |
| "uvec4 addInvocationsExclusiveScanAMD(uvec4);" |
| |
| "double addInvocationsExclusiveScanAMD(double);" |
| "dvec2 addInvocationsExclusiveScanAMD(dvec2);" |
| "dvec3 addInvocationsExclusiveScanAMD(dvec3);" |
| "dvec4 addInvocationsExclusiveScanAMD(dvec4);" |
| |
| "int64_t addInvocationsExclusiveScanAMD(int64_t);" |
| "i64vec2 addInvocationsExclusiveScanAMD(i64vec2);" |
| "i64vec3 addInvocationsExclusiveScanAMD(i64vec3);" |
| "i64vec4 addInvocationsExclusiveScanAMD(i64vec4);" |
| |
| "uint64_t addInvocationsExclusiveScanAMD(uint64_t);" |
| "u64vec2 addInvocationsExclusiveScanAMD(u64vec2);" |
| "u64vec3 addInvocationsExclusiveScanAMD(u64vec3);" |
| "u64vec4 addInvocationsExclusiveScanAMD(u64vec4);" |
| |
| "float16_t addInvocationsExclusiveScanAMD(float16_t);" |
| "f16vec2 addInvocationsExclusiveScanAMD(f16vec2);" |
| "f16vec3 addInvocationsExclusiveScanAMD(f16vec3);" |
| "f16vec4 addInvocationsExclusiveScanAMD(f16vec4);" |
| |
| "float minInvocationsNonUniformAMD(float);" |
| "vec2 minInvocationsNonUniformAMD(vec2);" |
| "vec3 minInvocationsNonUniformAMD(vec3);" |
| "vec4 minInvocationsNonUniformAMD(vec4);" |
| |
| "int minInvocationsNonUniformAMD(int);" |
| "ivec2 minInvocationsNonUniformAMD(ivec2);" |
| "ivec3 minInvocationsNonUniformAMD(ivec3);" |
| "ivec4 minInvocationsNonUniformAMD(ivec4);" |
| |
| "uint minInvocationsNonUniformAMD(uint);" |
| "uvec2 minInvocationsNonUniformAMD(uvec2);" |
| "uvec3 minInvocationsNonUniformAMD(uvec3);" |
| "uvec4 minInvocationsNonUniformAMD(uvec4);" |
| |
| "double minInvocationsNonUniformAMD(double);" |
| "dvec2 minInvocationsNonUniformAMD(dvec2);" |
| "dvec3 minInvocationsNonUniformAMD(dvec3);" |
| "dvec4 minInvocationsNonUniformAMD(dvec4);" |
| |
| "int64_t minInvocationsNonUniformAMD(int64_t);" |
| "i64vec2 minInvocationsNonUniformAMD(i64vec2);" |
| "i64vec3 minInvocationsNonUniformAMD(i64vec3);" |
| "i64vec4 minInvocationsNonUniformAMD(i64vec4);" |
| |
| "uint64_t minInvocationsNonUniformAMD(uint64_t);" |
| "u64vec2 minInvocationsNonUniformAMD(u64vec2);" |
| "u64vec3 minInvocationsNonUniformAMD(u64vec3);" |
| "u64vec4 minInvocationsNonUniformAMD(u64vec4);" |
| |
| "float16_t minInvocationsNonUniformAMD(float16_t);" |
| "f16vec2 minInvocationsNonUniformAMD(f16vec2);" |
| "f16vec3 minInvocationsNonUniformAMD(f16vec3);" |
| "f16vec4 minInvocationsNonUniformAMD(f16vec4);" |
| |
| "float minInvocationsInclusiveScanNonUniformAMD(float);" |
| "vec2 minInvocationsInclusiveScanNonUniformAMD(vec2);" |
| "vec3 minInvocationsInclusiveScanNonUniformAMD(vec3);" |
| "vec4 minInvocationsInclusiveScanNonUniformAMD(vec4);" |
| |
| "int minInvocationsInclusiveScanNonUniformAMD(int);" |
| "ivec2 minInvocationsInclusiveScanNonUniformAMD(ivec2);" |
| "ivec3 minInvocationsInclusiveScanNonUniformAMD(ivec3);" |
| "ivec4 minInvocationsInclusiveScanNonUniformAMD(ivec4);" |
| |
| "uint minInvocationsInclusiveScanNonUniformAMD(uint);" |
| "uvec2 minInvocationsInclusiveScanNonUniformAMD(uvec2);" |
| "uvec3 minInvocationsInclusiveScanNonUniformAMD(uvec3);" |
| "uvec4 minInvocationsInclusiveScanNonUniformAMD(uvec4);" |
| |
| "double minInvocationsInclusiveScanNonUniformAMD(double);" |
| "dvec2 minInvocationsInclusiveScanNonUniformAMD(dvec2);" |
| "dvec3 minInvocationsInclusiveScanNonUniformAMD(dvec3);" |
| "dvec4 minInvocationsInclusiveScanNonUniformAMD(dvec4);" |
| |
| "int64_t minInvocationsInclusiveScanNonUniformAMD(int64_t);" |
| "i64vec2 minInvocationsInclusiveScanNonUniformAMD(i64vec2);" |
| "i64vec3 minInvocationsInclusiveScanNonUniformAMD(i64vec3);" |
| "i64vec4 minInvocationsInclusiveScanNonUniformAMD(i64vec4);" |
| |
| "uint64_t minInvocationsInclusiveScanNonUniformAMD(uint64_t);" |
| "u64vec2 minInvocationsInclusiveScanNonUniformAMD(u64vec2);" |
| "u64vec3 minInvocationsInclusiveScanNonUniformAMD(u64vec3);" |
| "u64vec4 minInvocationsInclusiveScanNonUniformAMD(u64vec4);" |
| |
| "float16_t minInvocationsInclusiveScanNonUniformAMD(float16_t);" |
| "f16vec2 minInvocationsInclusiveScanNonUniformAMD(f16vec2);" |
| "f16vec3 minInvocationsInclusiveScanNonUniformAMD(f16vec3);" |
| "f16vec4 minInvocationsInclusiveScanNonUniformAMD(f16vec4);" |
| |
| "float minInvocationsExclusiveScanNonUniformAMD(float);" |
| "vec2 minInvocationsExclusiveScanNonUniformAMD(vec2);" |
| "vec3 minInvocationsExclusiveScanNonUniformAMD(vec3);" |
| "vec4 minInvocationsExclusiveScanNonUniformAMD(vec4);" |
| |
| "int minInvocationsExclusiveScanNonUniformAMD(int);" |
| "ivec2 minInvocationsExclusiveScanNonUniformAMD(ivec2);" |
| "ivec3 minInvocationsExclusiveScanNonUniformAMD(ivec3);" |
| "ivec4 minInvocationsExclusiveScanNonUniformAMD(ivec4);" |
| |
| "uint minInvocationsExclusiveScanNonUniformAMD(uint);" |
| "uvec2 minInvocationsExclusiveScanNonUniformAMD(uvec2);" |
| "uvec3 minInvocationsExclusiveScanNonUniformAMD(uvec3);" |
| "uvec4 minInvocationsExclusiveScanNonUniformAMD(uvec4);" |
| |
| "double minInvocationsExclusiveScanNonUniformAMD(double);" |
| "dvec2 minInvocationsExclusiveScanNonUniformAMD(dvec2);" |
| "dvec3 minInvocationsExclusiveScanNonUniformAMD(dvec3);" |
| "dvec4 minInvocationsExclusiveScanNonUniformAMD(dvec4);" |
| |
| "int64_t minInvocationsExclusiveScanNonUniformAMD(int64_t);" |
| "i64vec2 minInvocationsExclusiveScanNonUniformAMD(i64vec2);" |
| "i64vec3 minInvocationsExclusiveScanNonUniformAMD(i64vec3);" |
| "i64vec4 minInvocationsExclusiveScanNonUniformAMD(i64vec4);" |
| |
| "uint64_t minInvocationsExclusiveScanNonUniformAMD(uint64_t);" |
| "u64vec2 minInvocationsExclusiveScanNonUniformAMD(u64vec2);" |
| "u64vec3 minInvocationsExclusiveScanNonUniformAMD(u64vec3);" |
| "u64vec4 minInvocationsExclusiveScanNonUniformAMD(u64vec4);" |
| |
| "float16_t minInvocationsExclusiveScanNonUniformAMD(float16_t);" |
| "f16vec2 minInvocationsExclusiveScanNonUniformAMD(f16vec2);" |
| "f16vec3 minInvocationsExclusiveScanNonUniformAMD(f16vec3);" |
| "f16vec4 minInvocationsExclusiveScanNonUniformAMD(f16vec4);" |
| |
| "float maxInvocationsNonUniformAMD(float);" |
| "vec2 maxInvocationsNonUniformAMD(vec2);" |
| "vec3 maxInvocationsNonUniformAMD(vec3);" |
| "vec4 maxInvocationsNonUniformAMD(vec4);" |
| |
| "int maxInvocationsNonUniformAMD(int);" |
| "ivec2 maxInvocationsNonUniformAMD(ivec2);" |
| "ivec3 maxInvocationsNonUniformAMD(ivec3);" |
| "ivec4 maxInvocationsNonUniformAMD(ivec4);" |
| |
| "uint maxInvocationsNonUniformAMD(uint);" |
| "uvec2 maxInvocationsNonUniformAMD(uvec2);" |
| "uvec3 maxInvocationsNonUniformAMD(uvec3);" |
| "uvec4 maxInvocationsNonUniformAMD(uvec4);" |
| |
| "double maxInvocationsNonUniformAMD(double);" |
| "dvec2 maxInvocationsNonUniformAMD(dvec2);" |
| "dvec3 maxInvocationsNonUniformAMD(dvec3);" |
| "dvec4 maxInvocationsNonUniformAMD(dvec4);" |
| |
| "int64_t maxInvocationsNonUniformAMD(int64_t);" |
| "i64vec2 maxInvocationsNonUniformAMD(i64vec2);" |
| "i64vec3 maxInvocationsNonUniformAMD(i64vec3);" |
| "i64vec4 maxInvocationsNonUniformAMD(i64vec4);" |
| |
| "uint64_t maxInvocationsNonUniformAMD(uint64_t);" |
| "u64vec2 maxInvocationsNonUniformAMD(u64vec2);" |
| "u64vec3 maxInvocationsNonUniformAMD(u64vec3);" |
| "u64vec4 maxInvocationsNonUniformAMD(u64vec4);" |
| |
| "float16_t maxInvocationsNonUniformAMD(float16_t);" |
| "f16vec2 maxInvocationsNonUniformAMD(f16vec2);" |
| "f16vec3 maxInvocationsNonUniformAMD(f16vec3);" |
| "f16vec4 maxInvocationsNonUniformAMD(f16vec4);" |
| |
| "float maxInvocationsInclusiveScanNonUniformAMD(float);" |
| "vec2 maxInvocationsInclusiveScanNonUniformAMD(vec2);" |
| "vec3 maxInvocationsInclusiveScanNonUniformAMD(vec3);" |
| "vec4 maxInvocationsInclusiveScanNonUniformAMD(vec4);" |
| |
| "int maxInvocationsInclusiveScanNonUniformAMD(int);" |
| "ivec2 maxInvocationsInclusiveScanNonUniformAMD(ivec2);" |
| "ivec3 maxInvocationsInclusiveScanNonUniformAMD(ivec3);" |
| "ivec4 maxInvocationsInclusiveScanNonUniformAMD(ivec4);" |
| |
| "uint maxInvocationsInclusiveScanNonUniformAMD(uint);" |
| "uvec2 maxInvocationsInclusiveScanNonUniformAMD(uvec2);" |
| "uvec3 maxInvocationsInclusiveScanNonUniformAMD(uvec3);" |
| "uvec4 maxInvocationsInclusiveScanNonUniformAMD(uvec4);" |
| |
| "double maxInvocationsInclusiveScanNonUniformAMD(double);" |
| "dvec2 maxInvocationsInclusiveScanNonUniformAMD(dvec2);" |
| "dvec3 maxInvocationsInclusiveScanNonUniformAMD(dvec3);" |
| "dvec4 maxInvocationsInclusiveScanNonUniformAMD(dvec4);" |
| |
| "int64_t maxInvocationsInclusiveScanNonUniformAMD(int64_t);" |
| "i64vec2 maxInvocationsInclusiveScanNonUniformAMD(i64vec2);" |
| "i64vec3 maxInvocationsInclusiveScanNonUniformAMD(i64vec3);" |
| "i64vec4 maxInvocationsInclusiveScanNonUniformAMD(i64vec4);" |
| |
| "uint64_t maxInvocationsInclusiveScanNonUniformAMD(uint64_t);" |
| "u64vec2 maxInvocationsInclusiveScanNonUniformAMD(u64vec2);" |
| "u64vec3 maxInvocationsInclusiveScanNonUniformAMD(u64vec3);" |
| "u64vec4 maxInvocationsInclusiveScanNonUniformAMD(u64vec4);" |
| |
| "float16_t maxInvocationsInclusiveScanNonUniformAMD(float16_t);" |
| "f16vec2 maxInvocationsInclusiveScanNonUniformAMD(f16vec2);" |
| "f16vec3 maxInvocationsInclusiveScanNonUniformAMD(f16vec3);" |
| "f16vec4 maxInvocationsInclusiveScanNonUniformAMD(f16vec4);" |
| |
| "float maxInvocationsExclusiveScanNonUniformAMD(float);" |
| "vec2 maxInvocationsExclusiveScanNonUniformAMD(vec2);" |
| "vec3 maxInvocationsExclusiveScanNonUniformAMD(vec3);" |
| "vec4 maxInvocationsExclusiveScanNonUniformAMD(vec4);" |
| |
| "int maxInvocationsExclusiveScanNonUniformAMD(int);" |
| "ivec2 maxInvocationsExclusiveScanNonUniformAMD(ivec2);" |
| "ivec3 maxInvocationsExclusiveScanNonUniformAMD(ivec3);" |
| "ivec4 maxInvocationsExclusiveScanNonUniformAMD(ivec4);" |
| |
| "uint maxInvocationsExclusiveScanNonUniformAMD(uint);" |
| "uvec2 maxInvocationsExclusiveScanNonUniformAMD(uvec2);" |
| "uvec3 maxInvocationsExclusiveScanNonUniformAMD(uvec3);" |
| "uvec4 maxInvocationsExclusiveScanNonUniformAMD(uvec4);" |
| |
| "double maxInvocationsExclusiveScanNonUniformAMD(double);" |
| "dvec2 maxInvocationsExclusiveScanNonUniformAMD(dvec2);" |
| "dvec3 maxInvocationsExclusiveScanNonUniformAMD(dvec3);" |
| "dvec4 maxInvocationsExclusiveScanNonUniformAMD(dvec4);" |
| |
| "int64_t maxInvocationsExclusiveScanNonUniformAMD(int64_t);" |
| "i64vec2 maxInvocationsExclusiveScanNonUniformAMD(i64vec2);" |
| "i64vec3 maxInvocationsExclusiveScanNonUniformAMD(i64vec3);" |
| "i64vec4 maxInvocationsExclusiveScanNonUniformAMD(i64vec4);" |
| |
| "uint64_t maxInvocationsExclusiveScanNonUniformAMD(uint64_t);" |
| "u64vec2 maxInvocationsExclusiveScanNonUniformAMD(u64vec2);" |
| "u64vec3 maxInvocationsExclusiveScanNonUniformAMD(u64vec3);" |
| "u64vec4 maxInvocationsExclusiveScanNonUniformAMD(u64vec4);" |
| |
| "float16_t maxInvocationsExclusiveScanNonUniformAMD(float16_t);" |
| "f16vec2 maxInvocationsExclusiveScanNonUniformAMD(f16vec2);" |
| "f16vec3 maxInvocationsExclusiveScanNonUniformAMD(f16vec3);" |
| "f16vec4 maxInvocationsExclusiveScanNonUniformAMD(f16vec4);" |
| |
| "float addInvocationsNonUniformAMD(float);" |
| "vec2 addInvocationsNonUniformAMD(vec2);" |
| "vec3 addInvocationsNonUniformAMD(vec3);" |
| "vec4 addInvocationsNonUniformAMD(vec4);" |
| |
| "int addInvocationsNonUniformAMD(int);" |
| "ivec2 addInvocationsNonUniformAMD(ivec2);" |
| "ivec3 addInvocationsNonUniformAMD(ivec3);" |
| "ivec4 addInvocationsNonUniformAMD(ivec4);" |
| |
| "uint addInvocationsNonUniformAMD(uint);" |
| "uvec2 addInvocationsNonUniformAMD(uvec2);" |
| "uvec3 addInvocationsNonUniformAMD(uvec3);" |
| "uvec4 addInvocationsNonUniformAMD(uvec4);" |
| |
| "double addInvocationsNonUniformAMD(double);" |
| "dvec2 addInvocationsNonUniformAMD(dvec2);" |
| "dvec3 addInvocationsNonUniformAMD(dvec3);" |
| "dvec4 addInvocationsNonUniformAMD(dvec4);" |
| |
| "int64_t addInvocationsNonUniformAMD(int64_t);" |
| "i64vec2 addInvocationsNonUniformAMD(i64vec2);" |
| "i64vec3 addInvocationsNonUniformAMD(i64vec3);" |
| "i64vec4 addInvocationsNonUniformAMD(i64vec4);" |
| |
| "uint64_t addInvocationsNonUniformAMD(uint64_t);" |
| "u64vec2 addInvocationsNonUniformAMD(u64vec2);" |
| "u64vec3 addInvocationsNonUniformAMD(u64vec3);" |
| "u64vec4 addInvocationsNonUniformAMD(u64vec4);" |
| |
| "float16_t addInvocationsNonUniformAMD(float16_t);" |
| "f16vec2 addInvocationsNonUniformAMD(f16vec2);" |
| "f16vec3 addInvocationsNonUniformAMD(f16vec3);" |
| "f16vec4 addInvocationsNonUniformAMD(f16vec4);" |
| |
| "float addInvocationsInclusiveScanNonUniformAMD(float);" |
| "vec2 addInvocationsInclusiveScanNonUniformAMD(vec2);" |
| "vec3 addInvocationsInclusiveScanNonUniformAMD(vec3);" |
| "vec4 addInvocationsInclusiveScanNonUniformAMD(vec4);" |
| |
| "int addInvocationsInclusiveScanNonUniformAMD(int);" |
| "ivec2 addInvocationsInclusiveScanNonUniformAMD(ivec2);" |
| "ivec3 addInvocationsInclusiveScanNonUniformAMD(ivec3);" |
| "ivec4 addInvocationsInclusiveScanNonUniformAMD(ivec4);" |
| |
| "uint addInvocationsInclusiveScanNonUniformAMD(uint);" |
| "uvec2 addInvocationsInclusiveScanNonUniformAMD(uvec2);" |
| "uvec3 addInvocationsInclusiveScanNonUniformAMD(uvec3);" |
| "uvec4 addInvocationsInclusiveScanNonUniformAMD(uvec4);" |
| |
| "double addInvocationsInclusiveScanNonUniformAMD(double);" |
| "dvec2 addInvocationsInclusiveScanNonUniformAMD(dvec2);" |
| "dvec3 addInvocationsInclusiveScanNonUniformAMD(dvec3);" |
| "dvec4 addInvocationsInclusiveScanNonUniformAMD(dvec4);" |
| |
| "int64_t addInvocationsInclusiveScanNonUniformAMD(int64_t);" |
| "i64vec2 addInvocationsInclusiveScanNonUniformAMD(i64vec2);" |
| "i64vec3 addInvocationsInclusiveScanNonUniformAMD(i64vec3);" |
| "i64vec4 addInvocationsInclusiveScanNonUniformAMD(i64vec4);" |
| |
| "uint64_t addInvocationsInclusiveScanNonUniformAMD(uint64_t);" |
| "u64vec2 addInvocationsInclusiveScanNonUniformAMD(u64vec2);" |
| "u64vec3 addInvocationsInclusiveScanNonUniformAMD(u64vec3);" |
| "u64vec4 addInvocationsInclusiveScanNonUniformAMD(u64vec4);" |
| |
| "float16_t addInvocationsInclusiveScanNonUniformAMD(float16_t);" |
| "f16vec2 addInvocationsInclusiveScanNonUniformAMD(f16vec2);" |
| "f16vec3 addInvocationsInclusiveScanNonUniformAMD(f16vec3);" |
| "f16vec4 addInvocationsInclusiveScanNonUniformAMD(f16vec4);" |
| |
| "float addInvocationsExclusiveScanNonUniformAMD(float);" |
| "vec2 addInvocationsExclusiveScanNonUniformAMD(vec2);" |
| "vec3 addInvocationsExclusiveScanNonUniformAMD(vec3);" |
| "vec4 addInvocationsExclusiveScanNonUniformAMD(vec4);" |
| |
| "int addInvocationsExclusiveScanNonUniformAMD(int);" |
| "ivec2 addInvocationsExclusiveScanNonUniformAMD(ivec2);" |
| "ivec3 addInvocationsExclusiveScanNonUniformAMD(ivec3);" |
| "ivec4 addInvocationsExclusiveScanNonUniformAMD(ivec4);" |
| |
| "uint addInvocationsExclusiveScanNonUniformAMD(uint);" |
| "uvec2 addInvocationsExclusiveScanNonUniformAMD(uvec2);" |
| "uvec3 addInvocationsExclusiveScanNonUniformAMD(uvec3);" |
| "uvec4 addInvocationsExclusiveScanNonUniformAMD(uvec4);" |
| |
| "double addInvocationsExclusiveScanNonUniformAMD(double);" |
| "dvec2 addInvocationsExclusiveScanNonUniformAMD(dvec2);" |
| "dvec3 addInvocationsExclusiveScanNonUniformAMD(dvec3);" |
| "dvec4 addInvocationsExclusiveScanNonUniformAMD(dvec4);" |
| |
| "int64_t addInvocationsExclusiveScanNonUniformAMD(int64_t);" |
| "i64vec2 addInvocationsExclusiveScanNonUniformAMD(i64vec2);" |
| "i64vec3 addInvocationsExclusiveScanNonUniformAMD(i64vec3);" |
| "i64vec4 addInvocationsExclusiveScanNonUniformAMD(i64vec4);" |
| |
| "uint64_t addInvocationsExclusiveScanNonUniformAMD(uint64_t);" |
| "u64vec2 addInvocationsExclusiveScanNonUniformAMD(u64vec2);" |
| "u64vec3 addInvocationsExclusiveScanNonUniformAMD(u64vec3);" |
| "u64vec4 addInvocationsExclusiveScanNonUniformAMD(u64vec4);" |
| |
| "float16_t addInvocationsExclusiveScanNonUniformAMD(float16_t);" |
| "f16vec2 addInvocationsExclusiveScanNonUniformAMD(f16vec2);" |
| "f16vec3 addInvocationsExclusiveScanNonUniformAMD(f16vec3);" |
| "f16vec4 addInvocationsExclusiveScanNonUniformAMD(f16vec4);" |
| |
| "float swizzleInvocationsAMD(float, uvec4);" |
| "vec2 swizzleInvocationsAMD(vec2, uvec4);" |
| "vec3 swizzleInvocationsAMD(vec3, uvec4);" |
| "vec4 swizzleInvocationsAMD(vec4, uvec4);" |
| |
| "int swizzleInvocationsAMD(int, uvec4);" |
| "ivec2 swizzleInvocationsAMD(ivec2, uvec4);" |
| "ivec3 swizzleInvocationsAMD(ivec3, uvec4);" |
| "ivec4 swizzleInvocationsAMD(ivec4, uvec4);" |
| |
| "uint swizzleInvocationsAMD(uint, uvec4);" |
| "uvec2 swizzleInvocationsAMD(uvec2, uvec4);" |
| "uvec3 swizzleInvocationsAMD(uvec3, uvec4);" |
| "uvec4 swizzleInvocationsAMD(uvec4, uvec4);" |
| |
| "float swizzleInvocationsMaskedAMD(float, uvec3);" |
| "vec2 swizzleInvocationsMaskedAMD(vec2, uvec3);" |
| "vec3 swizzleInvocationsMaskedAMD(vec3, uvec3);" |
| "vec4 swizzleInvocationsMaskedAMD(vec4, uvec3);" |
| |
| "int swizzleInvocationsMaskedAMD(int, uvec3);" |
| "ivec2 swizzleInvocationsMaskedAMD(ivec2, uvec3);" |
| "ivec3 swizzleInvocationsMaskedAMD(ivec3, uvec3);" |
| "ivec4 swizzleInvocationsMaskedAMD(ivec4, uvec3);" |
| |
| "uint swizzleInvocationsMaskedAMD(uint, uvec3);" |
| "uvec2 swizzleInvocationsMaskedAMD(uvec2, uvec3);" |
| "uvec3 swizzleInvocationsMaskedAMD(uvec3, uvec3);" |
| "uvec4 swizzleInvocationsMaskedAMD(uvec4, uvec3);" |
| |
| "float writeInvocationAMD(float, float, uint);" |
| "vec2 writeInvocationAMD(vec2, vec2, uint);" |
| "vec3 writeInvocationAMD(vec3, vec3, uint);" |
| "vec4 writeInvocationAMD(vec4, vec4, uint);" |
| |
| "int writeInvocationAMD(int, int, uint);" |
| "ivec2 writeInvocationAMD(ivec2, ivec2, uint);" |
| "ivec3 writeInvocationAMD(ivec3, ivec3, uint);" |
| "ivec4 writeInvocationAMD(ivec4, ivec4, uint);" |
| |
| "uint writeInvocationAMD(uint, uint, uint);" |
| "uvec2 writeInvocationAMD(uvec2, uvec2, uint);" |
| "uvec3 writeInvocationAMD(uvec3, uvec3, uint);" |
| "uvec4 writeInvocationAMD(uvec4, uvec4, uint);" |
| |
| "uint mbcntAMD(uint64_t);" |
| |
| "\n"); |
| } |
| |
| // GL_AMD_gcn_shader |
| if (profile != EEsProfile && version >= 450) { |
| commonBuiltins.append( |
| "float cubeFaceIndexAMD(vec3);" |
| "vec2 cubeFaceCoordAMD(vec3);" |
| "uint64_t timeAMD();" |
| |
| "\n"); |
| } |
| |
| // GL_AMD_shader_fragment_mask |
| if (profile != EEsProfile && version >= 450) { |
| commonBuiltins.append( |
| "uint fragmentMaskFetchAMD(sampler2DMS, ivec2);" |
| "uint fragmentMaskFetchAMD(isampler2DMS, ivec2);" |
| "uint fragmentMaskFetchAMD(usampler2DMS, ivec2);" |
| |
| "uint fragmentMaskFetchAMD(sampler2DMSArray, ivec3);" |
| "uint fragmentMaskFetchAMD(isampler2DMSArray, ivec3);" |
| "uint fragmentMaskFetchAMD(usampler2DMSArray, ivec3);" |
| |
| "vec4 fragmentFetchAMD(sampler2DMS, ivec2, uint);" |
| "ivec4 fragmentFetchAMD(isampler2DMS, ivec2, uint);" |
| "uvec4 fragmentFetchAMD(usampler2DMS, ivec2, uint);" |
| |
| "vec4 fragmentFetchAMD(sampler2DMSArray, ivec3, uint);" |
| "ivec4 fragmentFetchAMD(isampler2DMSArray, ivec3, uint);" |
| "uvec4 fragmentFetchAMD(usampler2DMSArray, ivec3, uint);" |
| |
| "\n"); |
| } |
| |
| #endif // AMD_EXTENSIONS |
| |
// GL_AMD_gpu_shader_half_float/Explicit types
if (profile != EEsProfile && version >= 450) {
    // Declares float16_t / f16vecN / f16matNxM overloads of the standard GLSL
    // built-in functions, mirroring the 32-bit float set. Grouped below to
    // follow the GLSL spec's built-in function categories (angle/trig,
    // exponential, common, pack/unpack, geometric, matrix, vector relational).
    commonBuiltins.append(
        // Angle and trigonometry functions
        "float16_t radians(float16_t);"
        "f16vec2 radians(f16vec2);"
        "f16vec3 radians(f16vec3);"
        "f16vec4 radians(f16vec4);"

        "float16_t degrees(float16_t);"
        "f16vec2 degrees(f16vec2);"
        "f16vec3 degrees(f16vec3);"
        "f16vec4 degrees(f16vec4);"

        "float16_t sin(float16_t);"
        "f16vec2 sin(f16vec2);"
        "f16vec3 sin(f16vec3);"
        "f16vec4 sin(f16vec4);"

        "float16_t cos(float16_t);"
        "f16vec2 cos(f16vec2);"
        "f16vec3 cos(f16vec3);"
        "f16vec4 cos(f16vec4);"

        "float16_t tan(float16_t);"
        "f16vec2 tan(f16vec2);"
        "f16vec3 tan(f16vec3);"
        "f16vec4 tan(f16vec4);"

        "float16_t asin(float16_t);"
        "f16vec2 asin(f16vec2);"
        "f16vec3 asin(f16vec3);"
        "f16vec4 asin(f16vec4);"

        "float16_t acos(float16_t);"
        "f16vec2 acos(f16vec2);"
        "f16vec3 acos(f16vec3);"
        "f16vec4 acos(f16vec4);"

        // Two-argument atan(y, x) followed by single-argument atan(y_over_x)
        "float16_t atan(float16_t, float16_t);"
        "f16vec2 atan(f16vec2, f16vec2);"
        "f16vec3 atan(f16vec3, f16vec3);"
        "f16vec4 atan(f16vec4, f16vec4);"

        "float16_t atan(float16_t);"
        "f16vec2 atan(f16vec2);"
        "f16vec3 atan(f16vec3);"
        "f16vec4 atan(f16vec4);"

        "float16_t sinh(float16_t);"
        "f16vec2 sinh(f16vec2);"
        "f16vec3 sinh(f16vec3);"
        "f16vec4 sinh(f16vec4);"

        "float16_t cosh(float16_t);"
        "f16vec2 cosh(f16vec2);"
        "f16vec3 cosh(f16vec3);"
        "f16vec4 cosh(f16vec4);"

        "float16_t tanh(float16_t);"
        "f16vec2 tanh(f16vec2);"
        "f16vec3 tanh(f16vec3);"
        "f16vec4 tanh(f16vec4);"

        "float16_t asinh(float16_t);"
        "f16vec2 asinh(f16vec2);"
        "f16vec3 asinh(f16vec3);"
        "f16vec4 asinh(f16vec4);"

        "float16_t acosh(float16_t);"
        "f16vec2 acosh(f16vec2);"
        "f16vec3 acosh(f16vec3);"
        "f16vec4 acosh(f16vec4);"

        "float16_t atanh(float16_t);"
        "f16vec2 atanh(f16vec2);"
        "f16vec3 atanh(f16vec3);"
        "f16vec4 atanh(f16vec4);"

        // Exponential functions
        "float16_t pow(float16_t, float16_t);"
        "f16vec2 pow(f16vec2, f16vec2);"
        "f16vec3 pow(f16vec3, f16vec3);"
        "f16vec4 pow(f16vec4, f16vec4);"

        "float16_t exp(float16_t);"
        "f16vec2 exp(f16vec2);"
        "f16vec3 exp(f16vec3);"
        "f16vec4 exp(f16vec4);"

        "float16_t log(float16_t);"
        "f16vec2 log(f16vec2);"
        "f16vec3 log(f16vec3);"
        "f16vec4 log(f16vec4);"

        "float16_t exp2(float16_t);"
        "f16vec2 exp2(f16vec2);"
        "f16vec3 exp2(f16vec3);"
        "f16vec4 exp2(f16vec4);"

        "float16_t log2(float16_t);"
        "f16vec2 log2(f16vec2);"
        "f16vec3 log2(f16vec3);"
        "f16vec4 log2(f16vec4);"

        "float16_t sqrt(float16_t);"
        "f16vec2 sqrt(f16vec2);"
        "f16vec3 sqrt(f16vec3);"
        "f16vec4 sqrt(f16vec4);"

        "float16_t inversesqrt(float16_t);"
        "f16vec2 inversesqrt(f16vec2);"
        "f16vec3 inversesqrt(f16vec3);"
        "f16vec4 inversesqrt(f16vec4);"

        // Common functions
        "float16_t abs(float16_t);"
        "f16vec2 abs(f16vec2);"
        "f16vec3 abs(f16vec3);"
        "f16vec4 abs(f16vec4);"

        "float16_t sign(float16_t);"
        "f16vec2 sign(f16vec2);"
        "f16vec3 sign(f16vec3);"
        "f16vec4 sign(f16vec4);"

        "float16_t floor(float16_t);"
        "f16vec2 floor(f16vec2);"
        "f16vec3 floor(f16vec3);"
        "f16vec4 floor(f16vec4);"

        "float16_t trunc(float16_t);"
        "f16vec2 trunc(f16vec2);"
        "f16vec3 trunc(f16vec3);"
        "f16vec4 trunc(f16vec4);"

        "float16_t round(float16_t);"
        "f16vec2 round(f16vec2);"
        "f16vec3 round(f16vec3);"
        "f16vec4 round(f16vec4);"

        "float16_t roundEven(float16_t);"
        "f16vec2 roundEven(f16vec2);"
        "f16vec3 roundEven(f16vec3);"
        "f16vec4 roundEven(f16vec4);"

        "float16_t ceil(float16_t);"
        "f16vec2 ceil(f16vec2);"
        "f16vec3 ceil(f16vec3);"
        "f16vec4 ceil(f16vec4);"

        "float16_t fract(float16_t);"
        "f16vec2 fract(f16vec2);"
        "f16vec3 fract(f16vec3);"
        "f16vec4 fract(f16vec4);"

        // mod/min/max/clamp/mix each get vector-vector and vector-scalar forms
        "float16_t mod(float16_t, float16_t);"
        "f16vec2 mod(f16vec2, float16_t);"
        "f16vec3 mod(f16vec3, float16_t);"
        "f16vec4 mod(f16vec4, float16_t);"
        "f16vec2 mod(f16vec2, f16vec2);"
        "f16vec3 mod(f16vec3, f16vec3);"
        "f16vec4 mod(f16vec4, f16vec4);"

        "float16_t modf(float16_t, out float16_t);"
        "f16vec2 modf(f16vec2, out f16vec2);"
        "f16vec3 modf(f16vec3, out f16vec3);"
        "f16vec4 modf(f16vec4, out f16vec4);"

        "float16_t min(float16_t, float16_t);"
        "f16vec2 min(f16vec2, float16_t);"
        "f16vec3 min(f16vec3, float16_t);"
        "f16vec4 min(f16vec4, float16_t);"
        "f16vec2 min(f16vec2, f16vec2);"
        "f16vec3 min(f16vec3, f16vec3);"
        "f16vec4 min(f16vec4, f16vec4);"

        "float16_t max(float16_t, float16_t);"
        "f16vec2 max(f16vec2, float16_t);"
        "f16vec3 max(f16vec3, float16_t);"
        "f16vec4 max(f16vec4, float16_t);"
        "f16vec2 max(f16vec2, f16vec2);"
        "f16vec3 max(f16vec3, f16vec3);"
        "f16vec4 max(f16vec4, f16vec4);"

        "float16_t clamp(float16_t, float16_t, float16_t);"
        "f16vec2 clamp(f16vec2, float16_t, float16_t);"
        "f16vec3 clamp(f16vec3, float16_t, float16_t);"
        "f16vec4 clamp(f16vec4, float16_t, float16_t);"
        "f16vec2 clamp(f16vec2, f16vec2, f16vec2);"
        "f16vec3 clamp(f16vec3, f16vec3, f16vec3);"
        "f16vec4 clamp(f16vec4, f16vec4, f16vec4);"

        // mix: float16 blend factor (scalar and vector) plus boolean-select form
        "float16_t mix(float16_t, float16_t, float16_t);"
        "f16vec2 mix(f16vec2, f16vec2, float16_t);"
        "f16vec3 mix(f16vec3, f16vec3, float16_t);"
        "f16vec4 mix(f16vec4, f16vec4, float16_t);"
        "f16vec2 mix(f16vec2, f16vec2, f16vec2);"
        "f16vec3 mix(f16vec3, f16vec3, f16vec3);"
        "f16vec4 mix(f16vec4, f16vec4, f16vec4);"
        "float16_t mix(float16_t, float16_t, bool);"
        "f16vec2 mix(f16vec2, f16vec2, bvec2);"
        "f16vec3 mix(f16vec3, f16vec3, bvec3);"
        "f16vec4 mix(f16vec4, f16vec4, bvec4);"

        "float16_t step(float16_t, float16_t);"
        "f16vec2 step(f16vec2, f16vec2);"
        "f16vec3 step(f16vec3, f16vec3);"
        "f16vec4 step(f16vec4, f16vec4);"
        "f16vec2 step(float16_t, f16vec2);"
        "f16vec3 step(float16_t, f16vec3);"
        "f16vec4 step(float16_t, f16vec4);"

        "float16_t smoothstep(float16_t, float16_t, float16_t);"
        "f16vec2 smoothstep(f16vec2, f16vec2, f16vec2);"
        "f16vec3 smoothstep(f16vec3, f16vec3, f16vec3);"
        "f16vec4 smoothstep(f16vec4, f16vec4, f16vec4);"
        "f16vec2 smoothstep(float16_t, float16_t, f16vec2);"
        "f16vec3 smoothstep(float16_t, float16_t, f16vec3);"
        "f16vec4 smoothstep(float16_t, float16_t, f16vec4);"

        "bool isnan(float16_t);"
        "bvec2 isnan(f16vec2);"
        "bvec3 isnan(f16vec3);"
        "bvec4 isnan(f16vec4);"

        "bool isinf(float16_t);"
        "bvec2 isinf(f16vec2);"
        "bvec3 isinf(f16vec3);"
        "bvec4 isinf(f16vec4);"

        "float16_t fma(float16_t, float16_t, float16_t);"
        "f16vec2 fma(f16vec2, f16vec2, f16vec2);"
        "f16vec3 fma(f16vec3, f16vec3, f16vec3);"
        "f16vec4 fma(f16vec4, f16vec4, f16vec4);"

        "float16_t frexp(float16_t, out int);"
        "f16vec2 frexp(f16vec2, out ivec2);"
        "f16vec3 frexp(f16vec3, out ivec3);"
        "f16vec4 frexp(f16vec4, out ivec4);"

        "float16_t ldexp(float16_t, in int);"
        "f16vec2 ldexp(f16vec2, in ivec2);"
        "f16vec3 ldexp(f16vec3, in ivec3);"
        "f16vec4 ldexp(f16vec4, in ivec4);"

        // Pack/unpack: two float16s packed into one uint
        "uint packFloat2x16(f16vec2);"
        "f16vec2 unpackFloat2x16(uint);"

        // Geometric functions
        "float16_t length(float16_t);"
        "float16_t length(f16vec2);"
        "float16_t length(f16vec3);"
        "float16_t length(f16vec4);"

        "float16_t distance(float16_t, float16_t);"
        "float16_t distance(f16vec2, f16vec2);"
        "float16_t distance(f16vec3, f16vec3);"
        "float16_t distance(f16vec4, f16vec4);"

        "float16_t dot(float16_t, float16_t);"
        "float16_t dot(f16vec2, f16vec2);"
        "float16_t dot(f16vec3, f16vec3);"
        "float16_t dot(f16vec4, f16vec4);"

        "f16vec3 cross(f16vec3, f16vec3);"

        "float16_t normalize(float16_t);"
        "f16vec2 normalize(f16vec2);"
        "f16vec3 normalize(f16vec3);"
        "f16vec4 normalize(f16vec4);"

        "float16_t faceforward(float16_t, float16_t, float16_t);"
        "f16vec2 faceforward(f16vec2, f16vec2, f16vec2);"
        "f16vec3 faceforward(f16vec3, f16vec3, f16vec3);"
        "f16vec4 faceforward(f16vec4, f16vec4, f16vec4);"

        "float16_t reflect(float16_t, float16_t);"
        "f16vec2 reflect(f16vec2, f16vec2);"
        "f16vec3 reflect(f16vec3, f16vec3);"
        "f16vec4 reflect(f16vec4, f16vec4);"

        "float16_t refract(float16_t, float16_t, float16_t);"
        "f16vec2 refract(f16vec2, f16vec2, float16_t);"
        "f16vec3 refract(f16vec3, f16vec3, float16_t);"
        "f16vec4 refract(f16vec4, f16vec4, float16_t);"

        // Matrix functions, for all square and non-square f16 matrix shapes
        "f16mat2 matrixCompMult(f16mat2, f16mat2);"
        "f16mat3 matrixCompMult(f16mat3, f16mat3);"
        "f16mat4 matrixCompMult(f16mat4, f16mat4);"
        "f16mat2x3 matrixCompMult(f16mat2x3, f16mat2x3);"
        "f16mat2x4 matrixCompMult(f16mat2x4, f16mat2x4);"
        "f16mat3x2 matrixCompMult(f16mat3x2, f16mat3x2);"
        "f16mat3x4 matrixCompMult(f16mat3x4, f16mat3x4);"
        "f16mat4x2 matrixCompMult(f16mat4x2, f16mat4x2);"
        "f16mat4x3 matrixCompMult(f16mat4x3, f16mat4x3);"

        "f16mat2 outerProduct(f16vec2, f16vec2);"
        "f16mat3 outerProduct(f16vec3, f16vec3);"
        "f16mat4 outerProduct(f16vec4, f16vec4);"
        "f16mat2x3 outerProduct(f16vec3, f16vec2);"
        "f16mat3x2 outerProduct(f16vec2, f16vec3);"
        "f16mat2x4 outerProduct(f16vec4, f16vec2);"
        "f16mat4x2 outerProduct(f16vec2, f16vec4);"
        "f16mat3x4 outerProduct(f16vec4, f16vec3);"
        "f16mat4x3 outerProduct(f16vec3, f16vec4);"

        "f16mat2 transpose(f16mat2);"
        "f16mat3 transpose(f16mat3);"
        "f16mat4 transpose(f16mat4);"
        "f16mat2x3 transpose(f16mat3x2);"
        "f16mat3x2 transpose(f16mat2x3);"
        "f16mat2x4 transpose(f16mat4x2);"
        "f16mat4x2 transpose(f16mat2x4);"
        "f16mat3x4 transpose(f16mat4x3);"
        "f16mat4x3 transpose(f16mat3x4);"

        "float16_t determinant(f16mat2);"
        "float16_t determinant(f16mat3);"
        "float16_t determinant(f16mat4);"

        "f16mat2 inverse(f16mat2);"
        "f16mat3 inverse(f16mat3);"
        "f16mat4 inverse(f16mat4);"

        // Vector relational functions (component-wise comparisons)
        "bvec2 lessThan(f16vec2, f16vec2);"
        "bvec3 lessThan(f16vec3, f16vec3);"
        "bvec4 lessThan(f16vec4, f16vec4);"

        "bvec2 lessThanEqual(f16vec2, f16vec2);"
        "bvec3 lessThanEqual(f16vec3, f16vec3);"
        "bvec4 lessThanEqual(f16vec4, f16vec4);"

        "bvec2 greaterThan(f16vec2, f16vec2);"
        "bvec3 greaterThan(f16vec3, f16vec3);"
        "bvec4 greaterThan(f16vec4, f16vec4);"

        "bvec2 greaterThanEqual(f16vec2, f16vec2);"
        "bvec3 greaterThanEqual(f16vec3, f16vec3);"
        "bvec4 greaterThanEqual(f16vec4, f16vec4);"

        "bvec2 equal(f16vec2, f16vec2);"
        "bvec3 equal(f16vec3, f16vec3);"
        "bvec4 equal(f16vec4, f16vec4);"

        "bvec2 notEqual(f16vec2, f16vec2);"
        "bvec3 notEqual(f16vec3, f16vec3);"
        "bvec4 notEqual(f16vec4, f16vec4);"

        "\n");
}
| |
| // Explicit types |
| if (profile != EEsProfile && version >= 450) { |
| commonBuiltins.append( |
| "int8_t abs(int8_t);" |
| "i8vec2 abs(i8vec2);" |
| "i8vec3 abs(i8vec3);" |
| "i8vec4 abs(i8vec4);" |
| |
| "int8_t sign(int8_t);" |
| "i8vec2 sign(i8vec2);" |
| "i8vec3 sign(i8vec3);" |
| "i8vec4 sign(i8vec4);" |
| |
| "int8_t min(int8_t x, int8_t y);" |
| "i8vec2 min(i8vec2 x, int8_t y);" |
| "i8vec3 min(i8vec3 x, int8_t y);" |
| "i8vec4 min(i8vec4 x, int8_t y);" |
| "i8vec2 min(i8vec2 x, i8vec2 y);" |
| "i8vec3 min(i8vec3 x, i8vec3 y);" |
| "i8vec4 min(i8vec4 x, i8vec4 y);" |
| |
| "uint8_t min(uint8_t x, uint8_t y);" |
| "u8vec2 min(u8vec2 x, uint8_t y);" |
| "u8vec3 min(u8vec3 x, uint8_t y);" |
| "u8vec4 min(u8vec4 x, uint8_t y);" |
| "u8vec2 min(u8vec2 x, u8vec2 y);" |
| "u8vec3 min(u8vec3 x, u8vec3 y);" |
| "u8vec4 min(u8vec4 x, u8vec4 y);" |
| |
| "int8_t max(int8_t x, int8_t y);" |
| "i8vec2 max(i8vec2 x, int8_t y);" |
| "i8vec3 max(i8vec3 x, int8_t y);" |
| "i8vec4 max(i8vec4 x, int8_t y);" |
| "i8vec2 max(i8vec2 x, i8vec2 y);" |
| "i8vec3 max(i8vec3 x, i8vec3 y);" |
| "i8vec4 max(i8vec4 x, i8vec4 y);" |
| |
| "uint8_t max(uint8_t x, uint8_t y);" |
| "u8vec2 max(u8vec2 x, uint8_t y);" |
| "u8vec3 max(u8vec3 x, uint8_t y);" |
| "u8vec4 max(u8vec4 x, uint8_t y);" |
| "u8vec2 max(u8vec2 x, u8vec2 y);" |
| "u8vec3 max(u8vec3 x, u8vec3 y);" |
| "u8vec4 max(u8vec4 x, u8vec4 y);" |
| |
| "int8_t clamp(int8_t x, int8_t minVal, int8_t maxVal);" |
| "i8vec2 clamp(i8vec2 x, int8_t minVal, int8_t maxVal);" |
| "i8vec3 clamp(i8vec3 x, int8_t minVal, int8_t maxVal);" |
| "i8vec4 clamp(i8vec4 x, int8_t minVal, int8_t maxVal);" |
| "i8vec2 clamp(i8vec2 x, i8vec2 minVal, i8vec2 maxVal);" |
| "i8vec3 clamp(i8vec3 x, i8vec3 minVal, i8vec3 maxVal);" |
| "i8vec4 clamp(i8vec4 x, i8vec4 minVal, i8vec4 maxVal);" |
| |
| "uint8_t clamp(uint8_t x, uint8_t minVal, uint8_t maxVal);" |
| "u8vec2 clamp(u8vec2 x, uint8_t minVal, uint8_t maxVal);" |
| "u8vec3 clamp(u8vec3 x, uint8_t minVal, uint8_t maxVal);" |
| "u8vec4 clamp(u8vec4 x, uint8_t minVal, uint8_t maxVal);" |
| "u8vec2 clamp(u8vec2 x, u8vec2 minVal, u8vec2 maxVal);" |
| "u8vec3 clamp(u8vec3 x, u8vec3 minVal, u8vec3 maxVal);" |
| "u8vec4 clamp(u8vec4 x, u8vec4 minVal, u8vec4 maxVal);" |
| |
| "int8_t mix(int8_t, int8_t, bool);" |
| "i8vec2 mix(i8vec2, i8vec2, bvec2);" |
| "i8vec3 mix(i8vec3, i8vec3, bvec3);" |
| "i8vec4 mix(i8vec4, i8vec4, bvec4);" |
| "uint8_t mix(uint8_t, uint8_t, bool);" |
| "u8vec2 mix(u8vec2, u8vec2, bvec2);" |
| "u8vec3 mix(u8vec3, u8vec3, bvec3);" |
| "u8vec4 mix(u8vec4, u8vec4, bvec4);" |
| |
| "bvec2 lessThan(i8vec2, i8vec2);" |
| "bvec3 lessThan(i8vec3, i8vec3);" |
| "bvec4 lessThan(i8vec4, i8vec4);" |
| "bvec2 lessThan(u8vec2, u8vec2);" |
| "bvec3 lessThan(u8vec3, u8vec3);" |
| "bvec4 lessThan(u8vec4, u8vec4);" |
| |
| "bvec2 lessThanEqual(i8vec2, i8vec2);" |
| "bvec3 lessThanEqual(i8vec3, i8vec3);" |
| "bvec4 lessThanEqual(i8vec4, i8vec4);" |
| "bvec2 lessThanEqual(u8vec2, u8vec2);" |
| "bvec3 lessThanEqual(u8vec3, u8vec3);" |
| "bvec4 lessThanEqual(u8vec4, u8vec4);" |
| |
| "bvec2 greaterThan(i8vec2, i8vec2);" |
| "bvec3 greaterThan(i8vec3, i8vec3);" |
| "bvec4 greaterThan(i8vec4, i8vec4);" |
| "bvec2 greaterThan(u8vec2, u8vec2);" |
| "bvec3 greaterThan(u8vec3, u8vec3);" |
| "bvec4 greaterThan(u8vec4, u8vec4);" |
| |
| "bvec2 greaterThanEqual(i8vec2, i8vec2);" |
| "bvec3 greaterThanEqual(i8vec3, i8vec3);" |
| "bvec4 greaterThanEqual(i8vec4, i8vec4);" |
| "bvec2 greaterThanEqual(u8vec2, u8vec2);" |
| "bvec3 greaterThanEqual(u8vec3, u8vec3);" |
| "bvec4 greaterThanEqual(u8vec4, u8vec4);" |
| |
| "bvec2 equal(i8vec2, i8vec2);" |
| "bvec3 equal(i8vec3, i8vec3);" |
| "bvec4 equal(i8vec4, i8vec4);" |
| "bvec2 equal(u8vec2, u8vec2);" |
| "bvec3 equal(u8vec3, u8vec3);" |
| "bvec4 equal(u8vec4, u8vec4);" |
| |
| "bvec2 notEqual(i8vec2, i8vec2);" |
| "bvec3 notEqual(i8vec3, i8vec3);" |
| "bvec4 notEqual(i8vec4, i8vec4);" |
| "bvec2 notEqual(u8vec2, u8vec2);" |
| "bvec3 notEqual(u8vec3, u8vec3);" |
| "bvec4 notEqual(u8vec4, u8vec4);" |
| |
| " int8_t bitfieldExtract( int8_t, int8_t, int8_t);" |
| "i8vec2 bitfieldExtract(i8vec2, int8_t, int8_t);" |
| "i8vec3 bitfieldExtract(i8vec3, int8_t, int8_t);" |
| "i8vec4 bitfieldExtract(i8vec4, int8_t, int8_t);" |
| |
| " uint8_t bitfieldExtract( uint8_t, int8_t, int8_t);" |
| "u8vec2 bitfieldExtract(u8vec2, int8_t, int8_t);" |
| "u8vec3 bitfieldExtract(u8vec3, int8_t, int8_t);" |
| "u8vec4 bitfieldExtract(u8vec4, int8_t, int8_t);" |
| |
| " int8_t bitfieldInsert( int8_t base, int8_t, int8_t, int8_t);" |
| "i8vec2 bitfieldInsert(i8vec2 base, i8vec2, int8_t, int8_t);" |
| "i8vec3 bitfieldInsert(i8vec3 base, i8vec3, int8_t, int8_t);" |
| "i8vec4 bitfieldInsert(i8vec4 base, i8vec4, int8_t, int8_t);" |
| |
| " uint8_t bitfieldInsert( uint8_t base, uint8_t, int8_t, int8_t);" |
| "u8vec2 bitfieldInsert(u8vec2 base, u8vec2, int8_t, int8_t);" |
| "u8vec3 bitfieldInsert(u8vec3 base, u8vec3, int8_t, int8_t);" |
| "u8vec4 bitfieldInsert(u8vec4 base, u8vec4, int8_t, int8_t);" |
| |
| " int8_t bitCount( int8_t);" |
| "i8vec2 bitCount(i8vec2);" |
| "i8vec3 bitCount(i8vec3);" |
| "i8vec4 bitCount(i8vec4);" |
| |
| " int8_t bitCount( uint8_t);" |
| "i8vec2 bitCount(u8vec2);" |
| "i8vec3 bitCount(u8vec3);" |
| "i8vec4 bitCount(u8vec4);" |
| |
| " int8_t findLSB( int8_t);" |
| "i8vec2 findLSB(i8vec2);" |
| "i8vec3 findLSB(i8vec3);" |
| "i8vec4 findLSB(i8vec4);" |
| |
| " int8_t findLSB( uint8_t);" |
| "i8vec2 findLSB(u8vec2);" |
| "i8vec3 findLSB(u8vec3);" |
| "i8vec4 findLSB(u8vec4);" |
| |
| " int8_t findMSB( int8_t);" |
| "i8vec2 findMSB(i8vec2);" |
| "i8vec3 findMSB(i8vec3);" |
| "i8vec4 findMSB(i8vec4);" |
| |
| " int8_t findMSB( uint8_t);" |
| "i8vec2 findMSB(u8vec2);" |
| "i8vec3 findMSB(u8vec3);" |
| "i8vec4 findMSB(u8vec4);" |
| |
| "int16_t abs(int16_t);" |
| "i16vec2 abs(i16vec2);" |
| "i16vec3 abs(i16vec3);" |
| "i16vec4 abs(i16vec4);" |
| |
| "int16_t sign(int16_t);" |
| "i16vec2 sign(i16vec2);" |
| "i16vec3 sign(i16vec3);" |
| "i16vec4 sign(i16vec4);" |
| |
| "int16_t min(int16_t x, int16_t y);" |
| "i16vec2 min(i16vec2 x, int16_t y);" |
| "i16vec3 min(i16vec3 x, int16_t y);" |
| "i16vec4 min(i16vec4 x, int16_t y);" |
| "i16vec2 min(i16vec2 x, i16vec2 y);" |
| "i16vec3 min(i16vec3 x, i16vec3 y);" |
| "i16vec4 min(i16vec4 x, i16vec4 y);" |
| |
| "uint16_t min(uint16_t x, uint16_t y);" |
| "u16vec2 min(u16vec2 x, uint16_t y);" |
| "u16vec3 min(u16vec3 x, uint16_t y);" |
| "u16vec4 min(u16vec4 x, uint16_t y);" |
| "u16vec2 min(u16vec2 x, u16vec2 y);" |
| "u16vec3 min(u16vec3 x, u16vec3 y);" |
| "u16vec4 min(u16vec4 x, u16vec4 y);" |
| |
| "int16_t max(int16_t x, int16_t y);" |
| "i16vec2 max(i16vec2 x, int16_t y);" |
| "i16vec3 max(i16vec3 x, int16_t y);" |
| "i16vec4 max(i16vec4 x, int16_t y);" |
| "i16vec2 max(i16vec2 x, i16vec2 y);" |
| "i16vec3 max(i16vec3 x, i16vec3 y);" |
| "i16vec4 max(i16vec4 x, i16vec4 y);" |
| |
| "uint16_t max(uint16_t x, uint16_t y);" |
| "u16vec2 max(u16vec2 x, uint16_t y);" |
| "u16vec3 max(u16vec3 x, uint16_t y);" |
| "u16vec4 max(u16vec4 x, uint16_t y);" |
| "u16vec2 max(u16vec2 x, u16vec2 y);" |
| "u16vec3 max(u16vec3 x, u16vec3 y);" |
| "u16vec4 max(u16vec4 x, u16vec4 y);" |
| |
| "int16_t clamp(int16_t x, int16_t minVal, int16_t maxVal);" |
| "i16vec2 clamp(i16vec2 x, int16_t minVal, int16_t maxVal);" |
| "i16vec3 clamp(i16vec3 x, int16_t minVal, int16_t maxVal);" |
| "i16vec4 clamp(i16vec4 x, int16_t minVal, int16_t maxVal);" |
| "i16vec2 clamp(i16vec2 x, i16vec2 minVal, i16vec2 maxVal);" |
| "i16vec3 clamp(i16vec3 x, i16vec3 minVal, i16vec3 maxVal);" |
| "i16vec4 clamp(i16vec4 x, i16vec4 minVal, i16vec4 maxVal);" |
| |
| "uint16_t clamp(uint16_t x, uint16_t minVal, uint16_t maxVal);" |
| "u16vec2 clamp(u16vec2 x, uint16_t minVal, uint16_t maxVal);" |
| "u16vec3 clamp(u16vec3 x, uint16_t minVal, uint16_t maxVal);" |
| "u16vec4 clamp(u16vec4 x, uint16_t minVal, uint16_t maxVal);" |
| "u16vec2 clamp(u16vec2 x, u16vec2
|