| // |
| // Copyright (C) 2002-2005 3Dlabs Inc. Ltd. |
| // Copyright (C) 2012-2015 LunarG, Inc. |
| // Copyright (C) 2015-2018 Google, Inc. |
| // Copyright (C) 2017 ARM Limited. |
| // |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // |
| // Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // |
| // Redistributions in binary form must reproduce the above |
| // copyright notice, this list of conditions and the following |
| // disclaimer in the documentation and/or other materials provided |
| // with the distribution. |
| // |
| // Neither the name of 3Dlabs Inc. Ltd. nor the names of its |
| // contributors may be used to endorse or promote products derived |
| // from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
| // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| // POSSIBILITY OF SUCH DAMAGE. |
| // |
| |
| #include "ParseHelper.h" |
| #include "Scan.h" |
| |
| #include "../OSDependent/osinclude.h" |
| #include <algorithm> |
| |
| #include "preprocessor/PpContext.h" |
| |
| extern int yyparse(glslang::TParseContext*); |
| |
| namespace glslang { |
| |
| TParseContext::TParseContext(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, |
| int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, |
| TInfoSink& infoSink, bool forwardCompatible, EShMessages messages, |
| const TString* entryPoint) : |
| TParseContextBase(symbolTable, interm, parsingBuiltins, version, profile, spvVersion, language, |
| infoSink, forwardCompatible, messages, entryPoint), |
| inMain(false), |
| blockName(nullptr), |
| limits(resources.limits) |
| #ifndef GLSLANG_WEB |
| , |
| atomicUintOffsets(nullptr), anyIndexLimits(false) |
| #endif |
| { |
| // decide whether precision qualifiers should be ignored or respected |
| if (isEsProfile() || spvVersion.vulkan > 0) { |
| precisionManager.respectPrecisionQualifiers(); |
| if (! parsingBuiltins && language == EShLangFragment && !isEsProfile() && spvVersion.vulkan > 0) |
| precisionManager.warnAboutDefaults(); |
| } |
| |
| setPrecisionDefaults(); |
| |
| globalUniformDefaults.clear(); |
| globalUniformDefaults.layoutMatrix = ElmColumnMajor; |
| globalUniformDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd140 : ElpShared; |
| |
| globalBufferDefaults.clear(); |
| globalBufferDefaults.layoutMatrix = ElmColumnMajor; |
| globalBufferDefaults.layoutPacking = spvVersion.spv != 0 ? ElpStd430 : ElpShared; |
| |
| // use storage buffer on SPIR-V 1.3 and up |
| if (spvVersion.spv >= EShTargetSpv_1_3) |
| intermediate.setUseStorageBuffer(); |
| |
| globalInputDefaults.clear(); |
| globalOutputDefaults.clear(); |
| |
| #ifndef GLSLANG_WEB |
| // "Shaders in the transform |
| // feedback capturing mode have an initial global default of |
| // layout(xfb_buffer = 0) out;" |
| if (language == EShLangVertex || |
| language == EShLangTessControl || |
| language == EShLangTessEvaluation || |
| language == EShLangGeometry) |
| globalOutputDefaults.layoutXfbBuffer = 0; |
| |
| if (language == EShLangGeometry) |
| globalOutputDefaults.layoutStream = 0; |
| #endif |
| |
| if (entryPoint != nullptr && entryPoint->size() > 0 && *entryPoint != "main") |
| infoSink.info.message(EPrefixError, "Source entry point must be \"main\""); |
| } |
| |
| TParseContext::~TParseContext() |
| { |
| #ifndef GLSLANG_WEB |
| delete [] atomicUintOffsets; |
| #endif |
| } |
| |
| // Set up all default precisions as needed by the current environment. |
| // Intended just as a TParseContext constructor helper. |
| void TParseContext::setPrecisionDefaults() |
| { |
| // Set all precision defaults to EpqNone, which is correct for all types |
| // when not obeying precision qualifiers, and correct for types that don't |
| // have defaults (thus getting an error on use) when obeying precision |
| // qualifiers. |
| |
| for (int type = 0; type < EbtNumTypes; ++type) |
| defaultPrecision[type] = EpqNone; |
| |
| for (int type = 0; type < maxSamplerIndex; ++type) |
| defaultSamplerPrecision[type] = EpqNone; |
| |
| // replace with real precision defaults for those that have them |
| if (obeyPrecisionQualifiers()) { |
| if (isEsProfile()) { |
| // Most don't have defaults, a few default to lowp. |
| TSampler sampler; |
| sampler.set(EbtFloat, Esd2D); |
| defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; |
| sampler.set(EbtFloat, EsdCube); |
| defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; |
| sampler.set(EbtFloat, Esd2D); |
| sampler.setExternal(true); |
| defaultSamplerPrecision[computeSamplerTypeIndex(sampler)] = EpqLow; |
| } |
| |
| // If we are parsing built-in computational variables/functions, it is meaningful to record |
| // whether the built-in has no precision qualifier, as that ambiguity |
| // is used to resolve the precision from the supplied arguments/operands instead. |
| // So, we don't actually want to replace EpqNone with a default precision for built-ins. |
| if (! parsingBuiltins) { |
| if (isEsProfile() && language == EShLangFragment) { |
| defaultPrecision[EbtInt] = EpqMedium; |
| defaultPrecision[EbtUint] = EpqMedium; |
| } else { |
| defaultPrecision[EbtInt] = EpqHigh; |
| defaultPrecision[EbtUint] = EpqHigh; |
| defaultPrecision[EbtFloat] = EpqHigh; |
| } |
| |
| if (!isEsProfile()) { |
| // Non-ES profile |
| // All sampler precisions default to highp. |
| for (int type = 0; type < maxSamplerIndex; ++type) |
| defaultSamplerPrecision[type] = EpqHigh; |
| } |
| } |
| |
| defaultPrecision[EbtSampler] = EpqLow; |
| defaultPrecision[EbtAtomicUint] = EpqHigh; |
| } |
| } |
| |
| void TParseContext::setLimits(const TBuiltInResource& r) |
| { |
| resources = r; |
| intermediate.setLimits(r); |
| |
| #ifndef GLSLANG_WEB |
| anyIndexLimits = ! limits.generalAttributeMatrixVectorIndexing || |
| ! limits.generalConstantMatrixVectorIndexing || |
| ! limits.generalSamplerIndexing || |
| ! limits.generalUniformIndexing || |
| ! limits.generalVariableIndexing || |
| ! limits.generalVaryingIndexing; |
| |
| |
| // "Each binding point tracks its own current default offset for |
| // inheritance of subsequent variables using the same binding. The initial state of compilation is that all |
| // binding points have an offset of 0." |
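// (e.g., "layout(binding = 0) uniform atomic_uint a, b;" gives 'a' offset 0 and
// 'b' offset 4, since each counter advances its binding's default offset by 4)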
| atomicUintOffsets = new int[resources.maxAtomicCounterBindings]; |
| for (int b = 0; b < resources.maxAtomicCounterBindings; ++b) |
| atomicUintOffsets[b] = 0; |
| #endif |
| } |
| |
| // |
| // Parse an array of strings using yyparse, going through the |
| // preprocessor to tokenize the shader strings, then through |
| // the GLSL scanner. |
| // |
| // Returns true for successful acceptance of the shader, false if any errors. |
| // |
| bool TParseContext::parseShaderStrings(TPpContext& ppContext, TInputScanner& input, bool versionWillBeError) |
| { |
| currentScanner = &input; |
| ppContext.setInput(input, versionWillBeError); |
| yyparse(this); |
| |
| finish(); |
| |
| return numErrors == 0; |
| } |
| |
// This is called from bison when it has a parse (syntax) error.
// Note, though, that to stop cascading errors we set EOF, which will itself
// usually cause a syntax error, so in that case report the more accurate
// message that compilation is terminating.
| void TParseContext::parserError(const char* s) |
| { |
| if (! getScanner()->atEndOfInput() || numErrors == 0) |
| error(getCurrentLoc(), "", "", s, ""); |
| else |
| error(getCurrentLoc(), "compilation terminated", "", ""); |
| } |
| |
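// Handle a #pragma that made it through the preprocessor. The forms recognized
// here include, for example:
//     #pragma optimize(on)   or   #pragma optimize(off)
//     #pragma debug(on)      or   #pragma debug(off)
//     #pragma use_storage_buffer          (SPIR-V generation only)
//     #pragma use_vulkan_memory_model     (SPIR-V generation only)
// All pragmas are first passed to the pragma callback (if any); unrecognized
// ones are otherwise ignored.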
| void TParseContext::handlePragma(const TSourceLoc& loc, const TVector<TString>& tokens) |
| { |
| #ifndef GLSLANG_WEB |
| if (pragmaCallback) |
| pragmaCallback(loc.line, tokens); |
| |
| if (tokens.size() == 0) |
| return; |
| |
| if (tokens[0].compare("optimize") == 0) { |
| if (tokens.size() != 4) { |
| error(loc, "optimize pragma syntax is incorrect", "#pragma", ""); |
| return; |
| } |
| |
| if (tokens[1].compare("(") != 0) { |
| error(loc, "\"(\" expected after 'optimize' keyword", "#pragma", ""); |
| return; |
| } |
| |
| if (tokens[2].compare("on") == 0) |
| contextPragma.optimize = true; |
| else if (tokens[2].compare("off") == 0) |
| contextPragma.optimize = false; |
| else { |
| error(loc, "\"on\" or \"off\" expected after '(' for 'optimize' pragma", "#pragma", ""); |
| return; |
| } |
| |
| if (tokens[3].compare(")") != 0) { |
| error(loc, "\")\" expected to end 'optimize' pragma", "#pragma", ""); |
| return; |
| } |
| } else if (tokens[0].compare("debug") == 0) { |
| if (tokens.size() != 4) { |
| error(loc, "debug pragma syntax is incorrect", "#pragma", ""); |
| return; |
| } |
| |
| if (tokens[1].compare("(") != 0) { |
| error(loc, "\"(\" expected after 'debug' keyword", "#pragma", ""); |
| return; |
| } |
| |
| if (tokens[2].compare("on") == 0) |
| contextPragma.debug = true; |
| else if (tokens[2].compare("off") == 0) |
| contextPragma.debug = false; |
| else { |
| error(loc, "\"on\" or \"off\" expected after '(' for 'debug' pragma", "#pragma", ""); |
| return; |
| } |
| |
| if (tokens[3].compare(")") != 0) { |
| error(loc, "\")\" expected to end 'debug' pragma", "#pragma", ""); |
| return; |
| } |
| } else if (spvVersion.spv > 0 && tokens[0].compare("use_storage_buffer") == 0) { |
| if (tokens.size() != 1) |
| error(loc, "extra tokens", "#pragma", ""); |
| intermediate.setUseStorageBuffer(); |
| } else if (spvVersion.spv > 0 && tokens[0].compare("use_vulkan_memory_model") == 0) { |
| if (tokens.size() != 1) |
| error(loc, "extra tokens", "#pragma", ""); |
| intermediate.setUseVulkanMemoryModel(); |
| } else if (spvVersion.spv > 0 && tokens[0].compare("use_variable_pointers") == 0) { |
| if (tokens.size() != 1) |
| error(loc, "extra tokens", "#pragma", ""); |
| if (spvVersion.spv < glslang::EShTargetSpv_1_3) |
| error(loc, "requires SPIR-V 1.3", "#pragma use_variable_pointers", ""); |
| intermediate.setUseVariablePointers(); |
| } else if (tokens[0].compare("once") == 0) { |
| warn(loc, "not implemented", "#pragma once", ""); |
| } else if (tokens[0].compare("glslang_binary_double_output") == 0) |
| intermediate.setBinaryDoubleOutput(); |
| #endif |
| } |
| |
| // |
| // Handle seeing a variable identifier in the grammar. |
| // |
| TIntermTyped* TParseContext::handleVariable(const TSourceLoc& loc, TSymbol* symbol, const TString* string) |
| { |
| TIntermTyped* node = nullptr; |
| |
| // Error check for requiring specific extensions present. |
| if (symbol && symbol->getNumExtensions()) |
| requireExtensions(loc, symbol->getNumExtensions(), symbol->getExtensions(), symbol->getName().c_str()); |
| |
| #ifndef GLSLANG_WEB |
| if (symbol && symbol->isReadOnly()) { |
// All shared things containing an unsized array must be copied up
// on first use, so that all future references will share its array structure,
// so that editing the implicit size will affect all nodes consuming it,
// and so that editing the implicit size won't change the shared one.
| // |
| // If this is a variable or a block, check it and all it contains, but if this |
| // is a member of an anonymous block, check the whole block, as the whole block |
| // will need to be copied up if it contains an unsized array. |
| // |
| // This check is being done before the block-name check further down, so guard |
| // for that too. |
| if (!symbol->getType().isUnusableName()) { |
| if (symbol->getType().containsUnsizedArray() || |
| (symbol->getAsAnonMember() && |
| symbol->getAsAnonMember()->getAnonContainer().getType().containsUnsizedArray())) |
| makeEditable(symbol); |
| } |
| } |
| #endif |
| |
| const TVariable* variable; |
| const TAnonMember* anon = symbol ? symbol->getAsAnonMember() : nullptr; |
| if (anon) { |
| // It was a member of an anonymous container. |
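// (e.g., a member of an instance-name-less block like "uniform Transforms { mat4 mvp; };"
// that is referenced directly as "mvp")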
| |
| // Create a subtree for its dereference. |
| variable = anon->getAnonContainer().getAsVariable(); |
| TIntermTyped* container = intermediate.addSymbol(*variable, loc); |
| TIntermTyped* constNode = intermediate.addConstantUnion(anon->getMemberNumber(), loc); |
| node = intermediate.addIndex(EOpIndexDirectStruct, container, constNode, loc); |
| |
| node->setType(*(*variable->getType().getStruct())[anon->getMemberNumber()].type); |
| if (node->getType().hiddenMember()) |
| error(loc, "member of nameless block was not redeclared", string->c_str(), ""); |
| } else { |
| // Not a member of an anonymous container. |
| |
| // The symbol table search was done in the lexical phase. |
| // See if it was a variable. |
| variable = symbol ? symbol->getAsVariable() : nullptr; |
| if (variable) { |
| if (variable->getType().isUnusableName()) { |
| error(loc, "cannot be used (maybe an instance name is needed)", string->c_str(), ""); |
| variable = nullptr; |
| } |
| } else { |
| if (symbol) |
| error(loc, "variable name expected", string->c_str(), ""); |
| } |
| |
| // Recovery, if it wasn't found or was not a variable. |
| if (! variable) |
| variable = new TVariable(string, TType(EbtVoid)); |
| |
| if (variable->getType().getQualifier().isFrontEndConstant()) |
| node = intermediate.addConstantUnion(variable->getConstArray(), variable->getType(), loc); |
| else |
| node = intermediate.addSymbol(*variable, loc); |
| } |
| |
| if (variable->getType().getQualifier().isIo()) |
| intermediate.addIoAccessed(*string); |
| |
| if (variable->getType().isReference() && |
| variable->getType().getQualifier().bufferReferenceNeedsVulkanMemoryModel()) { |
| intermediate.setUseVulkanMemoryModel(); |
| } |
| |
| return node; |
| } |
| |
| // |
| // Handle seeing a base[index] dereference in the grammar. |
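// For example: a[i] on an array, v[i] on a vector, m[i] on a matrix (selecting
// a column), or indexing through a buffer reference.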
| // |
| TIntermTyped* TParseContext::handleBracketDereference(const TSourceLoc& loc, TIntermTyped* base, TIntermTyped* index) |
| { |
| int indexValue = 0; |
| if (index->getQualifier().isFrontEndConstant()) |
| indexValue = index->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| |
| // basic type checks... |
| variableCheck(base); |
| |
| if (! base->isArray() && ! base->isMatrix() && ! base->isVector() && ! base->getType().isCoopMat() && |
| ! base->isReference()) { |
| if (base->getAsSymbolNode()) |
| error(loc, " left of '[' is not of type array, matrix, or vector ", base->getAsSymbolNode()->getName().c_str(), ""); |
| else |
| error(loc, " left of '[' is not of type array, matrix, or vector ", "expression", ""); |
| |
| // Insert dummy error-recovery result |
| return intermediate.addConstantUnion(0.0, EbtFloat, loc); |
| } |
| |
| if (!base->isArray() && base->isVector()) { |
| if (base->getType().contains16BitFloat()) |
| requireFloat16Arithmetic(loc, "[", "does not operate on types containing float16"); |
| if (base->getType().contains16BitInt()) |
| requireInt16Arithmetic(loc, "[", "does not operate on types containing (u)int16"); |
| if (base->getType().contains8BitInt()) |
| requireInt8Arithmetic(loc, "[", "does not operate on types containing (u)int8"); |
| } |
| |
| // check for constant folding |
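// (e.g., with "const ivec2 v = ivec2(1, 2);" the expression v[1] folds to the constant 2)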
| if (base->getType().getQualifier().isFrontEndConstant() && index->getQualifier().isFrontEndConstant()) { |
| // both base and index are front-end constants |
| checkIndex(loc, base->getType(), indexValue); |
| return intermediate.foldDereference(base, indexValue, loc); |
| } |
| |
| // at least one of base and index is not a front-end constant variable... |
| TIntermTyped* result = nullptr; |
| |
| #ifndef GLSLANG_WEB |
| if (base->isReference() && ! base->isArray()) { |
| requireExtensions(loc, 1, &E_GL_EXT_buffer_reference2, "buffer reference indexing"); |
| result = intermediate.addBinaryMath(EOpAdd, base, index, loc); |
| result->setType(base->getType()); |
| return result; |
| } |
| if (base->getAsSymbolNode() && isIoResizeArray(base->getType())) |
| handleIoResizeArrayAccess(loc, base); |
| #endif |
| |
| if (index->getQualifier().isFrontEndConstant()) |
| checkIndex(loc, base->getType(), indexValue); |
| |
| if (index->getQualifier().isFrontEndConstant()) { |
| #ifndef GLSLANG_WEB |
| if (base->getType().isUnsizedArray()) { |
| base->getWritableType().updateImplicitArraySize(indexValue + 1); |
| // For 2D per-view builtin arrays, update the inner dimension size in parent type |
| if (base->getQualifier().isPerView() && base->getQualifier().builtIn != EbvNone) { |
| TIntermBinary* binaryNode = base->getAsBinaryNode(); |
| if (binaryNode) { |
| TType& leftType = binaryNode->getLeft()->getWritableType(); |
| TArraySizes& arraySizes = *leftType.getArraySizes(); |
| assert(arraySizes.getNumDims() == 2); |
| arraySizes.setDimSize(1, std::max(arraySizes.getDimSize(1), indexValue + 1)); |
| } |
| } |
| } else |
| #endif |
| checkIndex(loc, base->getType(), indexValue); |
| result = intermediate.addIndex(EOpIndexDirect, base, index, loc); |
| } else { |
| #ifndef GLSLANG_WEB |
| if (base->getType().isUnsizedArray()) { |
| // we have a variable index into an unsized array, which is okay, |
| // depending on the situation |
| if (base->getAsSymbolNode() && isIoResizeArray(base->getType())) |
| error(loc, "", "[", "array must be sized by a redeclaration or layout qualifier before being indexed with a variable"); |
| else { |
| // it is okay for a run-time sized array |
| checkRuntimeSizable(loc, *base); |
| } |
| base->getWritableType().setArrayVariablyIndexed(); |
| } |
| #endif |
| if (base->getBasicType() == EbtBlock) { |
| if (base->getQualifier().storage == EvqBuffer) |
| requireProfile(base->getLoc(), ~EEsProfile, "variable indexing buffer block array"); |
| else if (base->getQualifier().storage == EvqUniform) |
| profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, |
| "variable indexing uniform block array"); |
| else { |
| // input/output blocks either don't exist or can't be variably indexed |
| } |
| } else if (language == EShLangFragment && base->getQualifier().isPipeOutput()) |
| requireProfile(base->getLoc(), ~EEsProfile, "variable indexing fragment shader output array"); |
| else if (base->getBasicType() == EbtSampler && version >= 130) { |
| const char* explanation = "variable indexing sampler array"; |
| requireProfile(base->getLoc(), EEsProfile | ECoreProfile | ECompatibilityProfile, explanation); |
| profileRequires(base->getLoc(), EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, explanation); |
| profileRequires(base->getLoc(), ECoreProfile | ECompatibilityProfile, 400, nullptr, explanation); |
| } |
| |
| result = intermediate.addIndex(EOpIndexIndirect, base, index, loc); |
| } |
| |
| // Insert valid dereferenced result type |
| TType newType(base->getType(), 0); |
| if (base->getType().getQualifier().isConstant() && index->getQualifier().isConstant()) { |
| newType.getQualifier().storage = EvqConst; |
| // If base or index is a specialization constant, the result should also be a specialization constant. |
| if (base->getType().getQualifier().isSpecConstant() || index->getQualifier().isSpecConstant()) { |
| newType.getQualifier().makeSpecConstant(); |
| } |
| } else { |
| newType.getQualifier().storage = EvqTemporary; |
| newType.getQualifier().specConstant = false; |
| } |
| result->setType(newType); |
| |
| #ifndef GLSLANG_WEB |
| inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier()); |
| |
| // Propagate nonuniform |
| if (base->getQualifier().isNonUniform() || index->getQualifier().isNonUniform()) |
| result->getWritableType().getQualifier().nonUniform = true; |
| |
| if (anyIndexLimits) |
| handleIndexLimits(loc, base, index); |
| #endif |
| |
| return result; |
| } |
| |
| #ifndef GLSLANG_WEB |
| |
// Enforce the ES 2.0 (version 100) limitations on almost all index operations; indexing of vertex-shader uniforms is exempt.
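// For example, with generalSamplerIndexing disabled, indexing a sampler array as
// "textures[i]" saves the index expression in needsIndexLimitationChecking so it
// can be validated in post processing, once the loop induction variables are known.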
| void TParseContext::handleIndexLimits(const TSourceLoc& /*loc*/, TIntermTyped* base, TIntermTyped* index) |
| { |
| if ((! limits.generalSamplerIndexing && base->getBasicType() == EbtSampler) || |
| (! limits.generalUniformIndexing && base->getQualifier().isUniformOrBuffer() && language != EShLangVertex) || |
| (! limits.generalAttributeMatrixVectorIndexing && base->getQualifier().isPipeInput() && language == EShLangVertex && (base->getType().isMatrix() || base->getType().isVector())) || |
| (! limits.generalConstantMatrixVectorIndexing && base->getAsConstantUnion()) || |
| (! limits.generalVariableIndexing && ! base->getType().getQualifier().isUniformOrBuffer() && |
| ! base->getType().getQualifier().isPipeInput() && |
| ! base->getType().getQualifier().isPipeOutput() && |
| ! base->getType().getQualifier().isConstant()) || |
| (! limits.generalVaryingIndexing && (base->getType().getQualifier().isPipeInput() || |
| base->getType().getQualifier().isPipeOutput()))) { |
// it's too early to know what the loop induction variables are, so save it for post processing
| needsIndexLimitationChecking.push_back(index); |
| } |
| } |
| |
// Make a shared symbol have a non-shared version that can be edited by the current
// compile, such that editing its type will not change the shared version and will
// affect all nodes sharing it.
| void TParseContext::makeEditable(TSymbol*& symbol) |
| { |
| TParseContextBase::makeEditable(symbol); |
| |
| // See if it's tied to IO resizing |
| if (isIoResizeArray(symbol->getType())) |
| ioArraySymbolResizeList.push_back(symbol); |
| } |
| |
// Return true if this is a geometry shader input array, tessellation control output array,
// fragment shader pervertexNV input array, or mesh shader output array.
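// For example, gl_in[] in a geometry shader or gl_out[] in a tessellation control
// shader: arrays whose size is implied by layout qualifiers rather than by the
// declaration itself.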
| bool TParseContext::isIoResizeArray(const TType& type) const |
| { |
| return type.isArray() && |
| ((language == EShLangGeometry && type.getQualifier().storage == EvqVaryingIn) || |
| (language == EShLangTessControl && type.getQualifier().storage == EvqVaryingOut && |
| ! type.getQualifier().patch) || |
| (language == EShLangFragment && type.getQualifier().storage == EvqVaryingIn && |
| type.getQualifier().pervertexNV) || |
| (language == EShLangMeshNV && type.getQualifier().storage == EvqVaryingOut && |
| !type.getQualifier().perTaskNV)); |
| } |
| |
| // If an array is not isIoResizeArray() but is an io array, make sure it has the right size |
| void TParseContext::fixIoArraySize(const TSourceLoc& loc, TType& type) |
| { |
| if (! type.isArray() || type.getQualifier().patch || symbolTable.atBuiltInLevel()) |
| return; |
| |
| assert(! isIoResizeArray(type)); |
| |
| if (type.getQualifier().storage != EvqVaryingIn || type.getQualifier().patch) |
| return; |
| |
| if (language == EShLangTessControl || language == EShLangTessEvaluation) { |
| if (type.getOuterArraySize() != resources.maxPatchVertices) { |
| if (type.isSizedArray()) |
| error(loc, "tessellation input array size must be gl_MaxPatchVertices or implicitly sized", "[]", ""); |
| type.changeOuterArraySize(resources.maxPatchVertices); |
| } |
| } |
| } |
| |
// Issue any errors if the non-array object is missing arrayness WRT
// shader I/O that has array requirements.
// All arrayness checking is handled in array paths; this handles the case of a
// non-array object that should have been declared as an array.
| void TParseContext::ioArrayCheck(const TSourceLoc& loc, const TType& type, const TString& identifier) |
| { |
| if (! type.isArray() && ! symbolTable.atBuiltInLevel()) { |
| if (type.getQualifier().isArrayedIo(language) && !type.getQualifier().layoutPassthrough) |
| error(loc, "type must be an array:", type.getStorageQualifierString(), identifier.c_str()); |
| } |
| } |
| |
| // Handle a dereference of a geometry shader input array or tessellation control output array. |
| // See ioArraySymbolResizeList comment in ParseHelper.h. |
| // |
| void TParseContext::handleIoResizeArrayAccess(const TSourceLoc& /*loc*/, TIntermTyped* base) |
| { |
| TIntermSymbol* symbolNode = base->getAsSymbolNode(); |
| assert(symbolNode); |
| if (! symbolNode) |
| return; |
| |
| // fix array size, if it can be fixed and needs to be fixed (will allow variable indexing) |
| if (symbolNode->getType().isUnsizedArray()) { |
| int newSize = getIoArrayImplicitSize(symbolNode->getType().getQualifier()); |
| if (newSize > 0) |
| symbolNode->getWritableType().changeOuterArraySize(newSize); |
| } |
| } |
| |
// If there has been an input primitive declaration (geometry shader) or an output
// number of vertices declaration (tessellation control shader), make sure all affected I/O array types
| // match it in size. Types come either from nodes in the AST or symbols in the |
| // symbol table. |
| // |
| // Types without an array size will be given one. |
| // Types already having a size that is wrong will get an error. |
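// For example, after "layout(triangles) in;" in a geometry shader, an unsized
// per-vertex input array gets an outer size of 3, and an explicit outer size
// other than 3 is an error.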
| // |
| void TParseContext::checkIoArraysConsistency(const TSourceLoc &loc, bool tailOnly) |
| { |
| int requiredSize = 0; |
| TString featureString; |
| size_t listSize = ioArraySymbolResizeList.size(); |
| size_t i = 0; |
| |
| // If tailOnly = true, only check the last array symbol in the list. |
| if (tailOnly) { |
| i = listSize - 1; |
| } |
| for (bool firstIteration = true; i < listSize; ++i) { |
| TType &type = ioArraySymbolResizeList[i]->getWritableType(); |
| |
| // As I/O array sizes don't change, fetch requiredSize only once, |
| // except for mesh shaders which could have different I/O array sizes based on type qualifiers. |
| if (firstIteration || (language == EShLangMeshNV)) { |
| requiredSize = getIoArrayImplicitSize(type.getQualifier(), &featureString); |
| if (requiredSize == 0) |
| break; |
| firstIteration = false; |
| } |
| |
| checkIoArrayConsistency(loc, requiredSize, featureString.c_str(), type, |
| ioArraySymbolResizeList[i]->getName()); |
| } |
| } |
| |
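// Compute the implied outer size of an I/O array for the current stage, e.g.:
//  - geometry inputs:              3 for "layout(triangles) in;"
//  - tessellation control outputs: the "layout(vertices = N) out;" value
//  - fragment pervertexNV inputs:  always 3
//  - mesh shader outputs:          max_vertices or max_primitives (or, for
//    gl_PrimitiveIndicesNV, max_primitives times the output primitive's vertex count)
// Returns 0 when the size is not yet known.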
| int TParseContext::getIoArrayImplicitSize(const TQualifier &qualifier, TString *featureString) const |
| { |
| int expectedSize = 0; |
| TString str = "unknown"; |
| unsigned int maxVertices = intermediate.getVertices() != TQualifier::layoutNotSet ? intermediate.getVertices() : 0; |
| |
| if (language == EShLangGeometry) { |
| expectedSize = TQualifier::mapGeometryToSize(intermediate.getInputPrimitive()); |
| str = TQualifier::getGeometryString(intermediate.getInputPrimitive()); |
| } |
| else if (language == EShLangTessControl) { |
| expectedSize = maxVertices; |
| str = "vertices"; |
| } else if (language == EShLangFragment) { |
// The number of per-vertex values for a fragment shader (pervertexNV inputs) is always three.
| expectedSize = 3; |
| str = "vertices"; |
| } else if (language == EShLangMeshNV) { |
| unsigned int maxPrimitives = |
| intermediate.getPrimitives() != TQualifier::layoutNotSet ? intermediate.getPrimitives() : 0; |
| if (qualifier.builtIn == EbvPrimitiveIndicesNV) { |
| expectedSize = maxPrimitives * TQualifier::mapGeometryToSize(intermediate.getOutputPrimitive()); |
| str = "max_primitives*"; |
| str += TQualifier::getGeometryString(intermediate.getOutputPrimitive()); |
| } |
| else if (qualifier.isPerPrimitive()) { |
| expectedSize = maxPrimitives; |
| str = "max_primitives"; |
| } |
| else { |
| expectedSize = maxVertices; |
| str = "max_vertices"; |
| } |
| } |
| if (featureString) |
| *featureString = str; |
| return expectedSize; |
| } |
| |
| void TParseContext::checkIoArrayConsistency(const TSourceLoc& loc, int requiredSize, const char* feature, TType& type, const TString& name) |
| { |
| if (type.isUnsizedArray()) |
| type.changeOuterArraySize(requiredSize); |
| else if (type.getOuterArraySize() != requiredSize) { |
| if (language == EShLangGeometry) |
| error(loc, "inconsistent input primitive for array size of", feature, name.c_str()); |
| else if (language == EShLangTessControl) |
| error(loc, "inconsistent output number of vertices for array size of", feature, name.c_str()); |
| else if (language == EShLangFragment) { |
| if (type.getOuterArraySize() > requiredSize) |
| error(loc, " cannot be greater than 3 for pervertexNV", feature, name.c_str()); |
| } |
| else if (language == EShLangMeshNV) |
| error(loc, "inconsistent output array size of", feature, name.c_str()); |
| else |
| assert(0); |
| } |
| } |
| |
| #endif // GLSLANG_WEB |
| |
| // Handle seeing a binary node with a math operation. |
| // Returns nullptr if not semantically allowed. |
| TIntermTyped* TParseContext::handleBinaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right) |
| { |
| rValueErrorCheck(loc, str, left->getAsTyped()); |
| rValueErrorCheck(loc, str, right->getAsTyped()); |
| |
| bool allowed = true; |
| switch (op) { |
| // TODO: Bring more source language-specific checks up from intermediate.cpp |
| // to the specific parse helpers for that source language. |
| case EOpLessThan: |
| case EOpGreaterThan: |
| case EOpLessThanEqual: |
| case EOpGreaterThanEqual: |
| if (! left->isScalar() || ! right->isScalar()) |
| allowed = false; |
| break; |
| default: |
| break; |
| } |
| |
| if (((left->getType().contains16BitFloat() || right->getType().contains16BitFloat()) && !float16Arithmetic()) || |
| ((left->getType().contains16BitInt() || right->getType().contains16BitInt()) && !int16Arithmetic()) || |
| ((left->getType().contains8BitInt() || right->getType().contains8BitInt()) && !int8Arithmetic())) { |
| allowed = false; |
| } |
| |
| TIntermTyped* result = nullptr; |
| if (allowed) |
| result = intermediate.addBinaryMath(op, left, right, loc); |
| |
| if (result == nullptr) |
| binaryOpError(loc, str, left->getCompleteString(), right->getCompleteString()); |
| |
| return result; |
| } |
| |
| // Handle seeing a unary node with a math operation. |
| TIntermTyped* TParseContext::handleUnaryMath(const TSourceLoc& loc, const char* str, TOperator op, TIntermTyped* childNode) |
| { |
| rValueErrorCheck(loc, str, childNode); |
| |
| bool allowed = true; |
| if ((childNode->getType().contains16BitFloat() && !float16Arithmetic()) || |
| (childNode->getType().contains16BitInt() && !int16Arithmetic()) || |
| (childNode->getType().contains8BitInt() && !int8Arithmetic())) { |
| allowed = false; |
| } |
| |
| TIntermTyped* result = nullptr; |
| if (allowed) |
| result = intermediate.addUnaryMath(op, childNode, loc); |
| |
| if (result) |
| return result; |
| else |
| unaryOpError(loc, str, childNode->getCompleteString()); |
| |
| return childNode; |
| } |
| |
| // |
| // Handle seeing a base.field dereference in the grammar. |
| // |
| TIntermTyped* TParseContext::handleDotDereference(const TSourceLoc& loc, TIntermTyped* base, const TString& field) |
| { |
| variableCheck(base); |
| |
| // |
| // .length() can't be resolved until we later see the function-calling syntax. |
| // Save away the name in the AST for now. Processing is completed in |
| // handleLengthMethod(). |
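// For example, "a.length()" on an array, or "m.length()" on a vector or matrix
// (the latter needing desktop GLSL 420 or GL_ARB_shading_language_420pack).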
| // |
| if (field == "length") { |
| if (base->isArray()) { |
| profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, ".length"); |
| profileRequires(loc, EEsProfile, 300, nullptr, ".length"); |
| } else if (base->isVector() || base->isMatrix()) { |
| const char* feature = ".length() on vectors and matrices"; |
| requireProfile(loc, ~EEsProfile, feature); |
| profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, feature); |
| } else if (!base->getType().isCoopMat()) { |
| error(loc, "does not operate on this type:", field.c_str(), base->getType().getCompleteString().c_str()); |
| |
| return base; |
| } |
| |
| return intermediate.addMethod(base, TType(EbtInt), &field, loc); |
| } |
| |
| // It's not .length() if we get to here. |
| |
| if (base->isArray()) { |
| error(loc, "cannot apply to an array:", ".", field.c_str()); |
| |
| return base; |
| } |
| |
| if (base->getType().isCoopMat()) { |
| error(loc, "cannot apply to a cooperative matrix type:", ".", field.c_str()); |
| return base; |
| } |
| |
| // It's neither an array nor .length() if we get here, |
| // leaving swizzles and struct/block dereferences. |
| |
| TIntermTyped* result = base; |
| if ((base->isVector() || base->isScalar()) && |
| (base->isFloatingDomain() || base->isIntegerDomain() || base->getBasicType() == EbtBool)) { |
| if (base->isScalar()) { |
| const char* dotFeature = "scalar swizzle"; |
| requireProfile(loc, ~EEsProfile, dotFeature); |
| profileRequires(loc, ~EEsProfile, 420, E_GL_ARB_shading_language_420pack, dotFeature); |
| } |
| |
| TSwizzleSelectors<TVectorSelector> selectors; |
| parseSwizzleSelector(loc, field, base->getVectorSize(), selectors); |
| |
| if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitFloat()) |
| requireFloat16Arithmetic(loc, ".", "can't swizzle types containing float16"); |
| if (base->isVector() && selectors.size() != 1 && base->getType().contains16BitInt()) |
| requireInt16Arithmetic(loc, ".", "can't swizzle types containing (u)int16"); |
| if (base->isVector() && selectors.size() != 1 && base->getType().contains8BitInt()) |
| requireInt8Arithmetic(loc, ".", "can't swizzle types containing (u)int8"); |
| |
| if (base->isScalar()) { |
| if (selectors.size() == 1) |
| return result; |
| else { |
| TType type(base->getBasicType(), EvqTemporary, selectors.size()); |
| // Swizzle operations propagate specialization-constantness |
| if (base->getQualifier().isSpecConstant()) |
| type.getQualifier().makeSpecConstant(); |
| return addConstructor(loc, base, type); |
| } |
| } |
| |
| if (base->getType().getQualifier().isFrontEndConstant()) |
| result = intermediate.foldSwizzle(base, selectors, loc); |
| else { |
| if (selectors.size() == 1) { |
| TIntermTyped* index = intermediate.addConstantUnion(selectors[0], loc); |
| result = intermediate.addIndex(EOpIndexDirect, base, index, loc); |
| result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision)); |
| } else { |
| TIntermTyped* index = intermediate.addSwizzle(selectors, loc); |
| result = intermediate.addIndex(EOpVectorSwizzle, base, index, loc); |
| result->setType(TType(base->getBasicType(), EvqTemporary, base->getType().getQualifier().precision, selectors.size())); |
| } |
| // Swizzle operations propagate specialization-constantness |
| if (base->getType().getQualifier().isSpecConstant()) |
| result->getWritableType().getQualifier().makeSpecConstant(); |
| } |
| } else if (base->isStruct() || base->isReference()) { |
| const TTypeList* fields = base->isReference() ? |
| base->getType().getReferentType()->getStruct() : |
| base->getType().getStruct(); |
| bool fieldFound = false; |
| int member; |
| for (member = 0; member < (int)fields->size(); ++member) { |
| if ((*fields)[member].type->getFieldName() == field) { |
| fieldFound = true; |
| break; |
| } |
| } |
| if (fieldFound) { |
| if (base->getType().getQualifier().isFrontEndConstant()) |
| result = intermediate.foldDereference(base, member, loc); |
| else { |
| blockMemberExtensionCheck(loc, base, member, field); |
| TIntermTyped* index = intermediate.addConstantUnion(member, loc); |
| result = intermediate.addIndex(EOpIndexDirectStruct, base, index, loc); |
| result->setType(*(*fields)[member].type); |
| if ((*fields)[member].type->getQualifier().isIo()) |
| intermediate.addIoAccessed(field); |
| } |
| inheritMemoryQualifiers(base->getQualifier(), result->getWritableType().getQualifier()); |
| } else |
| error(loc, "no such field in structure", field.c_str(), ""); |
| } else |
| error(loc, "does not apply to this type:", field.c_str(), base->getType().getCompleteString().c_str()); |
| |
| // Propagate noContraction up the dereference chain |
| if (base->getQualifier().isNoContraction()) |
| result->getWritableType().getQualifier().setNoContraction(); |
| |
| // Propagate nonuniform |
| if (base->getQualifier().isNonUniform()) |
| result->getWritableType().getQualifier().nonUniform = true; |
| |
| return result; |
| } |
| |
| void TParseContext::blockMemberExtensionCheck(const TSourceLoc& loc, const TIntermTyped* base, int member, const TString& memberName) |
| { |
| // a block that needs extension checking is either 'base', or if arrayed, |
| // one level removed to the left |
| const TIntermSymbol* baseSymbol = nullptr; |
| if (base->getAsBinaryNode() == nullptr) |
| baseSymbol = base->getAsSymbolNode(); |
| else |
| baseSymbol = base->getAsBinaryNode()->getLeft()->getAsSymbolNode(); |
| if (baseSymbol == nullptr) |
| return; |
| const TSymbol* symbol = symbolTable.find(baseSymbol->getName()); |
| if (symbol == nullptr) |
| return; |
| const TVariable* variable = symbol->getAsVariable(); |
| if (variable == nullptr) |
| return; |
| if (!variable->hasMemberExtensions()) |
| return; |
| |
| // We now have a variable that is the base of a dot reference |
| // with members that need extension checking. |
| if (variable->getNumMemberExtensions(member) > 0) |
| requireExtensions(loc, variable->getNumMemberExtensions(member), variable->getMemberExtensions(member), memberName.c_str()); |
| } |
| |
| // |
| // Handle seeing a function declarator in the grammar. This is the precursor |
| // to recognizing a function prototype or function definition. |
| // |
| TFunction* TParseContext::handleFunctionDeclarator(const TSourceLoc& loc, TFunction& function, bool prototype) |
| { |
| // ES can't declare prototypes inside functions |
| if (! symbolTable.atGlobalLevel()) |
| requireProfile(loc, ~EEsProfile, "local function declaration"); |
| |
| // |
| // Multiple declarations of the same function name are allowed. |
| // |
| // If this is a definition, the definition production code will check for redefinitions |
| // (we don't know at this point if it's a definition or not). |
| // |
| // Redeclarations (full signature match) are allowed. But, return types and parameter qualifiers must also match. |
| // - except ES 100, which only allows a single prototype |
| // |
| // ES 100 does not allow redefining, but does allow overloading of built-in functions. |
| // ES 300 does not allow redefining or overloading of built-in functions. |
| // |
| bool builtIn; |
| TSymbol* symbol = symbolTable.find(function.getMangledName(), &builtIn); |
| if (symbol && symbol->getAsFunction() && builtIn) |
| requireProfile(loc, ~EEsProfile, "redefinition of built-in function"); |
| const TFunction* prevDec = symbol ? symbol->getAsFunction() : 0; |
| if (prevDec) { |
| if (prevDec->isPrototyped() && prototype) |
| profileRequires(loc, EEsProfile, 300, nullptr, "multiple prototypes for same function"); |
| if (prevDec->getType() != function.getType()) |
| error(loc, "overloaded functions must have the same return type", function.getName().c_str(), ""); |
| for (int i = 0; i < prevDec->getParamCount(); ++i) { |
| if ((*prevDec)[i].type->getQualifier().storage != function[i].type->getQualifier().storage) |
| error(loc, "overloaded functions must have the same parameter storage qualifiers for argument", function[i].type->getStorageQualifierString(), "%d", i+1); |
| |
| if ((*prevDec)[i].type->getQualifier().precision != function[i].type->getQualifier().precision) |
| error(loc, "overloaded functions must have the same parameter precision qualifiers for argument", function[i].type->getPrecisionQualifierString(), "%d", i+1); |
| } |
| } |
| |
| arrayObjectCheck(loc, function.getType(), "array in function return type"); |
| |
| if (prototype) { |
| // All built-in functions are defined, even though they don't have a body. |
| // Count their prototype as a definition instead. |
| if (symbolTable.atBuiltInLevel()) |
| function.setDefined(); |
| else { |
| if (prevDec && ! builtIn) |
| symbol->getAsFunction()->setPrototyped(); // need a writable one, but like having prevDec as a const |
| function.setPrototyped(); |
| } |
| } |
| |
| // This insert won't actually insert it if it's a duplicate signature, but it will still check for |
| // other forms of name collisions. |
| if (! symbolTable.insert(function)) |
| error(loc, "function name is redeclaration of existing name", function.getName().c_str(), ""); |
| |
| // |
| // If this is a redeclaration, it could also be a definition, |
| // in which case, we need to use the parameter names from this one, and not the one that's |
| // being redeclared. So, pass back this declaration, not the one in the symbol table. |
| // |
| return &function; |
| } |
| |
| // |
| // Handle seeing the function prototype in front of a function definition in the grammar. |
| // The body is handled after this function returns. |
| // |
| TIntermAggregate* TParseContext::handleFunctionDefinition(const TSourceLoc& loc, TFunction& function) |
| { |
| currentCaller = function.getMangledName(); |
| TSymbol* symbol = symbolTable.find(function.getMangledName()); |
| TFunction* prevDec = symbol ? symbol->getAsFunction() : nullptr; |
| |
| if (! prevDec) |
| error(loc, "can't find function", function.getName().c_str(), ""); |
| // Note: 'prevDec' could be 'function' if this is the first time we've seen function |
| // as it would have just been put in the symbol table. Otherwise, we're looking up |
| // an earlier occurrence. |
| |
| if (prevDec && prevDec->isDefined()) { |
| // Then this function already has a body. |
| error(loc, "function already has a body", function.getName().c_str(), ""); |
| } |
| if (prevDec && ! prevDec->isDefined()) { |
| prevDec->setDefined(); |
| |
| // Remember the return type for later checking for RETURN statements. |
| currentFunctionType = &(prevDec->getType()); |
| } else |
| currentFunctionType = new TType(EbtVoid); |
| functionReturnsValue = false; |
| |
| // Check for entry point |
| if (function.getName().compare(intermediate.getEntryPointName().c_str()) == 0) { |
| intermediate.setEntryPointMangledName(function.getMangledName().c_str()); |
| intermediate.incrementEntryPointCount(); |
| inMain = true; |
| } else |
| inMain = false; |
| |
| // |
| // Raise error message if main function takes any parameters or returns anything other than void |
| // |
| if (inMain) { |
| if (function.getParamCount() > 0) |
| error(loc, "function cannot take any parameter(s)", function.getName().c_str(), ""); |
| if (function.getType().getBasicType() != EbtVoid) |
| error(loc, "", function.getType().getBasicTypeString().c_str(), "entry point cannot return a value"); |
| } |
| |
| // |
| // New symbol table scope for body of function plus its arguments |
| // |
| symbolTable.push(); |
| |
| // |
| // Insert parameters into the symbol table. |
| // If the parameter has no name, it's not an error, just don't insert it |
| // (could be used for unused args). |
| // |
| // Also, accumulate the list of parameters into the HIL, so lower level code |
| // knows where to find parameters. |
| // |
| TIntermAggregate* paramNodes = new TIntermAggregate; |
| for (int i = 0; i < function.getParamCount(); i++) { |
| TParameter& param = function[i]; |
| if (param.name != nullptr) { |
| TVariable *variable = new TVariable(param.name, *param.type); |
| |
| // Insert the parameters with name in the symbol table. |
| if (! symbolTable.insert(*variable)) |
| error(loc, "redefinition", variable->getName().c_str(), ""); |
| else { |
| // Transfer ownership of name pointer to symbol table. |
| param.name = nullptr; |
| |
| // Add the parameter to the HIL |
| paramNodes = intermediate.growAggregate(paramNodes, |
| intermediate.addSymbol(*variable, loc), |
| loc); |
| } |
| } else |
| paramNodes = intermediate.growAggregate(paramNodes, intermediate.addSymbol(*param.type, loc), loc); |
| } |
| intermediate.setAggregateOperator(paramNodes, EOpParameters, TType(EbtVoid), loc); |
| loopNestingLevel = 0; |
| statementNestingLevel = 0; |
| controlFlowNestingLevel = 0; |
| postEntryPointReturn = false; |
| |
| return paramNodes; |
| } |
| |
| // |
| // Handle seeing function call syntax in the grammar, which could be any of |
| // - .length() method |
| // - constructor |
| // - a call to a built-in function mapped to an operator |
| // - a call to a built-in function that will remain a function call (e.g., texturing) |
| // - user function |
| // - subroutine call (not implemented yet) |
| // |
| TIntermTyped* TParseContext::handleFunctionCall(const TSourceLoc& loc, TFunction* function, TIntermNode* arguments) |
| { |
| TIntermTyped* result = nullptr; |
| |
| if (function->getBuiltInOp() == EOpArrayLength) |
| result = handleLengthMethod(loc, function, arguments); |
| else if (function->getBuiltInOp() != EOpNull) { |
| // |
| // Then this should be a constructor. |
| // Don't go through the symbol table for constructors. |
| // Their parameters will be verified algorithmically. |
| // |
| TType type(EbtVoid); // use this to get the type back |
| if (! constructorError(loc, arguments, *function, function->getBuiltInOp(), type)) { |
| // |
| // It's a constructor, of type 'type'. |
| // |
| result = addConstructor(loc, arguments, type); |
| if (result == nullptr) |
| error(loc, "cannot construct with these arguments", type.getCompleteString().c_str(), ""); |
| } |
| } else { |
| // |
| // Find it in the symbol table. |
| // |
| const TFunction* fnCandidate; |
| bool builtIn; |
| fnCandidate = findFunction(loc, *function, builtIn); |
| if (fnCandidate) { |
| // This is a declared function that might map to |
| // - a built-in operator, |
| // - a built-in function not mapped to an operator, or |
| // - a user function. |
| |
| // Error check for a function requiring specific extensions present. |
| if (builtIn && fnCandidate->getNumExtensions()) |
| requireExtensions(loc, fnCandidate->getNumExtensions(), fnCandidate->getExtensions(), fnCandidate->getName().c_str()); |
| |
| if (builtIn && fnCandidate->getType().contains16BitFloat()) |
| requireFloat16Arithmetic(loc, "built-in function", "float16 types can only be in uniform block or buffer storage"); |
| if (builtIn && fnCandidate->getType().contains16BitInt()) |
| requireInt16Arithmetic(loc, "built-in function", "(u)int16 types can only be in uniform block or buffer storage"); |
| if (builtIn && fnCandidate->getType().contains8BitInt()) |
| requireInt8Arithmetic(loc, "built-in function", "(u)int8 types can only be in uniform block or buffer storage"); |
| |
| if (arguments != nullptr) { |
| // Make sure qualifications work for these arguments. |
| TIntermAggregate* aggregate = arguments->getAsAggregate(); |
| for (int i = 0; i < fnCandidate->getParamCount(); ++i) { |
| // At this early point there is a slight ambiguity between whether an aggregate 'arguments' |
| // is the single argument itself or its children are the arguments. Only one argument |
| // means take 'arguments' itself as the one argument. |
| TIntermNode* arg = fnCandidate->getParamCount() == 1 ? arguments : (aggregate ? aggregate->getSequence()[i] : arguments); |
| TQualifier& formalQualifier = (*fnCandidate)[i].type->getQualifier(); |
| if (formalQualifier.isParamOutput()) { |
| if (lValueErrorCheck(arguments->getLoc(), "assign", arg->getAsTyped())) |
| error(arguments->getLoc(), "Non-L-value cannot be passed for 'out' or 'inout' parameters.", "out", ""); |
| } |
| const TType& argType = arg->getAsTyped()->getType(); |
| const TQualifier& argQualifier = argType.getQualifier(); |
| if (argQualifier.isMemory() && (argType.containsOpaque() || argType.isReference())) { |
| const char* message = "argument cannot drop memory qualifier when passed to formal parameter"; |
| if (argQualifier.volatil && ! formalQualifier.volatil) |
| error(arguments->getLoc(), message, "volatile", ""); |
| if (argQualifier.coherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent)) |
| error(arguments->getLoc(), message, "coherent", ""); |
| if (argQualifier.devicecoherent && ! (formalQualifier.devicecoherent || formalQualifier.coherent)) |
| error(arguments->getLoc(), message, "devicecoherent", ""); |
| if (argQualifier.queuefamilycoherent && ! (formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) |
| error(arguments->getLoc(), message, "queuefamilycoherent", ""); |
| if (argQualifier.workgroupcoherent && ! (formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) |
| error(arguments->getLoc(), message, "workgroupcoherent", ""); |
| if (argQualifier.subgroupcoherent && ! (formalQualifier.subgroupcoherent || formalQualifier.workgroupcoherent || formalQualifier.queuefamilycoherent || formalQualifier.devicecoherent || formalQualifier.coherent)) |
| error(arguments->getLoc(), message, "subgroupcoherent", ""); |
| if (argQualifier.readonly && ! formalQualifier.readonly) |
| error(arguments->getLoc(), message, "readonly", ""); |
| if (argQualifier.writeonly && ! formalQualifier.writeonly) |
| error(arguments->getLoc(), message, "writeonly", ""); |
| // Don't check 'restrict', it is different than the rest: |
| // "...but only restrict can be taken away from a calling argument, by a formal parameter that |
| // lacks the restrict qualifier..." |
| } |
| if (!builtIn && argQualifier.getFormat() != formalQualifier.getFormat()) { |
| // we have mismatched formats, which should only be allowed if writeonly |
| // and at least one format is unknown |
| if (!formalQualifier.isWriteOnly() || (formalQualifier.getFormat() != ElfNone && |
| argQualifier.getFormat() != ElfNone)) |
| error(arguments->getLoc(), "image formats must match", "format", ""); |
| } |
| if (builtIn && arg->getAsTyped()->getType().contains16BitFloat()) |
| requireFloat16Arithmetic(arguments->getLoc(), "built-in function", "float16 types can only be in uniform block or buffer storage"); |
| if (builtIn && arg->getAsTyped()->getType().contains16BitInt()) |
| requireInt16Arithmetic(arguments->getLoc(), "built-in function", "(u)int16 types can only be in uniform block or buffer storage"); |
| if (builtIn && arg->getAsTyped()->getType().contains8BitInt()) |
| requireInt8Arithmetic(arguments->getLoc(), "built-in function", "(u)int8 types can only be in uniform block or buffer storage"); |
| |
| // TODO 4.5 functionality: A shader will fail to compile |
| // if the value passed to the memargument of an atomic memory function does not correspond to a buffer or |
| // shared variable. It is acceptable to pass an element of an array or a single component of a vector to the |
| // memargument of an atomic memory function, as long as the underlying array or vector is a buffer or |
| // shared variable. |
| } |
| |
| // Convert 'in' arguments |
| addInputArgumentConversions(*fnCandidate, arguments); // arguments may be modified if it's just a single argument node |
| } |
| |
| if (builtIn && fnCandidate->getBuiltInOp() != EOpNull) { |
| // A function call mapped to a built-in operation. |
| result = handleBuiltInFunctionCall(loc, arguments, *fnCandidate); |
| } else { |
| // This is a function call not mapped to built-in operator. |
| // It could still be a built-in function, but only if PureOperatorBuiltins == false. |
| result = intermediate.setAggregateOperator(arguments, EOpFunctionCall, fnCandidate->getType(), loc); |
| TIntermAggregate* call = result->getAsAggregate(); |
| call->setName(fnCandidate->getMangledName()); |
| |
// This is how we know whether the given function is a built-in function or a user-defined function:
// if builtIn == false, it's user-defined (and could also be overloading a built-in of the same name);
// if builtIn == true, it's definitely a built-in function, just one whose built-in op is EOpNull.
| if (! builtIn) { |
| call->setUserDefined(); |
| if (symbolTable.atGlobalLevel()) { |
| requireProfile(loc, ~EEsProfile, "calling user function from global scope"); |
| intermediate.addToCallGraph(infoSink, "main(", fnCandidate->getMangledName()); |
| } else |
| intermediate.addToCallGraph(infoSink, currentCaller, fnCandidate->getMangledName()); |
| } |
| |
| #ifndef GLSLANG_WEB |
| if (builtIn) |
| nonOpBuiltInCheck(loc, *fnCandidate, *call); |
| else |
| #endif |
| userFunctionCallCheck(loc, *call); |
| } |
| |
| // Convert 'out' arguments. If it was a constant folded built-in, it won't be an aggregate anymore. |
| // Built-ins with a single argument aren't called with an aggregate, but they also don't have an output. |
| // Also, build the qualifier list for user function calls, which are always called with an aggregate. |
| if (result->getAsAggregate()) { |
| TQualifierList& qualifierList = result->getAsAggregate()->getQualifierList(); |
| for (int i = 0; i < fnCandidate->getParamCount(); ++i) { |
| TStorageQualifier qual = (*fnCandidate)[i].type->getQualifier().storage; |
| qualifierList.push_back(qual); |
| } |
| result = addOutputArgumentConversions(*fnCandidate, *result->getAsAggregate()); |
| } |
| |
| if (result->getAsTyped()->getType().isCoopMat() && |
| !result->getAsTyped()->getType().isParameterized()) { |
| assert(fnCandidate->getBuiltInOp() == EOpCooperativeMatrixMulAdd); |
| |
| result->setType(result->getAsAggregate()->getSequence()[2]->getAsTyped()->getType()); |
| } |
| } |
| } |
| |
| // generic error recovery |
| // TODO: simplification: localize all the error recoveries that look like this, and taking type into account to reduce cascades |
| if (result == nullptr) |
| result = intermediate.addConstantUnion(0.0, EbtFloat, loc); |
| |
| return result; |
| } |
| |
| TIntermTyped* TParseContext::handleBuiltInFunctionCall(TSourceLoc loc, TIntermNode* arguments, |
| const TFunction& function) |
| { |
| checkLocation(loc, function.getBuiltInOp()); |
| TIntermTyped *result = intermediate.addBuiltInFunctionCall(loc, function.getBuiltInOp(), |
| function.getParamCount() == 1, |
| arguments, function.getType()); |
// addBuiltInFunctionCall() can fail and return nullptr, so guard before computing precisions
if (result != nullptr && obeyPrecisionQualifiers())
computeBuiltinPrecisions(*result, function);
| |
| if (result == nullptr) { |
| if (arguments == nullptr) |
| error(loc, " wrong operand type", "Internal Error", |
| "built in unary operator function. Type: %s", ""); |
| else |
| error(arguments->getLoc(), " wrong operand type", "Internal Error", |
| "built in unary operator function. Type: %s", |
| static_cast<TIntermTyped*>(arguments)->getCompleteString().c_str()); |
| } else if (result->getAsOperator()) |
| builtInOpCheck(loc, function, *result->getAsOperator()); |
| |
| return result; |
| } |
| |
| // "The operation of a built-in function can have a different precision |
| // qualification than the precision qualification of the resulting value. |
| // These two precision qualifications are established as follows. |
| // |
| // The precision qualification of the operation of a built-in function is |
| // based on the precision qualification of its input arguments and formal |
| // parameters: When a formal parameter specifies a precision qualifier, |
| // that is used, otherwise, the precision qualification of the calling |
| // argument is used. The highest precision of these will be the precision |
| // qualification of the operation of the built-in function. Generally, |
| // this is applied across all arguments to a built-in function, with the |
| // exceptions being: |
| // - bitfieldExtract and bitfieldInsert ignore the 'offset' and 'bits' |
| // arguments. |
| // - interpolateAt* functions only look at the 'interpolant' argument. |
| // |
| // The precision qualification of the result of a built-in function is |
| // determined in one of the following ways: |
| // |
| // - For the texture sampling, image load, and image store functions, |
| // the precision of the return type matches the precision of the |
| // sampler type |
| // |
| // Otherwise: |
| // |
| // - For prototypes that do not specify a resulting precision qualifier, |
| // the precision will be the same as the precision of the operation. |
| // |
| // - For prototypes that do specify a resulting precision qualifier, |
| // the specified precision qualifier is the precision qualification of |
| // the result." |
| // |
| void TParseContext::computeBuiltinPrecisions(TIntermTyped& node, const TFunction& function) |
| { |
| TPrecisionQualifier operationPrecision = EpqNone; |
| TPrecisionQualifier resultPrecision = EpqNone; |
| |
| TIntermOperator* opNode = node.getAsOperator(); |
| if (opNode == nullptr) |
| return; |
| |
| if (TIntermUnary* unaryNode = node.getAsUnaryNode()) { |
| operationPrecision = std::max(function[0].type->getQualifier().precision, |
| unaryNode->getOperand()->getType().getQualifier().precision); |
| if (function.getType().getBasicType() != EbtBool) |
| resultPrecision = function.getType().getQualifier().precision == EpqNone ? |
| operationPrecision : |
| function.getType().getQualifier().precision; |
| } else if (TIntermAggregate* agg = node.getAsAggregate()) { |
| TIntermSequence& sequence = agg->getSequence(); |
| unsigned int numArgs = (unsigned int)sequence.size(); |
| switch (agg->getOp()) { |
| case EOpBitfieldExtract: |
| numArgs = 1; |
| break; |
| case EOpBitfieldInsert: |
| numArgs = 2; |
| break; |
| case EOpInterpolateAtCentroid: |
| case EOpInterpolateAtOffset: |
| case EOpInterpolateAtSample: |
| numArgs = 1; |
| break; |
| default: |
| break; |
| } |
| // find the maximum precision from the arguments and parameters |
| for (unsigned int arg = 0; arg < numArgs; ++arg) { |
| operationPrecision = std::max(operationPrecision, sequence[arg]->getAsTyped()->getQualifier().precision); |
| operationPrecision = std::max(operationPrecision, function[arg].type->getQualifier().precision); |
| } |
| // compute the result precision |
| if (agg->isSampling() || |
| agg->getOp() == EOpImageLoad || agg->getOp() == EOpImageStore || |
| agg->getOp() == EOpImageLoadLod || agg->getOp() == EOpImageStoreLod) |
| resultPrecision = sequence[0]->getAsTyped()->getQualifier().precision; |
| else if (function.getType().getBasicType() != EbtBool) |
| resultPrecision = function.getType().getQualifier().precision == EpqNone ? |
| operationPrecision : |
| function.getType().getQualifier().precision; |
| } |
| |
| // Propagate precision through this node and its children. That algorithm stops |
| // when a precision is found, so start by clearing this subroot's precision. |
| opNode->getQualifier().precision = EpqNone; |
| if (operationPrecision != EpqNone) { |
| opNode->propagatePrecision(operationPrecision); |
| opNode->setOperationPrecision(operationPrecision); |
| } |
| // Now, set the result precision, which might not match the operation precision |
| opNode->getQualifier().precision = resultPrecision; |
| } |
| |
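| // Handle a "return expression;": check that returning a value is allowed here, convert the |
| // value to the function's return type if needed, and build the branch node. |
| // |
| // For illustration only (not part of the original source), a hedged sketch of the pre-420 |
| // conversion warning below: |
| // |
| //     #version 410 |
| //     float f() { return 1; }   // int converted to float on return: accepted, but warned |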
| TIntermNode* TParseContext::handleReturnValue(const TSourceLoc& loc, TIntermTyped* value) |
| { |
| #ifndef GLSLANG_WEB |
| storage16BitAssignmentCheck(loc, value->getType(), "return"); |
| #endif |
| |
| functionReturnsValue = true; |
| if (currentFunctionType->getBasicType() == EbtVoid) { |
| error(loc, "void function cannot return a value", "return", ""); |
| return intermediate.addBranch(EOpReturn, loc); |
| } else if (*currentFunctionType != value->getType()) { |
| TIntermTyped* converted = intermediate.addConversion(EOpReturn, *currentFunctionType, value); |
| if (converted) { |
| if (*currentFunctionType != converted->getType()) |
| error(loc, "cannot convert return value to function return type", "return", ""); |
| if (version < 420) |
| warn(loc, "type conversion on return values was not explicitly allowed until version 420", "return", ""); |
| return intermediate.addBranch(EOpReturn, converted, loc); |
| } else { |
| error(loc, "type does not match, or is not convertible to, the function's return type", "return", ""); |
| return intermediate.addBranch(EOpReturn, value, loc); |
| } |
| } else |
| return intermediate.addBranch(EOpReturn, value, loc); |
| } |
| |
| // See if the operation is being done in an illegal location. |
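| // |
| // For illustration only (not part of the original source), a hedged sketch of a placement |
| // this rejects in a tessellation control shader: |
| // |
| //     void main() |
| //     { |
| //         if (gl_InvocationID == 0) |
| //             barrier();   // error: barrier() cannot be placed within flow control |
| //     } |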
| void TParseContext::checkLocation(const TSourceLoc& loc, TOperator op) |
| { |
| #ifndef GLSLANG_WEB |
| switch (op) { |
| case EOpBarrier: |
| if (language == EShLangTessControl) { |
| if (controlFlowNestingLevel > 0) |
| error(loc, "tessellation control barrier() cannot be placed within flow control", "", ""); |
| if (! inMain) |
| error(loc, "tessellation control barrier() must be in main()", "", ""); |
| else if (postEntryPointReturn) |
| error(loc, "tessellation control barrier() cannot be placed after a return from main()", "", ""); |
| } |
| break; |
| case EOpBeginInvocationInterlock: |
| if (language != EShLangFragment) |
| error(loc, "beginInvocationInterlockARB() must be in a fragment shader", "", ""); |
| if (! inMain) |
| error(loc, "beginInvocationInterlockARB() must be in main()", "", ""); |
| else if (postEntryPointReturn) |
| error(loc, "beginInvocationInterlockARB() cannot be placed after a return from main()", "", ""); |
| if (controlFlowNestingLevel > 0) |
| error(loc, "beginInvocationInterlockARB() cannot be placed within flow control", "", ""); |
| |
| if (beginInvocationInterlockCount > 0) |
| error(loc, "beginInvocationInterlockARB() must only be called once", "", ""); |
| if (endInvocationInterlockCount > 0) |
| error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", ""); |
| |
| beginInvocationInterlockCount++; |
| |
| // default to pixel_interlock_ordered |
| if (intermediate.getInterlockOrdering() == EioNone) |
| intermediate.setInterlockOrdering(EioPixelInterlockOrdered); |
| break; |
| case EOpEndInvocationInterlock: |
| if (language != EShLangFragment) |
| error(loc, "endInvocationInterlockARB() must be in a fragment shader", "", ""); |
| if (! inMain) |
| error(loc, "endInvocationInterlockARB() must be in main()", "", ""); |
| else if (postEntryPointReturn) |
| error(loc, "endInvocationInterlockARB() cannot be placed after a return from main()", "", ""); |
| if (controlFlowNestingLevel > 0) |
| error(loc, "endInvocationInterlockARB() cannot be placed within flow control", "", ""); |
| |
| if (endInvocationInterlockCount > 0) |
| error(loc, "endInvocationInterlockARB() must only be called once", "", ""); |
| if (beginInvocationInterlockCount == 0) |
| error(loc, "beginInvocationInterlockARB() must be called before endInvocationInterlockARB()", "", ""); |
| |
| endInvocationInterlockCount++; |
| break; |
| default: |
| break; |
| } |
| #endif |
| } |
| |
| // Finish processing object.length(). This started earlier in handleDotDereference(), where |
| // the ".length" part was recognized and semantically checked; it finishes here, where the |
| // function syntax "()" is recognized. |
| // |
| // Return resulting tree node. |
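| // |
| // For illustration only (not part of the original source), hedged sketches of the cases |
| // handled below; all names are placeholders: |
| // |
| //     float a[7];   a.length();    // folds to the compile-time constant 7 |
| //     mat3x2 m;     m.length();    // number of columns: 3 |
| //     buffer B { float d[]; } b; |
| //     b.d.length();                // runtime-sized: becomes an EOpArrayLength node for the back end |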
| TIntermTyped* TParseContext::handleLengthMethod(const TSourceLoc& loc, TFunction* function, TIntermNode* intermNode) |
| { |
| int length = 0; |
| |
| if (function->getParamCount() > 0) |
| error(loc, "method does not accept any arguments", function->getName().c_str(), ""); |
| else { |
| const TType& type = intermNode->getAsTyped()->getType(); |
| if (type.isArray()) { |
| if (type.isUnsizedArray()) { |
| #ifndef GLSLANG_WEB |
| if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) { |
| // We could be between a layout declaration that gives a built-in I/O array an implicit size and |
| // a user redeclaration of that array, meaning we have to substitute its implicit size here |
| // without actually redeclaring the array. (It is an error to use a member before the |
| // redeclaration, but not an error to use the array name itself.) |
| const TString& name = intermNode->getAsSymbolNode()->getName(); |
| if (name == "gl_in" || name == "gl_out" || name == "gl_MeshVerticesNV" || |
| name == "gl_MeshPrimitivesNV") { |
| length = getIoArrayImplicitSize(type.getQualifier()); |
| } |
| } |
| #endif |
| if (length == 0) { |
| #ifndef GLSLANG_WEB |
| if (intermNode->getAsSymbolNode() && isIoResizeArray(type)) |
| error(loc, "", function->getName().c_str(), "array must first be sized by a redeclaration or layout qualifier"); |
| else if (isRuntimeLength(*intermNode->getAsTyped())) { |
| // Create a unary op and let the back end handle it |
| return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt)); |
| } else |
| #endif |
| error(loc, "", function->getName().c_str(), "array must be declared with a size before using this method"); |
| } |
| } else if (type.getOuterArrayNode()) { |
| // If the array's outer size is specified by an intermediate node, the array's length |
| // was specified by a specialization constant. In that case, return the specialization |
| // constant's node to represent the length. |
| return type.getOuterArrayNode(); |
| } else |
| length = type.getOuterArraySize(); |
| } else if (type.isMatrix()) |
| length = type.getMatrixCols(); |
| else if (type.isVector()) |
| length = type.getVectorSize(); |
| else if (type.isCoopMat()) |
| return intermediate.addBuiltInFunctionCall(loc, EOpArrayLength, true, intermNode, TType(EbtInt)); |
| else { |
| // we should not get here, because earlier semantic checking should have prevented this path |
| error(loc, ".length()", "unexpected use of .length()", ""); |
| } |
| } |
| |
| if (length == 0) |
| length = 1; |
| |
| return intermediate.addConstantUnion(length, loc); |
| } |
| |
| // |
| // Add any needed implicit conversions for function-call arguments to input parameters. |
| // |
| void TParseContext::addInputArgumentConversions(const TFunction& function, TIntermNode*& arguments) const |
| { |
| #ifndef GLSLANG_WEB |
| TIntermAggregate* aggregate = arguments->getAsAggregate(); |
| |
| // Process each argument's conversion |
| for (int i = 0; i < function.getParamCount(); ++i) { |
| // At this early point there is a slight ambiguity between whether an aggregate 'arguments' |
| // is the single argument itself or whether its children are the arguments. With only one |
| // argument, take 'arguments' itself as that argument. |
| TIntermTyped* arg = function.getParamCount() == 1 ? arguments->getAsTyped() : (aggregate ? aggregate->getSequence()[i]->getAsTyped() : arguments->getAsTyped()); |
| if (*function[i].type != arg->getType()) { |
| if (function[i].type->getQualifier().isParamInput() && |
| !function[i].type->isCoopMat()) { |
| // In-qualified arguments just need an extra node added above the argument to |
| // convert to the correct type. |
| arg = intermediate.addConversion(EOpFunctionCall, *function[i].type, arg); |
| if (arg) { |
| if (function.getParamCount() == 1) |
| arguments = arg; |
| else { |
| if (aggregate) |
| aggregate->getSequence()[i] = arg; |
| else |
| arguments = arg; |
| } |
| } |
| } |
| } |
| } |
| #endif |
| } |
| |
| // |
| // Add any needed implicit output conversions for function-call arguments. This |
| // can require a new tree topology, complicated further by whether the function |
| // has a return value. |
| // |
| // Returns a node of a subtree that evaluates to the return value of the function. |
| // |
| TIntermTyped* TParseContext::addOutputArgumentConversions(const TFunction& function, TIntermAggregate& intermNode) const |
| { |
| #ifdef GLSLANG_WEB |
| return &intermNode; |
| #else |
| TIntermSequence& arguments = intermNode.getSequence(); |
| |
| // Will there be any output conversions? |
| bool outputConversions = false; |
| for (int i = 0; i < function.getParamCount(); ++i) { |
| if (*function[i].type != arguments[i]->getAsTyped()->getType() && function[i].type->getQualifier().isParamOutput()) { |
| outputConversions = true; |
| break; |
| } |
| } |
| |
| if (! outputConversions) |
| return &intermNode; |
| |
| // Setup for the new tree, if needed: |
| // |
| // Output conversions need a different tree topology. |
| // Out-qualified arguments need a temporary of the correct type, with the call |
| // followed by an assignment of the temporary to the original argument: |
| // void: function(arg, ...) -> ( function(tempArg, ...), arg = tempArg, ...) |
| // ret = function(arg, ...) -> ret = (tempRet = function(tempArg, ...), arg = tempArg, ..., tempRet) |
| // Where the "tempArg" type needs no conversion as an argument, but will convert on assignment. |
| TIntermTyped* conversionTree = nullptr; |
| TVariable* tempRet = nullptr; |
| if (intermNode.getBasicType() != EbtVoid) { |
| // do the "tempRet = function(...), " bit from above |
| tempRet = makeInternalVariable("tempReturn", intermNode.getType()); |
| TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc()); |
| conversionTree = intermediate.addAssign(EOpAssign, tempRetNode, &intermNode, intermNode.getLoc()); |
| } else |
| conversionTree = &intermNode; |
| |
| conversionTree = intermediate.makeAggregate(conversionTree); |
| |
| // Process each argument's conversion |
| for (int i = 0; i < function.getParamCount(); ++i) { |
| if (*function[i].type != arguments[i]->getAsTyped()->getType()) { |
| if (function[i].type->getQualifier().isParamOutput()) { |
| // Out-qualified arguments need to use the topology set up above. |
| // do the " ...(tempArg, ...), arg = tempArg" bit from above |
| TType paramType; |
| paramType.shallowCopy(*function[i].type); |
| if (arguments[i]->getAsTyped()->getType().isParameterized() && |
| !paramType.isParameterized()) { |
| paramType.shallowCopy(arguments[i]->getAsTyped()->getType()); |
| paramType.copyTypeParameters(*arguments[i]->getAsTyped()->getType().getTypeParameters()); |
| } |
| TVariable* tempArg = makeInternalVariable("tempArg", paramType); |
| tempArg->getWritableType().getQualifier().makeTemporary(); |
| TIntermSymbol* tempArgNode = intermediate.addSymbol(*tempArg, intermNode.getLoc()); |
| TIntermTyped* tempAssign = intermediate.addAssign(EOpAssign, arguments[i]->getAsTyped(), tempArgNode, arguments[i]->getLoc()); |
| conversionTree = intermediate.growAggregate(conversionTree, tempAssign, arguments[i]->getLoc()); |
| // replace the argument with another node for the same tempArg variable |
| arguments[i] = intermediate.addSymbol(*tempArg, intermNode.getLoc()); |
| } |
| } |
| } |
| |
| // Finalize the tree topology (see bigger comment above). |
| if (tempRet) { |
| // do the "..., tempRet" bit from above |
| TIntermSymbol* tempRetNode = intermediate.addSymbol(*tempRet, intermNode.getLoc()); |
| conversionTree = intermediate.growAggregate(conversionTree, tempRetNode, intermNode.getLoc()); |
| } |
| conversionTree = intermediate.setAggregateOperator(conversionTree, EOpComma, intermNode.getType(), intermNode.getLoc()); |
| |
| return conversionTree; |
| #endif |
| } |
| |
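| // Check the memory-semantics and storage-class-semantics operands used by the extended |
| // atomic and barrier built-ins of GL_KHR_memory_scope_semantics. |
| // |
| // For illustration only (not part of the original source), a hedged sketch with placeholder |
| // names of calls this accepts and rejects: |
| // |
| //     atomicStore(x, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelease);  // OK |
| //     atomicStore(x, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsAcquire);  // error: acquire on a store |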
| void TParseContext::memorySemanticsCheck(const TSourceLoc& loc, const TFunction& fnCandidate, const TIntermOperator& callNode) |
| { |
| const TIntermSequence* argp = &callNode.getAsAggregate()->getSequence(); |
| |
| //const int gl_SemanticsRelaxed = 0x0; |
| const int gl_SemanticsAcquire = 0x2; |
| const int gl_SemanticsRelease = 0x4; |
| const int gl_SemanticsAcquireRelease = 0x8; |
| const int gl_SemanticsMakeAvailable = 0x2000; |
| const int gl_SemanticsMakeVisible = 0x4000; |
| const int gl_SemanticsVolatile = 0x8000; |
| |
| //const int gl_StorageSemanticsNone = 0x0; |
| const int gl_StorageSemanticsBuffer = 0x40; |
| const int gl_StorageSemanticsShared = 0x100; |
| const int gl_StorageSemanticsImage = 0x800; |
| const int gl_StorageSemanticsOutput = 0x1000; |
| |
| unsigned int semantics = 0, storageClassSemantics = 0; |
| unsigned int semantics2 = 0, storageClassSemantics2 = 0; |
| |
| // Grab the semantics and storage class semantics from the operands, based on opcode |
| switch (callNode.getOp()) { |
| case EOpAtomicAdd: |
| case EOpAtomicMin: |
| case EOpAtomicMax: |
| case EOpAtomicAnd: |
| case EOpAtomicOr: |
| case EOpAtomicXor: |
| case EOpAtomicExchange: |
| case EOpAtomicStore: |
| storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| case EOpAtomicLoad: |
| storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| case EOpAtomicCompSwap: |
| storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| storageClassSemantics2 = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| |
| case EOpImageAtomicAdd: |
| case EOpImageAtomicMin: |
| case EOpImageAtomicMax: |
| case EOpImageAtomicAnd: |
| case EOpImageAtomicOr: |
| case EOpImageAtomicXor: |
| case EOpImageAtomicExchange: |
| case EOpImageAtomicStore: |
| storageClassSemantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| case EOpImageAtomicLoad: |
| storageClassSemantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[4]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| case EOpImageAtomicCompSwap: |
| storageClassSemantics = (*argp)[5]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[6]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| storageClassSemantics2 = (*argp)[7]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics2 = (*argp)[8]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| |
| case EOpBarrier: |
| storageClassSemantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[3]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| case EOpMemoryBarrier: |
| storageClassSemantics = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| semantics = (*argp)[2]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| break; |
| default: |
| break; |
| } |
| |
| if ((semantics & gl_SemanticsAcquire) && |
| (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore)) { |
| error(loc, "gl_SemanticsAcquire must not be used with (image) atomic store", |
| fnCandidate.getName().c_str(), ""); |
| } |
| if ((semantics & gl_SemanticsRelease) && |
| (callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) { |
| error(loc, "gl_SemanticsRelease must not be used with (image) atomic load", |
| fnCandidate.getName().c_str(), ""); |
| } |
| if ((semantics & gl_SemanticsAcquireRelease) && |
| (callNode.getOp() == EOpAtomicStore || callNode.getOp() == EOpImageAtomicStore || |
| callNode.getOp() == EOpAtomicLoad || callNode.getOp() == EOpImageAtomicLoad)) { |
| error(loc, "gl_SemanticsAcquireRelease must not be used with (image) atomic load/store", |
| fnCandidate.getName().c_str(), ""); |
| } |
| if (((semantics | semantics2) & ~(gl_SemanticsAcquire | |
| gl_SemanticsRelease | |
| gl_SemanticsAcquireRelease | |
| gl_SemanticsMakeAvailable | |
| gl_SemanticsMakeVisible | |
| gl_SemanticsVolatile))) { |
| error(loc, "Invalid semantics value", fnCandidate.getName().c_str(), ""); |
| } |
| if (((storageClassSemantics | storageClassSemantics2) & ~(gl_StorageSemanticsBuffer | |
| gl_StorageSemanticsShared | |
| gl_StorageSemanticsImage | |
| gl_StorageSemanticsOutput))) { |
| error(loc, "Invalid storage class semantics value", fnCandidate.getName().c_str(), ""); |
| } |
| |
| if (callNode.getOp() == EOpMemoryBarrier) { |
| if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { |
| error(loc, "Semantics must include exactly one of gl_SemanticsRelease, gl_SemanticsAcquire, or " |
| "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); |
| } |
| } else { |
| if (semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) { |
| if (!IsPow2(semantics & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { |
| error(loc, "Semantics must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or " |
| "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); |
| } |
| } |
| if (semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease)) { |
| if (!IsPow2(semantics2 & (gl_SemanticsAcquire | gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { |
| error(loc, "semUnequal must not include multiple of gl_SemanticsRelease, gl_SemanticsAcquire, or " |
| "gl_SemanticsAcquireRelease", fnCandidate.getName().c_str(), ""); |
| } |
| } |
| } |
| if (callNode.getOp() == EOpMemoryBarrier) { |
| if (storageClassSemantics == 0) { |
| error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), ""); |
| } |
| } |
| if (callNode.getOp() == EOpBarrier && semantics != 0 && storageClassSemantics == 0) { |
| error(loc, "Storage class semantics must not be zero", fnCandidate.getName().c_str(), ""); |
| } |
| if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) && |
| (semantics2 & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { |
| error(loc, "semUnequal must not be gl_SemanticsRelease or gl_SemanticsAcquireRelease", |
| fnCandidate.getName().c_str(), ""); |
| } |
| if ((semantics & gl_SemanticsMakeAvailable) && |
| !(semantics & (gl_SemanticsRelease | gl_SemanticsAcquireRelease))) { |
| error(loc, "gl_SemanticsMakeAvailable requires gl_SemanticsRelease or gl_SemanticsAcquireRelease", |
| fnCandidate.getName().c_str(), ""); |
| } |
| if ((semantics & gl_SemanticsMakeVisible) && |
| !(semantics & (gl_SemanticsAcquire | gl_SemanticsAcquireRelease))) { |
| error(loc, "gl_SemanticsMakeVisible requires gl_SemanticsAcquire or gl_SemanticsAcquireRelease", |
| fnCandidate.getName().c_str(), ""); |
| } |
| if ((semantics & gl_SemanticsVolatile) && |
| (callNode.getOp() == EOpMemoryBarrier || callNode.getOp() == EOpBarrier)) { |
| error(loc, "gl_SemanticsVolatile must not be used with memoryBarrier or controlBarrier", |
| fnCandidate.getName().c_str(), ""); |
| } |
| if ((callNode.getOp() == EOpAtomicCompSwap || callNode.getOp() == EOpImageAtomicCompSwap) && |
| ((semantics ^ semantics2) & gl_SemanticsVolatile)) { |
| error(loc, "semEqual and semUnequal must either both include gl_SemanticsVolatile or neither", |
| fnCandidate.getName().c_str(), ""); |
| } |
| } |
| |
| // |
| // Do additional checking of built-in function calls that is not caught |
| // by normal semantic checks on argument type, extension tagging, etc. |
| // |
| // Assumes there has been a semantically correct match to a built-in function prototype. |
| // |
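| // For illustration only (not part of the original source), hedged sketches, with placeholder |
| // names, of the kinds of arguments checked below: |
| // |
| //     textureOffset(s, uv, ivec2(1, 1));   // OK: constant offset within the texel-offset limits |
| //     textureOffset(s, uv, ivec2(x, 0));   // error: offset must be a compile-time constant |
| //     subgroupClusteredAdd(v, 3);          // error: cluster size must be a power of 2 |
| // |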
| void TParseContext::builtInOpCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermOperator& callNode) |
| { |
| // Set up convenience accessors to the argument(s). There are almost always |
| // multiple arguments for the cases below, but when there might be only one, |
| // check unaryArg first. |
| const TIntermSequence* argp = nullptr; // confusing to use [] syntax on a pointer, so this is to help get a reference |
| const TIntermTyped* unaryArg = nullptr; |
| const TIntermTyped* arg0 = nullptr; |
| if (callNode.getAsAggregate()) { |
| argp = &callNode.getAsAggregate()->getSequence(); |
| if (argp->size() > 0) |
| arg0 = (*argp)[0]->getAsTyped(); |
| } else { |
| assert(callNode.getAsUnaryNode()); |
| unaryArg = callNode.getAsUnaryNode()->getOperand(); |
| arg0 = unaryArg; |
| } |
| |
| TString featureString; |
| const char* feature = nullptr; |
| switch (callNode.getOp()) { |
| #ifndef GLSLANG_WEB |
| case EOpTextureGather: |
| case EOpTextureGatherOffset: |
| case EOpTextureGatherOffsets: |
| { |
| // Figure out which variants are allowed by what extensions, |
| // and what arguments must be constant for which situations. |
| |
| featureString = fnCandidate.getName(); |
| featureString += "(...)"; |
| feature = featureString.c_str(); |
| profileRequires(loc, EEsProfile, 310, nullptr, feature); |
| int compArg = -1; // track which argument, if any, is the constant component argument |
| switch (callNode.getOp()) { |
| case EOpTextureGather: |
| // More than two arguments, a rectangular sampler, or a shadow sampler needs gpu_shader5; |
| // otherwise, GL_ARB_texture_gather suffices. |
| if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) { |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); |
| if (! fnCandidate[0].type->getSampler().shadow) |
| compArg = 2; |
| } else |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); |
| break; |
| case EOpTextureGatherOffset: |
| // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument |
| if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3) |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); |
| else |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); |
| if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion()) |
| profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, |
| "non-constant offset argument"); |
| if (! fnCandidate[0].type->getSampler().shadow) |
| compArg = 3; |
| break; |
| case EOpTextureGatherOffsets: |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); |
| if (! fnCandidate[0].type->getSampler().shadow) |
| compArg = 3; |
| // check for constant offsets |
| if (! (*argp)[fnCandidate[0].type->getSampler().shadow ? 3 : 2]->getAsConstantUnion()) |
| error(loc, "must be a compile-time constant:", feature, "offsets argument"); |
| break; |
| default: |
| break; |
| } |
| |
| if (compArg > 0 && compArg < fnCandidate.getParamCount()) { |
| if ((*argp)[compArg]->getAsConstantUnion()) { |
| int value = (*argp)[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| if (value < 0 || value > 3) |
| error(loc, "must be 0, 1, 2, or 3:", feature, "component argument"); |
| } else |
| error(loc, "must be a compile-time constant:", feature, "component argument"); |
| } |
| |
| bool bias = false; |
| if (callNode.getOp() == EOpTextureGather) |
| bias = fnCandidate.getParamCount() > 3; |
| else if (callNode.getOp() == EOpTextureGatherOffset || |
| callNode.getOp() == EOpTextureGatherOffsets) |
| bias = fnCandidate.getParamCount() > 4; |
| |
| if (bias) { |
| featureString = fnCandidate.getName(); |
| featureString += "with bias argument"; |
| feature = featureString.c_str(); |
| profileRequires(loc, ~EEsProfile, 450, nullptr, feature); |
| requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature); |
| } |
| break; |
| } |
| case EOpSparseTextureGather: |
| case EOpSparseTextureGatherOffset: |
| case EOpSparseTextureGatherOffsets: |
| { |
| bool bias = false; |
| if (callNode.getOp() == EOpSparseTextureGather) |
| bias = fnCandidate.getParamCount() > 4; |
| else if (callNode.getOp() == EOpSparseTextureGatherOffset || |
| callNode.getOp() == EOpSparseTextureGatherOffsets) |
| bias = fnCandidate.getParamCount() > 5; |
| |
| if (bias) { |
| featureString = fnCandidate.getName(); |
| featureString += "with bias argument"; |
| feature = featureString.c_str(); |
| profileRequires(loc, ~EEsProfile, 450, nullptr, feature); |
| requireExtensions(loc, 1, &E_GL_AMD_texture_gather_bias_lod, feature); |
| } |
| |
| break; |
| } |
| |
| case EOpSparseTextureGatherLod: |
| case EOpSparseTextureGatherLodOffset: |
| case EOpSparseTextureGatherLodOffsets: |
| { |
| requireExtensions(loc, 1, &E_GL_ARB_sparse_texture2, fnCandidate.getName().c_str()); |
| break; |
| } |
| |
| case EOpSwizzleInvocations: |
| { |
| if (! (*argp)[1]->getAsConstantUnion()) |
| error(loc, "argument must be compile-time constant", "offset", ""); |
| else { |
| unsigned offset[4] = {}; |
| offset[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); |
| offset[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst(); |
| offset[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst(); |
| offset[3] = (*argp)[1]->getAsConstantUnion()->getConstArray()[3].getUConst(); |
| if (offset[0] > 3 || offset[1] > 3 || offset[2] > 3 || offset[3] > 3) |
| error(loc, "components must be in the range [0, 3]", "offset", ""); |
| } |
| |
| break; |
| } |
| |
| case EOpSwizzleInvocationsMasked: |
| { |
| if (! (*argp)[1]->getAsConstantUnion()) |
| error(loc, "argument must be compile-time constant", "mask", ""); |
| else { |
| unsigned mask[3] = {}; |
| mask[0] = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); |
| mask[1] = (*argp)[1]->getAsConstantUnion()->getConstArray()[1].getUConst(); |
| mask[2] = (*argp)[1]->getAsConstantUnion()->getConstArray()[2].getUConst(); |
| if (mask[0] > 31 || mask[1] > 31 || mask[2] > 31) |
| error(loc, "components must be in the range [0, 31]", "mask", ""); |
| } |
| |
| break; |
| } |
| #endif |
| |
| case EOpTextureOffset: |
| case EOpTextureFetchOffset: |
| case EOpTextureProjOffset: |
| case EOpTextureLodOffset: |
| case EOpTextureProjLodOffset: |
| case EOpTextureGradOffset: |
| case EOpTextureProjGradOffset: |
| { |
| // Handle texture-offset limits checking |
| // Pick which argument has to hold constant offsets |
| int arg = -1; |
| switch (callNode.getOp()) { |
| case EOpTextureOffset: arg = 2; break; |
| case EOpTextureFetchOffset: arg = (arg0->getType().getSampler().isRect()) ? 2 : 3; break; |
| case EOpTextureProjOffset: arg = 2; break; |
| case EOpTextureLodOffset: arg = 3; break; |
| case EOpTextureProjLodOffset: arg = 3; break; |
| case EOpTextureGradOffset: arg = 4; break; |
| case EOpTextureProjGradOffset: arg = 4; break; |
| default: |
| assert(0); |
| break; |
| } |
| |
| if (arg > 0) { |
| |
| #ifndef GLSLANG_WEB |
| bool f16ShadowCompare = (*argp)[1]->getAsTyped()->getBasicType() == EbtFloat16 && arg0->getType().getSampler().shadow; |
| if (f16ShadowCompare) |
| ++arg; |
| #endif |
| if (! (*argp)[arg]->getAsConstantUnion()) |
| error(loc, "argument must be compile-time constant", "texel offset", ""); |
| else { |
| const TType& type = (*argp)[arg]->getAsTyped()->getType(); |
| for (int c = 0; c < type.getVectorSize(); ++c) { |
| int offset = (*argp)[arg]->getAsConstantUnion()->getConstArray()[c].getIConst(); |
| if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset) |
| error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]"); |
| } |
| } |
| } |
| |
| break; |
| } |
| |
| #ifndef GLSLANG_WEB |
| case EOpTraceNV: |
| if (!(*argp)[10]->getAsConstantUnion()) |
| error(loc, "argument must be compile-time constant", "payload number", ""); |
| break; |
| case EOpExecuteCallableNV: |
| if (!(*argp)[1]->getAsConstantUnion()) |
| error(loc, "argument must be compile-time constant", "callable data number", ""); |
| break; |
| |
| case EOpTextureQuerySamples: |
| case EOpImageQuerySamples: |
| // GL_ARB_shader_texture_image_samples |
| profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples"); |
| break; |
| |
| case EOpImageAtomicAdd: |
| case EOpImageAtomicMin: |
| case EOpImageAtomicMax: |
| case EOpImageAtomicAnd: |
| case EOpImageAtomicOr: |
| case EOpImageAtomicXor: |
| case EOpImageAtomicExchange: |
| case EOpImageAtomicCompSwap: |
| case EOpImageAtomicLoad: |
| case EOpImageAtomicStore: |
| { |
| // Make sure the image types have the correct layout() format and correct argument types |
| const TType& imageType = arg0->getType(); |
| if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) { |
| if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui) |
| error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), ""); |
| } else { |
| if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0) |
| error(loc, "only supported on integer images", fnCandidate.getName().c_str(), ""); |
| else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile()) |
| error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), ""); |
| } |
| |
| const size_t maxArgs = imageType.getSampler().isMultiSample() ? 5 : 4; |
| if (argp->size() > maxArgs) { |
| requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); |
| memorySemanticsCheck(loc, fnCandidate, callNode); |
| } |
| |
| break; |
| } |
| |
| case EOpAtomicAdd: |
| case EOpAtomicMin: |
| case EOpAtomicMax: |
| case EOpAtomicAnd: |
| case EOpAtomicOr: |
| case EOpAtomicXor: |
| case EOpAtomicExchange: |
| case EOpAtomicCompSwap: |
| case EOpAtomicLoad: |
| case EOpAtomicStore: |
| { |
| if (argp->size() > 3) { |
| requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); |
| memorySemanticsCheck(loc, fnCandidate, callNode); |
| } else if (arg0->getType().getBasicType() == EbtInt64 || arg0->getType().getBasicType() == EbtUint64) { |
| const char* const extensions[2] = { E_GL_NV_shader_atomic_int64, |
| E_GL_EXT_shader_atomic_int64 }; |
| requireExtensions(loc, 2, extensions, fnCandidate.getName().c_str()); |
| } |
| break; |
| } |
| |
| case EOpInterpolateAtCentroid: |
| case EOpInterpolateAtSample: |
| case EOpInterpolateAtOffset: |
| case EOpInterpolateAtVertex: |
| // Make sure the first argument is an interpolant, or an array element of an interpolant |
| if (arg0->getType().getQualifier().storage != EvqVaryingIn) { |
| // It might still be an array element. |
| // |
| // We could check more, but the semantics of the first argument are already met; the |
| // only way to turn an array into a float/vec* is array dereference and swizzle. |
| // |
| // ES and desktop 4.3 and earlier: swizzles may not be used |
| // desktop 4.4 and later: swizzles may be used |
| bool swizzleOkay = (!isEsProfile()) && (version >= 440); |
| const TIntermTyped* base = TIntermediate::findLValueBase(arg0, swizzleOkay); |
| if (base == nullptr || base->getType().getQualifier().storage != EvqVaryingIn) |
| error(loc, "first argument must be an interpolant, or interpolant-array element", fnCandidate.getName().c_str(), ""); |
| } |
| |
| if (callNode.getOp() == EOpInterpolateAtVertex) { |
| if (!arg0->getType().getQualifier().isExplicitInterpolation()) |
| error(loc, "argument must be qualified as __explicitInterpAMD in", "interpolant", ""); |
| else { |
| if (! (*argp)[1]->getAsConstantUnion()) |
| error(loc, "argument must be compile-time constant", "vertex index", ""); |
| else { |
| unsigned vertexIdx = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getUConst(); |
| if (vertexIdx > 2) |
| error(loc, "must be in the range [0, 2]", "vertex index", ""); |
| } |
| } |
| } |
| break; |
| |
| case EOpEmitStreamVertex: |
| case EOpEndStreamPrimitive: |
| intermediate.setMultiStream(); |
| break; |
| |
| case EOpSubgroupClusteredAdd: |
| case EOpSubgroupClusteredMul: |
| case EOpSubgroupClusteredMin: |
| case EOpSubgroupClusteredMax: |
| case EOpSubgroupClusteredAnd: |
| case EOpSubgroupClusteredOr: |
| case EOpSubgroupClusteredXor: |
| // The <clusterSize> as used in the subgroupClustered<op>() operations must be: |
| // - An integral constant expression. |
| // - At least 1. |
| // - A power of 2. |
| if ((*argp)[1]->getAsConstantUnion() == nullptr) |
| error(loc, "argument must be compile-time constant", "cluster size", ""); |
| else { |
| int size = (*argp)[1]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| if (size < 1) |
| error(loc, "argument must be at least 1", "cluster size", ""); |
| else if (!IsPow2(size)) |
| error(loc, "argument must be a power of 2", "cluster size", ""); |
| } |
| break; |
| |
| case EOpSubgroupBroadcast: |
| if (spvVersion.spv < EShTargetSpv_1_5) { |
| // <id> must be an integral constant expression. |
| if ((*argp)[1]->getAsConstantUnion() == nullptr) |
| error(loc, "argument must be compile-time constant", "id", ""); |
| } |
| break; |
| |
| case EOpBarrier: |
| case EOpMemoryBarrier: |
| if (argp->size() > 0) { |
| requireExtensions(loc, 1, &E_GL_KHR_memory_scope_semantics, fnCandidate.getName().c_str()); |
| memorySemanticsCheck(loc, fnCandidate, callNode); |
| } |
| break; |
| #endif |
| |
| default: |
| break; |
| } |
| |
| // Texture operations on texture objects (aside from texelFetch on a |
| // textureBuffer) require EXT_samplerless_texture_functions. |
| switch (callNode.getOp()) { |
| case EOpTextureQuerySize: |
| case EOpTextureQueryLevels: |
| case EOpTextureQuerySamples: |
| case EOpTextureFetch: |
| case EOpTextureFetchOffset: |
| { |
| const TSampler& sampler = fnCandidate[0].type->getSampler(); |
| |
| const bool isTexture = sampler.isTexture() && !sampler.isCombined(); |
| const bool isBuffer = sampler.isBuffer(); |
| const bool isFetch = callNode.getOp() == EOpTextureFetch || callNode.getOp() == EOpTextureFetchOffset; |
| |
| if (isTexture && (!isBuffer || !isFetch)) |
| requireExtensions(loc, 1, &E_GL_EXT_samplerless_texture_functions, fnCandidate.getName().c_str()); |
| |
| break; |
| } |
| |
| default: |
| break; |
| } |
| |
| if (callNode.getOp() > EOpSubgroupGuardStart && callNode.getOp() < EOpSubgroupGuardStop) { |
| // these require SPIR-V 1.3 |
| if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_3) |
| error(loc, "requires SPIR-V 1.3", "subgroup op", ""); |
| |
| // Check that if extended types are being used that the correct extensions are enabled. |
| if (arg0 != nullptr) { |
| const TType& type = arg0->getType(); |
| switch (type.getBasicType()) { |
| default: |
| break; |
| case EbtInt8: |
| case EbtUint8: |
| requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int8, type.getCompleteString().c_str()); |
| break; |
| case EbtInt16: |
| case EbtUint16: |
| requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int16, type.getCompleteString().c_str()); |
| break; |
| case EbtInt64: |
| case EbtUint64: |
| requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_int64, type.getCompleteString().c_str()); |
| break; |
| case EbtFloat16: |
| requireExtensions(loc, 1, &E_GL_EXT_shader_subgroup_extended_types_float16, type.getCompleteString().c_str()); |
| break; |
| } |
| } |
| } |
| } |
| |
| #ifndef GLSLANG_WEB |
| |
| extern bool PureOperatorBuiltins; |
| |
| // Deprecated! Use PureOperatorBuiltins == true instead, in which case this |
| // functionality is handled in builtInOpCheck() instead of here. |
| // |
| // Do additional checking of built-in function calls that were not mapped |
| // to built-in operations (e.g., texturing functions). |
| // |
| // Assumes there has been a semantically correct match to a built-in function. |
| // |
| void TParseContext::nonOpBuiltInCheck(const TSourceLoc& loc, const TFunction& fnCandidate, TIntermAggregate& callNode) |
| { |
| // Further maintenance of this function is deprecated, because the "correct" |
| // future-oriented design is to not have to do string compares on function names. |
| |
| // If PureOperatorBuiltins == true, then all built-ins should be mapped |
| // to a TOperator, and this function would then never get called. |
| |
| assert(PureOperatorBuiltins == false); |
| |
| // built-in texturing functions get their return value precision from the precision of the sampler |
| if (fnCandidate.getType().getQualifier().precision == EpqNone && |
| fnCandidate.getParamCount() > 0 && fnCandidate[0].type->getBasicType() == EbtSampler) |
| callNode.getQualifier().precision = callNode.getSequence()[0]->getAsTyped()->getQualifier().precision; |
| |
| if (fnCandidate.getName().compare(0, 7, "texture") == 0) { |
| if (fnCandidate.getName().compare(0, 13, "textureGather") == 0) { |
| TString featureString = fnCandidate.getName() + "(...)"; |
| const char* feature = featureString.c_str(); |
| profileRequires(loc, EEsProfile, 310, nullptr, feature); |
| |
| int compArg = -1; // track which argument, if any, is the constant component argument |
| if (fnCandidate.getName().compare("textureGatherOffset") == 0) { |
| // GL_ARB_texture_gather is good enough for 2D non-shadow textures with no component argument |
| if (fnCandidate[0].type->getSampler().dim == Esd2D && ! fnCandidate[0].type->getSampler().shadow && fnCandidate.getParamCount() == 3) |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); |
| else |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); |
| int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2; |
| if (! callNode.getSequence()[offsetArg]->getAsConstantUnion()) |
| profileRequires(loc, EEsProfile, 320, Num_AEP_gpu_shader5, AEP_gpu_shader5, |
| "non-constant offset argument"); |
| if (! fnCandidate[0].type->getSampler().shadow) |
| compArg = 3; |
| } else if (fnCandidate.getName().compare("textureGatherOffsets") == 0) { |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); |
| if (! fnCandidate[0].type->getSampler().shadow) |
| compArg = 3; |
| // check for constant offsets |
| int offsetArg = fnCandidate[0].type->getSampler().shadow ? 3 : 2; |
| if (! callNode.getSequence()[offsetArg]->getAsConstantUnion()) |
| error(loc, "must be a compile-time constant:", feature, "offsets argument"); |
| } else if (fnCandidate.getName().compare("textureGather") == 0) { |
| // More than two arguments, a rectangular sampler, or a shadow sampler needs gpu_shader5; |
| // otherwise, GL_ARB_texture_gather suffices. |
| if (fnCandidate.getParamCount() > 2 || fnCandidate[0].type->getSampler().dim == EsdRect || fnCandidate[0].type->getSampler().shadow) { |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_gpu_shader5, feature); |
| if (! fnCandidate[0].type->getSampler().shadow) |
| compArg = 2; |
| } else |
| profileRequires(loc, ~EEsProfile, 400, E_GL_ARB_texture_gather, feature); |
| } |
| |
| if (compArg > 0 && compArg < fnCandidate.getParamCount()) { |
| if (callNode.getSequence()[compArg]->getAsConstantUnion()) { |
| int value = callNode.getSequence()[compArg]->getAsConstantUnion()->getConstArray()[0].getIConst(); |
| if (value < 0 || value > 3) |
| error(loc, "must be 0, 1, 2, or 3:", feature, "component argument"); |
| } else |
| error(loc, "must be a compile-time constant:", feature, "component argument"); |
| } |
| } else { |
| // this is only for functions not starting with "textureGather"... |
| if (fnCandidate.getName().find("Offset") != TString::npos) { |
| |
| // Handle texture-offset limits checking |
| int arg = -1; |
| if (fnCandidate.getName().compare("textureOffset") == 0) |
| arg = 2; |
| else if (fnCandidate.getName().compare("texelFetchOffset") == 0) |
| arg = 3; |
| else if (fnCandidate.getName().compare("textureProjOffset") == 0) |
| arg = 2; |
| else if (fnCandidate.getName().compare("textureLodOffset") == 0) |
| arg = 3; |
| else if (fnCandidate.getName().compare("textureProjLodOffset") == 0) |
| arg = 3; |
| else if (fnCandidate.getName().compare("textureGradOffset") == 0) |
| arg = 4; |
| else if (fnCandidate.getName().compare("textureProjGradOffset") == 0) |
| arg = 4; |
| |
| if (arg > 0) { |
| if (! callNode.getSequence()[arg]->getAsConstantUnion()) |
| error(loc, "argument must be compile-time constant", "texel offset", ""); |
| else { |
| const TType& type = callNode.getSequence()[arg]->getAsTyped()->getType(); |
| for (int c = 0; c < type.getVectorSize(); ++c) { |
| int offset = callNode.getSequence()[arg]->getAsConstantUnion()->getConstArray()[c].getIConst(); |
| if (offset > resources.maxProgramTexelOffset || offset < resources.minProgramTexelOffset) |
| error(loc, "value is out of range:", "texel offset", "[gl_MinProgramTexelOffset, gl_MaxProgramTexelOffset]"); |
| } |
| } |
| } |
| } |
| } |
| } |
| |
| // GL_ARB_shader_texture_image_samples |
| if (fnCandidate.getName().compare(0, 14, "textureSamples") == 0 || fnCandidate.getName().compare(0, 12, "imageSamples") == 0) |
| profileRequires(loc, ~EEsProfile, 450, E_GL_ARB_shader_texture_image_samples, "textureSamples and imageSamples"); |
| |
| if (fnCandidate.getName().compare(0, 11, "imageAtomic") == 0) { |
| const TType& imageType = callNode.getSequence()[0]->getAsTyped()->getType(); |
| if (imageType.getSampler().type == EbtInt || imageType.getSampler().type == EbtUint) { |
| if (imageType.getQualifier().getFormat() != ElfR32i && imageType.getQualifier().getFormat() != ElfR32ui) |
| error(loc, "only supported on image with format r32i or r32ui", fnCandidate.getName().c_str(), ""); |
| } else { |
| if (fnCandidate.getName().compare(0, 19, "imageAtomicExchange") != 0) |
| error(loc, "only supported on integer images", fnCandidate.getName().c_str(), ""); |
| else if (imageType.getQualifier().getFormat() != ElfR32f && isEsProfile()) |
| error(loc, "only supported on image with format r32f", fnCandidate.getName().c_str(), ""); |
| } |
| } |
| } |
| |
| #endif |
| |
| // |
| // Do any extra checking for a user function call. |
| // |
| void TParseContext::userFunctionCallCheck(const TSourceLoc& loc, TIntermAggregate& callNode) |
| { |
| TIntermSequence& arguments = callNode.getSequence(); |
| |
| for (int i = 0; i < (int)arguments.size(); ++i) |
| samplerConstructorLocationCheck(loc, "call argument", arguments[i]); |
| } |
| |
| // |
| // Emit an error if this is a sampler constructor |
| // |
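| // For illustration only (not part of the original source), a hedged sketch; 'myFunc', 't', |
| // 's', and 'uv' are placeholders: |
| // |
| //     vec4 v = texture(sampler2D(t, s), uv);   // OK: constructor at its point of use |
| //     myFunc(sampler2D(t, s));                 // error: sampler constructor must appear at point of use |
| // |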
| void TParseContext::samplerConstructorLocationCheck(const TSourceLoc& loc, const char* token, TIntermNode* node) |
| { |
| if (node->getAsOperator() && node->getAsOperator()->getOp() == EOpConstructTextureSampler) |
| error(loc, "sampler constructor must appear at point of use", token, ""); |
| } |
| |
| // |
| // Handle seeing a built-in constructor in a grammar production. |
| // |
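| // For illustration only (not part of the original source), a hedged sketch of the arrayed |
| // constructor case checked below: |
| // |
| //     float a[3] = float[3](1.0, 2.0, 3.0);   // needs desktop 1.20 (or GL_3DL_array_objects) / ES 3.00 |
| // |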
| TFunction* TParseContext::handleConstructorCall(const TSourceLoc& loc, const TPublicType& publicType) |
| { |
| TType type(publicType); |
| type.getQualifier().precision = EpqNone; |
| |
| if (type.isArray()) { |
| profileRequires(loc, ENoProfile, 120, E_GL_3DL_array_objects, "arrayed constructor"); |
| profileRequires(loc, EEsProfile, 300, nullptr, "arrayed constructor"); |
| } |
| |
| TOperator op = intermediate.mapTypeToConstructorOp(type); |
| |
| if (op == EOpNull) { |
| error(loc, "cannot construct this type", type.getBasicString(), ""); |
| op = EOpConstructFloat; |
| TType errorType(EbtFloat); |
| type.shallowCopy(errorType); |
| } |
| |
| TString empty(""); |
| |
| return new TFunction(&empty, type, op); |
| } |
| |
| // Handle seeing a precision qualifier in the grammar. |
| void TParseContext::handlePrecisionQualifier(const TSourceLoc& /*loc*/, TQualifier& qualifier, TPrecisionQualifier precision) |
| { |
| if (obeyPrecisionQualifiers()) |
| qualifier.precision = precision; |
| } |
| |
| // Check for messages to give on seeing a precision qualifier used in a |
| // declaration in the grammar. |
| void TParseContext::checkPrecisionQualifier(const TSourceLoc& loc, TPrecisionQualifier) |
| { |
| if (precisionManager.shouldWarnAboutDefaults()) { |
| warn(loc, "all default precisions are highp; use precision statements to quiet warning, e.g.:\n" |
| " \"precision mediump int; precision highp float;\"", "", ""); |
| precisionManager.defaultWarningGiven(); |
| } |
| } |
| |
| // |
| // Same error message for all places assignments don't work. |
| // |
| void TParseContext::assignError(const TSourceLoc& loc, const char* op, TString left, TString right) |
| { |
| error(loc, "", op, "cannot convert from '%s' to '%s'", |
| right.c_str(), left.c_str()); |
| } |
| |
| // |
| // Same error message for all places unary operations don't work. |
| // |
| void TParseContext::unaryOpError(const TSourceLoc& loc, const char* op, TString operand) |
| { |
| error(loc, " wrong operand type", op, |
| "no operation '%s' exists that takes an operand of type %s (or there is no acceptable conversion)", |
| op, operand.c_str()); |
| } |
| |
| // |
| // Same error message for all places binary operations don't work. |
| // |
| void TParseContext::binaryOpError(const TSourceLoc& loc, const char* op, TString left, TString right) |
| { |
| error(loc, " wrong operand types:", op, |
| "no operation '%s' exists that takes a left-hand operand of type '%s' and " |
| "a right operand of type '%s' (or there is no acceptable conversion)", |
| op, left.c_str(), right.c_str()); |
| } |
| |
| // |
| // A basic type of EbtVoid signals that the name string was seen in the source but |
| // was not found as a variable in the symbol table. If so, give the error |
| // message and insert a dummy variable into the symbol table to prevent future errors. |
| // |
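| // For illustration only (not part of the original source), a hedged sketch when compiling |
| // for Vulkan: |
| // |
| //     int i = gl_VertexID;   // error: undeclared identifier (Did you mean gl_VertexIndex?) |
| // |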
| void TParseContext::variableCheck(TIntermTyped*& nodePtr) |
| { |
| TIntermSymbol* symbol = nodePtr->getAsSymbolNode(); |
| if (! symbol) |
| return; |
| |
| if (symbol->getType().getBasicType() == EbtVoid) { |
| const char *extraInfoFormat = ""; |
| if (spvVersion.vulkan != 0 && symbol->getName() == "gl_VertexID") { |
| extraInfoFormat = "(Did you mean gl_VertexIndex?)"; |
| } else if (spvVersion.vulkan != 0 && symbol->getName() == "gl_InstanceID") { |
| extraInfoFormat = "(Did you mean gl_InstanceIndex?)"; |
| } |
| error(symbol->getLoc(), "undeclared identifier", symbol->getName().c_str(), extraInfoFormat); |
| |
| // Add to symbol table to prevent future error messages on the same name |
| if (symbol->getName().size() > 0) { |
| TVariable* fakeVariable = new TVariable(&symbol->getName(), TType(EbtFloat)); |
| symbolTable.insert(*fakeVariable); |
| |
| // substitute a symbol node for this new variable |
| nodePtr = intermediate.addSymbol(*fakeVariable, symbol->getLoc()); |
| } |
| } else { |
| switch (symbol->getQualifier().storage) { |
| case EvqPointCoord: |
| profileRequires(symbol->getLoc(), ENoProfile, 120, nullptr, "gl_PointCoord"); |
| break; |
| default: break; // some compilers want this |
| } |
| } |
| } |
| |
| // |
| // Test, and if necessary emit an error, to see whether the node is really |
| // an l-value that can be operated on this way. |
| // |
| // Returns true if there was an error. |
| // |
| bool TParseContext::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node) |
| { |
| TIntermBinary* binaryNode = node->getAsBinaryNode(); |
| |
| if (binaryNode) { |
| bool errorReturn = false; |
| |
| switch(binaryNode->getOp()) { |
| #ifndef GLSLANG_WEB |
| case EOpIndexDirect: |
| case EOpIndexIndirect: |
| // ... tessellation control shader ... |
| // If a per-vertex output variable is used as an l-value, it is a |
| // compile-time or link-time error if the expression indicating the |
| // vertex index is not the identifier gl_InvocationID. |
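| // |
| // For illustration only (not part of the original source), a hedged sketch: |
| // |
| //     gl_out[gl_InvocationID].gl_Position = p;   // OK |
| //     gl_out[0].gl_Position = p;                 // error: must be indexed with gl_InvocationID |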
| if (language == EShLangTessControl) { |
| const TType& leftType = binaryNode->getLeft()->getType(); |
| if (leftType.getQualifier().storage == EvqVaryingOut && ! leftType.getQualifier().patch && binaryNode->getLeft()->getAsSymbolNode()) { |
| // we have a per-vertex output |
| const TIntermSymbol* rightSymbol = binaryNode->getRight()->getAsSymbolNode(); |
| if (! rightSymbol || rightSymbol->getQualifier().builtIn != EbvInvocationId) |
| error(loc, "tessellation-control per-vertex output l-value must be indexed with gl_InvocationID", "[]", ""); |
| } |
| } |
| break; // left node is checked by base class |
| #endif |
| case EOpVectorSwizzle: |
| errorReturn = lValueErrorCheck(loc, op, binaryNode->getLeft()); |
| if (!errorReturn) |