| // |
| // Copyright (C) 2002-2005 3Dlabs Inc. Ltd. |
| // Copyright (C) 2012-2015 LunarG, Inc. |
| // Copyright (C) 2015-2020 Google, Inc. |
| // Copyright (C) 2017 ARM Limited. |
| // |
| // All rights reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // |
| // Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // |
| // Redistributions in binary form must reproduce the above |
| // copyright notice, this list of conditions and the following |
| // disclaimer in the documentation and/or other materials provided |
| // with the distribution. |
| // |
| // Neither the name of 3Dlabs Inc. Ltd. nor the names of its |
| // contributors may be used to endorse or promote products derived |
| // from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| // COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| // BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN |
| // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| // POSSIBILITY OF SUCH DAMAGE. |
| // |
| |
| // |
| // Build the intermediate representation. |
| // |
| |
| #include "localintermediate.h" |
| #include "RemoveTree.h" |
| #include "SymbolTable.h" |
| #include "propagateNoContraction.h" |
| |
| #include <cfloat> |
| #include <utility> |
| #include <tuple> |
| |
| namespace glslang { |
| |
| //////////////////////////////////////////////////////////////////////////// |
| // |
// The first set of functions helps build the intermediate representation.
| // These functions are not member functions of the nodes. |
| // They are called from parser productions. |
| // |
| ///////////////////////////////////////////////////////////////////////////// |
| |
| // |
| // Add a terminal node for an identifier in an expression. |
| // |
| // Returns the added node. |
| // |
| |
| TIntermSymbol* TIntermediate::addSymbol(int id, const TString& name, const TType& type, const TConstUnionArray& constArray, |
| TIntermTyped* constSubtree, const TSourceLoc& loc) |
| { |
| TIntermSymbol* node = new TIntermSymbol(id, name, type); |
| node->setLoc(loc); |
| node->setConstArray(constArray); |
| node->setConstSubtree(constSubtree); |
| |
| return node; |
| } |
| |
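//
// Make a new symbol node mirroring an existing one: same id, name, type,
// and constant data.
//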
| TIntermSymbol* TIntermediate::addSymbol(const TIntermSymbol& intermSymbol) |
| { |
| return addSymbol(intermSymbol.getId(), |
| intermSymbol.getName(), |
| intermSymbol.getType(), |
| intermSymbol.getConstArray(), |
| intermSymbol.getConstSubtree(), |
| intermSymbol.getLoc()); |
| } |
| |
| TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable) |
| { |
| glslang::TSourceLoc loc; // just a null location |
| loc.init(); |
| |
| return addSymbol(variable, loc); |
| } |
| |
| TIntermSymbol* TIntermediate::addSymbol(const TVariable& variable, const TSourceLoc& loc) |
| { |
| return addSymbol(variable.getUniqueId(), variable.getName(), variable.getType(), variable.getConstArray(), variable.getConstSubtree(), loc); |
| } |
| |
| TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc) |
| { |
| TConstUnionArray unionArray; // just a null constant |
| |
| return addSymbol(0, "", type, unionArray, nullptr, loc); |
| } |
| |
| // |
| // Connect two nodes with a new parent that does a binary operation on the nodes. |
| // |
| // Returns the added node. |
| // |
// Returns nullptr if the needed conversions and promotions could not be done.
| // |
| TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) |
| { |
| // No operations work on blocks |
| if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock) |
| return nullptr; |
| |
| // Convert "reference +/- int" and "reference - reference" to integer math |
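    // For example (illustrative GLSL, with GL_EXT_buffer_reference2):
    //     layout(buffer_reference) buffer T { int x; };
    //     t + 2     // advances the address by 2 * sizeof(T)
    //     t1 - t0   // yields how many T-sized strides separate the references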
| if ((op == EOpAdd || op == EOpSub) && extensionRequested(E_GL_EXT_buffer_reference2)) { |
| |
| // No addressing math on struct with unsized array. |
| if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) || |
| (right->isReference() && right->getType().getReferentType()->containsUnsizedArray())) { |
| return nullptr; |
| } |
| |
| if (left->isReference() && isTypeInt(right->getBasicType())) { |
| const TType& referenceType = left->getType(); |
| TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(left->getType()), loc, true); |
| left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64)); |
| |
| right = createConversion(EbtInt64, right); |
| right = addBinaryMath(EOpMul, right, size, loc); |
| |
| TIntermTyped *node = addBinaryMath(op, left, right, loc); |
| node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType); |
| return node; |
| } |
| |
| if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) { |
| const TType& referenceType = right->getType(); |
| TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true); |
| right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64)); |
| |
| left = createConversion(EbtInt64, left); |
| left = addBinaryMath(EOpMul, left, size, loc); |
| |
| TIntermTyped *node = addBinaryMath(op, left, right, loc); |
| node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType); |
| return node; |
| } |
| |
| if (op == EOpSub && left->isReference() && right->isReference()) { |
| TIntermConstantUnion* size = addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true); |
| |
| left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64)); |
| right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64)); |
| |
| left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64)); |
| right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64)); |
| |
| left = addBinaryMath(EOpSub, left, right, loc); |
| |
| TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc); |
| return node; |
| } |
| |
| // No other math operators supported on references |
| if (left->isReference() || right->isReference()) { |
| return nullptr; |
| } |
| } |
| |
| // Try converting the children's base types to compatible types. |
| auto children = addConversion(op, left, right); |
| left = std::get<0>(children); |
| right = std::get<1>(children); |
| |
| if (left == nullptr || right == nullptr) |
| return nullptr; |
| |
| // Convert the children's type shape to be compatible. |
| addBiShapeConversion(op, left, right); |
| if (left == nullptr || right == nullptr) |
| return nullptr; |
| |
| // |
| // Need a new node holding things together. Make |
| // one and promote it to the right type. |
| // |
| TIntermBinary* node = addBinaryNode(op, left, right, loc); |
| if (! promote(node)) |
| return nullptr; |
| |
| node->updatePrecision(); |
| |
| // |
| // If they are both (non-specialization) constants, they must be folded. |
| // (Unless it's the sequence (comma) operator, but that's handled in addComma().) |
| // |
| TIntermConstantUnion *leftTempConstant = node->getLeft()->getAsConstantUnion(); |
| TIntermConstantUnion *rightTempConstant = node->getRight()->getAsConstantUnion(); |
| if (leftTempConstant && rightTempConstant) { |
| TIntermTyped* folded = leftTempConstant->fold(node->getOp(), rightTempConstant); |
| if (folded) |
| return folded; |
| } |
| |
    // If spec-constantness can propagate and the operation is an allowed
    // specialization-constant operation, make the result a spec-constant.
| if (specConstantPropagates(*node->getLeft(), *node->getRight()) && isSpecializationOperation(*node)) |
| node->getWritableType().getQualifier().makeSpecConstant(); |
| |
    // If nonuniformness must propagate, mark the result nonuniform.
| if ((node->getLeft()->getQualifier().isNonUniform() || node->getRight()->getQualifier().isNonUniform()) && |
| isNonuniformPropagating(node->getOp())) |
| node->getWritableType().getQualifier().nonUniform = true; |
| |
| return node; |
| } |
| |
| // |
| // Low level: add binary node (no promotions or other argument modifications) |
| // |
| TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) const |
| { |
| // build the node |
| TIntermBinary* node = new TIntermBinary(op); |
| if (loc.line == 0) |
| loc = left->getLoc(); |
| node->setLoc(loc); |
| node->setLeft(left); |
| node->setRight(right); |
| |
| return node; |
| } |
| |
| // |
// Like the non-type form above, but also sets the node's type.
| // |
| TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc, const TType& type) const |
| { |
| TIntermBinary* node = addBinaryNode(op, left, right, loc); |
| node->setType(type); |
| return node; |
| } |
| |
| // |
| // Low level: add unary node (no promotions or other argument modifications) |
| // |
| TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc) const |
| { |
| TIntermUnary* node = new TIntermUnary(op); |
| if (loc.line == 0) |
| loc = child->getLoc(); |
| node->setLoc(loc); |
| node->setOperand(child); |
| |
| return node; |
| } |
| |
| // |
// Like the non-type form above, but also sets the node's type.
| // |
| TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc, const TType& type) const |
| { |
| TIntermUnary* node = addUnaryNode(op, child, loc); |
| node->setType(type); |
| return node; |
| } |
| |
| // |
| // Connect two nodes through an assignment. |
| // |
| // Returns the added node. |
| // |
| // Returns nullptr if the 'right' type could not be converted to match the 'left' type, |
| // or the resulting operation cannot be properly promoted. |
| // |
| TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) |
| { |
| // No block assignment |
| if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock) |
| return nullptr; |
| |
| // Convert "reference += int" to "reference = reference + int". We need this because the |
| // "reference + int" calculation involves a cast back to the original type, which makes it |
| // not an lvalue. |
| if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference() && |
| extensionRequested(E_GL_EXT_buffer_reference2)) { |
| |
| if (!(right->getType().isScalar() && right->getType().isIntegerDomain())) |
| return nullptr; |
| |
| TIntermTyped* node = addBinaryMath(op == EOpAddAssign ? EOpAdd : EOpSub, left, right, loc); |
| if (!node) |
| return nullptr; |
| |
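        // The add/sub result is not an lvalue, so re-add the symbol node to act
        // as the assignment target ('left' is expected to be a symbol node here).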
| TIntermSymbol* symbol = left->getAsSymbolNode(); |
| left = addSymbol(*symbol); |
| |
| node = addAssign(EOpAssign, left, node, loc); |
| return node; |
| } |
| |
| // |
| // Like adding binary math, except the conversion can only go |
| // from right to left. |
| // |
| |
| // convert base types, nullptr return means not possible |
| right = addConversion(op, left->getType(), right); |
| if (right == nullptr) |
| return nullptr; |
| |
| // convert shape |
| right = addUniShapeConversion(op, left->getType(), right); |
| |
| // build the node |
| TIntermBinary* node = addBinaryNode(op, left, right, loc); |
| |
| if (! promote(node)) |
| return nullptr; |
| |
| node->updatePrecision(); |
| |
| return node; |
| } |
| |
| // |
| // Connect two nodes through an index operator, where the left node is the base |
| // of an array or struct, and the right node is a direct or indirect offset. |
| // |
| // Returns the added node. |
| // The caller should set the type of the returned node. |
| // |
| TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc loc) |
| { |
| // caller should set the type |
| return addBinaryNode(op, base, index, loc); |
| } |
| |
| // |
| // Add one node as the parent of another that it operates on. |
| // |
| // Returns the added node. |
| // |
| TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSourceLoc loc) |
| { |
    if (child == nullptr)
| return nullptr; |
| |
| if (child->getType().getBasicType() == EbtBlock) |
| return nullptr; |
| |
| switch (op) { |
| case EOpLogicalNot: |
| if (getSource() == EShSourceHlsl) { |
| break; // HLSL can promote logical not |
| } |
| |
| if (child->getType().getBasicType() != EbtBool || child->getType().isMatrix() || child->getType().isArray() || child->getType().isVector()) { |
| return nullptr; |
| } |
| break; |
| |
| case EOpPostIncrement: |
| case EOpPreIncrement: |
| case EOpPostDecrement: |
| case EOpPreDecrement: |
| case EOpNegative: |
| if (child->getType().getBasicType() == EbtStruct || child->getType().isArray()) |
| return nullptr; |
| default: break; // some compilers want this |
| } |
| |
| // |
| // Do we need to promote the operand? |
| // |
| TBasicType newType = EbtVoid; |
| switch (op) { |
| case EOpConstructBool: newType = EbtBool; break; |
| case EOpConstructFloat: newType = EbtFloat; break; |
| case EOpConstructInt: newType = EbtInt; break; |
| case EOpConstructUint: newType = EbtUint; break; |
| #ifndef GLSLANG_WEB |
| case EOpConstructInt8: newType = EbtInt8; break; |
| case EOpConstructUint8: newType = EbtUint8; break; |
| case EOpConstructInt16: newType = EbtInt16; break; |
| case EOpConstructUint16: newType = EbtUint16; break; |
| case EOpConstructInt64: newType = EbtInt64; break; |
| case EOpConstructUint64: newType = EbtUint64; break; |
| case EOpConstructDouble: newType = EbtDouble; break; |
| case EOpConstructFloat16: newType = EbtFloat16; break; |
| #endif |
| default: break; // some compilers want this |
| } |
| |
| if (newType != EbtVoid) { |
| child = addConversion(op, TType(newType, EvqTemporary, child->getVectorSize(), |
| child->getMatrixCols(), |
| child->getMatrixRows(), |
| child->isVector()), |
| child); |
| if (child == nullptr) |
| return nullptr; |
| } |
| |
| // |
| // For constructors, we are now done, it was all in the conversion. |
| // TODO: but, did this bypass constant folding? |
| // |
| switch (op) { |
| case EOpConstructInt8: |
| case EOpConstructUint8: |
| case EOpConstructInt16: |
| case EOpConstructUint16: |
| case EOpConstructInt: |
| case EOpConstructUint: |
| case EOpConstructInt64: |
| case EOpConstructUint64: |
| case EOpConstructBool: |
| case EOpConstructFloat: |
| case EOpConstructDouble: |
| case EOpConstructFloat16: |
| return child; |
| default: break; // some compilers want this |
| } |
| |
| // |
| // Make a new node for the operator. |
| // |
| TIntermUnary* node = addUnaryNode(op, child, loc); |
| |
| if (! promote(node)) |
| return nullptr; |
| |
| node->updatePrecision(); |
| |
| // If it's a (non-specialization) constant, it must be folded. |
| if (node->getOperand()->getAsConstantUnion()) |
| return node->getOperand()->getAsConstantUnion()->fold(op, node->getType()); |
| |
| // If it's a specialization constant, the result is too, |
| // if the operation is allowed for specialization constants. |
| if (node->getOperand()->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*node)) |
| node->getWritableType().getQualifier().makeSpecConstant(); |
| |
    // If nonuniformness must propagate, mark the result nonuniform.
| if (node->getOperand()->getQualifier().isNonUniform() && isNonuniformPropagating(node->getOp())) |
| node->getWritableType().getQualifier().nonUniform = true; |
| |
| return node; |
| } |
| |
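//
// Add a call to a built-in function mapped directly to the operator 'op'.
// Unary calls become unary nodes (folded when the operand is a constant);
// everything else is handled as an aggregate via setAggregateOperator().
//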
| TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOperator op, bool unary, |
| TIntermNode* childNode, const TType& returnType) |
| { |
| if (unary) { |
| // |
| // Treat it like a unary operator. |
        // addUnaryMath() should get the type correct on its own,
        // including constness (which can differ from the prototype).
| // |
| TIntermTyped* child = childNode->getAsTyped(); |
| if (child == nullptr) |
| return nullptr; |
| |
| if (child->getAsConstantUnion()) { |
| TIntermTyped* folded = child->getAsConstantUnion()->fold(op, returnType); |
| if (folded) |
| return folded; |
| } |
| |
| return addUnaryNode(op, child, child->getLoc(), returnType); |
| } else { |
        // setAggregateOperator() calls fold() for constant folding
| TIntermTyped* node = setAggregateOperator(childNode, op, returnType, loc); |
| |
| return node; |
| } |
| } |
| |
| // |
| // This is the safe way to change the operator on an aggregate, as it |
// does lots of error checking and fixing, especially for establishing
// a function call's operation on its set of parameters. Sequences
| // of instructions are also aggregates, but they just directly set |
| // their operator to EOpSequence. |
| // |
| // Returns an aggregate node, which could be the one passed in if |
| // it was already an aggregate. |
| // |
| TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type, TSourceLoc loc) |
| { |
| TIntermAggregate* aggNode; |
| |
| // |
| // Make sure we have an aggregate. If not turn it into one. |
| // |
| if (node != nullptr) { |
| aggNode = node->getAsAggregate(); |
| if (aggNode == nullptr || aggNode->getOp() != EOpNull) { |
| // |
| // Make an aggregate containing this node. |
| // |
| aggNode = new TIntermAggregate(); |
| aggNode->getSequence().push_back(node); |
| if (loc.line == 0) |
| loc = node->getLoc(); |
| } |
| } else |
| aggNode = new TIntermAggregate(); |
| |
| // |
| // Set the operator. |
| // |
| aggNode->setOperator(op); |
| if (loc.line != 0) |
| aggNode->setLoc(loc); |
| |
| aggNode->setType(type); |
| |
| return fold(aggNode); |
| } |
| |
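//
// Check whether the basic type of 'node' allows the operation 'op' to
// convert it at all; opaque types are mostly not convertible.
//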
| bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const |
| { |
| // |
| // Does the base type even allow the operation? |
| // |
| switch (node->getBasicType()) { |
| case EbtVoid: |
| return false; |
| case EbtAtomicUint: |
| case EbtSampler: |
| case EbtAccStruct: |
| // opaque types can be passed to functions |
| if (op == EOpFunction) |
| break; |
| |
| // HLSL can assign samplers directly (no constructor) |
| if (getSource() == EShSourceHlsl && node->getBasicType() == EbtSampler) |
| break; |
| |
| // samplers can get assigned via a sampler constructor |
| // (well, not yet, but code in the rest of this function is ready for it) |
| if (node->getBasicType() == EbtSampler && op == EOpAssign && |
| node->getAsOperator() != nullptr && node->getAsOperator()->getOp() == EOpConstructTextureSampler) |
| break; |
| |
| // otherwise, opaque types can't even be operated on, let alone converted |
| return false; |
| default: |
| break; |
| } |
| |
| return true; |
| } |
| |
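//
// Find the conversion operator that takes basic type 'src' to basic type
// 'dst', returning it in 'newOp'. Returns false if no such conversion exists.
//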
| bool TIntermediate::buildConvertOp(TBasicType dst, TBasicType src, TOperator& newOp) const |
| { |
| switch (dst) { |
| #ifndef GLSLANG_WEB |
| case EbtDouble: |
| switch (src) { |
| case EbtUint: newOp = EOpConvUintToDouble; break; |
| case EbtBool: newOp = EOpConvBoolToDouble; break; |
| case EbtFloat: newOp = EOpConvFloatToDouble; break; |
| case EbtInt: newOp = EOpConvIntToDouble; break; |
| case EbtInt8: newOp = EOpConvInt8ToDouble; break; |
| case EbtUint8: newOp = EOpConvUint8ToDouble; break; |
| case EbtInt16: newOp = EOpConvInt16ToDouble; break; |
| case EbtUint16: newOp = EOpConvUint16ToDouble; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToDouble; break; |
| case EbtInt64: newOp = EOpConvInt64ToDouble; break; |
| case EbtUint64: newOp = EOpConvUint64ToDouble; break; |
| default: |
| return false; |
| } |
| break; |
| #endif |
| case EbtFloat: |
| switch (src) { |
| case EbtInt: newOp = EOpConvIntToFloat; break; |
| case EbtUint: newOp = EOpConvUintToFloat; break; |
| case EbtBool: newOp = EOpConvBoolToFloat; break; |
| #ifndef GLSLANG_WEB |
| case EbtDouble: newOp = EOpConvDoubleToFloat; break; |
| case EbtInt8: newOp = EOpConvInt8ToFloat; break; |
| case EbtUint8: newOp = EOpConvUint8ToFloat; break; |
| case EbtInt16: newOp = EOpConvInt16ToFloat; break; |
| case EbtUint16: newOp = EOpConvUint16ToFloat; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToFloat; break; |
| case EbtInt64: newOp = EOpConvInt64ToFloat; break; |
| case EbtUint64: newOp = EOpConvUint64ToFloat; break; |
| #endif |
| default: |
| return false; |
| } |
| break; |
| #ifndef GLSLANG_WEB |
| case EbtFloat16: |
| switch (src) { |
| case EbtInt8: newOp = EOpConvInt8ToFloat16; break; |
| case EbtUint8: newOp = EOpConvUint8ToFloat16; break; |
| case EbtInt16: newOp = EOpConvInt16ToFloat16; break; |
| case EbtUint16: newOp = EOpConvUint16ToFloat16; break; |
| case EbtInt: newOp = EOpConvIntToFloat16; break; |
| case EbtUint: newOp = EOpConvUintToFloat16; break; |
| case EbtBool: newOp = EOpConvBoolToFloat16; break; |
| case EbtFloat: newOp = EOpConvFloatToFloat16; break; |
| case EbtDouble: newOp = EOpConvDoubleToFloat16; break; |
| case EbtInt64: newOp = EOpConvInt64ToFloat16; break; |
| case EbtUint64: newOp = EOpConvUint64ToFloat16; break; |
| default: |
| return false; |
| } |
| break; |
| #endif |
| case EbtBool: |
| switch (src) { |
| case EbtInt: newOp = EOpConvIntToBool; break; |
| case EbtUint: newOp = EOpConvUintToBool; break; |
| case EbtFloat: newOp = EOpConvFloatToBool; break; |
| #ifndef GLSLANG_WEB |
| case EbtDouble: newOp = EOpConvDoubleToBool; break; |
| case EbtInt8: newOp = EOpConvInt8ToBool; break; |
| case EbtUint8: newOp = EOpConvUint8ToBool; break; |
| case EbtInt16: newOp = EOpConvInt16ToBool; break; |
| case EbtUint16: newOp = EOpConvUint16ToBool; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToBool; break; |
| case EbtInt64: newOp = EOpConvInt64ToBool; break; |
| case EbtUint64: newOp = EOpConvUint64ToBool; break; |
| #endif |
| default: |
| return false; |
| } |
| break; |
| #ifndef GLSLANG_WEB |
| case EbtInt8: |
| switch (src) { |
| case EbtUint8: newOp = EOpConvUint8ToInt8; break; |
| case EbtInt16: newOp = EOpConvInt16ToInt8; break; |
| case EbtUint16: newOp = EOpConvUint16ToInt8; break; |
| case EbtInt: newOp = EOpConvIntToInt8; break; |
| case EbtUint: newOp = EOpConvUintToInt8; break; |
| case EbtInt64: newOp = EOpConvInt64ToInt8; break; |
| case EbtUint64: newOp = EOpConvUint64ToInt8; break; |
| case EbtBool: newOp = EOpConvBoolToInt8; break; |
| case EbtFloat: newOp = EOpConvFloatToInt8; break; |
| case EbtDouble: newOp = EOpConvDoubleToInt8; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToInt8; break; |
| default: |
| return false; |
| } |
| break; |
| case EbtUint8: |
| switch (src) { |
| case EbtInt8: newOp = EOpConvInt8ToUint8; break; |
| case EbtInt16: newOp = EOpConvInt16ToUint8; break; |
| case EbtUint16: newOp = EOpConvUint16ToUint8; break; |
| case EbtInt: newOp = EOpConvIntToUint8; break; |
| case EbtUint: newOp = EOpConvUintToUint8; break; |
| case EbtInt64: newOp = EOpConvInt64ToUint8; break; |
| case EbtUint64: newOp = EOpConvUint64ToUint8; break; |
| case EbtBool: newOp = EOpConvBoolToUint8; break; |
| case EbtFloat: newOp = EOpConvFloatToUint8; break; |
| case EbtDouble: newOp = EOpConvDoubleToUint8; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToUint8; break; |
| default: |
| return false; |
| } |
| break; |
| |
| case EbtInt16: |
| switch (src) { |
| case EbtUint8: newOp = EOpConvUint8ToInt16; break; |
| case EbtInt8: newOp = EOpConvInt8ToInt16; break; |
| case EbtUint16: newOp = EOpConvUint16ToInt16; break; |
| case EbtInt: newOp = EOpConvIntToInt16; break; |
| case EbtUint: newOp = EOpConvUintToInt16; break; |
| case EbtInt64: newOp = EOpConvInt64ToInt16; break; |
| case EbtUint64: newOp = EOpConvUint64ToInt16; break; |
| case EbtBool: newOp = EOpConvBoolToInt16; break; |
| case EbtFloat: newOp = EOpConvFloatToInt16; break; |
| case EbtDouble: newOp = EOpConvDoubleToInt16; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToInt16; break; |
| default: |
| return false; |
| } |
| break; |
| case EbtUint16: |
| switch (src) { |
| case EbtInt8: newOp = EOpConvInt8ToUint16; break; |
| case EbtUint8: newOp = EOpConvUint8ToUint16; break; |
| case EbtInt16: newOp = EOpConvInt16ToUint16; break; |
| case EbtInt: newOp = EOpConvIntToUint16; break; |
| case EbtUint: newOp = EOpConvUintToUint16; break; |
| case EbtInt64: newOp = EOpConvInt64ToUint16; break; |
| case EbtUint64: newOp = EOpConvUint64ToUint16; break; |
| case EbtBool: newOp = EOpConvBoolToUint16; break; |
| case EbtFloat: newOp = EOpConvFloatToUint16; break; |
| case EbtDouble: newOp = EOpConvDoubleToUint16; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToUint16; break; |
| default: |
| return false; |
| } |
| break; |
| #endif |
| |
| case EbtInt: |
| switch (src) { |
| case EbtUint: newOp = EOpConvUintToInt; break; |
| case EbtBool: newOp = EOpConvBoolToInt; break; |
| case EbtFloat: newOp = EOpConvFloatToInt; break; |
| #ifndef GLSLANG_WEB |
| case EbtInt8: newOp = EOpConvInt8ToInt; break; |
| case EbtUint8: newOp = EOpConvUint8ToInt; break; |
| case EbtInt16: newOp = EOpConvInt16ToInt; break; |
| case EbtUint16: newOp = EOpConvUint16ToInt; break; |
| case EbtDouble: newOp = EOpConvDoubleToInt; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToInt; break; |
| case EbtInt64: newOp = EOpConvInt64ToInt; break; |
| case EbtUint64: newOp = EOpConvUint64ToInt; break; |
| #endif |
| default: |
| return false; |
| } |
| break; |
| case EbtUint: |
| switch (src) { |
| case EbtInt: newOp = EOpConvIntToUint; break; |
| case EbtBool: newOp = EOpConvBoolToUint; break; |
| case EbtFloat: newOp = EOpConvFloatToUint; break; |
| #ifndef GLSLANG_WEB |
| case EbtInt8: newOp = EOpConvInt8ToUint; break; |
| case EbtUint8: newOp = EOpConvUint8ToUint; break; |
| case EbtInt16: newOp = EOpConvInt16ToUint; break; |
| case EbtUint16: newOp = EOpConvUint16ToUint; break; |
| case EbtDouble: newOp = EOpConvDoubleToUint; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToUint; break; |
| case EbtInt64: newOp = EOpConvInt64ToUint; break; |
| case EbtUint64: newOp = EOpConvUint64ToUint; break; |
| #endif |
| default: |
| return false; |
| } |
| break; |
| #ifndef GLSLANG_WEB |
| case EbtInt64: |
| switch (src) { |
| case EbtInt8: newOp = EOpConvInt8ToInt64; break; |
| case EbtUint8: newOp = EOpConvUint8ToInt64; break; |
| case EbtInt16: newOp = EOpConvInt16ToInt64; break; |
| case EbtUint16: newOp = EOpConvUint16ToInt64; break; |
| case EbtInt: newOp = EOpConvIntToInt64; break; |
| case EbtUint: newOp = EOpConvUintToInt64; break; |
| case EbtBool: newOp = EOpConvBoolToInt64; break; |
| case EbtFloat: newOp = EOpConvFloatToInt64; break; |
| case EbtDouble: newOp = EOpConvDoubleToInt64; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToInt64; break; |
| case EbtUint64: newOp = EOpConvUint64ToInt64; break; |
| default: |
| return false; |
| } |
| break; |
| case EbtUint64: |
| switch (src) { |
| case EbtInt8: newOp = EOpConvInt8ToUint64; break; |
| case EbtUint8: newOp = EOpConvUint8ToUint64; break; |
| case EbtInt16: newOp = EOpConvInt16ToUint64; break; |
| case EbtUint16: newOp = EOpConvUint16ToUint64; break; |
| case EbtInt: newOp = EOpConvIntToUint64; break; |
| case EbtUint: newOp = EOpConvUintToUint64; break; |
| case EbtBool: newOp = EOpConvBoolToUint64; break; |
| case EbtFloat: newOp = EOpConvFloatToUint64; break; |
| case EbtDouble: newOp = EOpConvDoubleToUint64; break; |
| case EbtFloat16: newOp = EOpConvFloat16ToUint64; break; |
| case EbtInt64: newOp = EOpConvInt64ToUint64; break; |
| default: |
| return false; |
| } |
| break; |
| #endif |
| default: |
| return false; |
| } |
| return true; |
| } |
| |
// This is the 'mechanism' here; it performs whatever conversion it is told to do.
| // It is about basic type, not about shape. |
| // The policy comes from the shader or the calling code. |
| TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped* node) const |
| { |
| // |
    // Add a new node for the conversion.
| // |
| |
| #ifndef GLSLANG_WEB |
| bool convertToIntTypes = (convertTo == EbtInt8 || convertTo == EbtUint8 || |
| convertTo == EbtInt16 || convertTo == EbtUint16 || |
| convertTo == EbtInt || convertTo == EbtUint || |
| convertTo == EbtInt64 || convertTo == EbtUint64); |
| |
| bool convertFromIntTypes = (node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8 || |
| node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16 || |
| node->getBasicType() == EbtInt || node->getBasicType() == EbtUint || |
| node->getBasicType() == EbtInt64 || node->getBasicType() == EbtUint64); |
| |
| bool convertToFloatTypes = (convertTo == EbtFloat16 || convertTo == EbtFloat || convertTo == EbtDouble); |
| |
| bool convertFromFloatTypes = (node->getBasicType() == EbtFloat16 || |
| node->getBasicType() == EbtFloat || |
| node->getBasicType() == EbtDouble); |
| |
| if (! getArithemeticInt8Enabled()) { |
| if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) || |
| ((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes)) |
| return nullptr; |
| } |
| |
| if (! getArithemeticInt16Enabled()) { |
| if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) || |
| ((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes)) |
| return nullptr; |
| } |
| |
| if (! getArithemeticFloat16Enabled()) { |
| if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) || |
| (node->getBasicType() == EbtFloat16 && ! convertToFloatTypes)) |
| return nullptr; |
| } |
| #endif |
| |
| TIntermUnary* newNode = nullptr; |
| TOperator newOp = EOpNull; |
| if (!buildConvertOp(convertTo, node->getBasicType(), newOp)) { |
| return nullptr; |
| } |
| |
| TType newType(convertTo, EvqTemporary, node->getVectorSize(), node->getMatrixCols(), node->getMatrixRows()); |
| newNode = addUnaryNode(newOp, node, node->getLoc(), newType); |
| |
| if (node->getAsConstantUnion()) { |
| #ifndef GLSLANG_WEB |
| // 8/16-bit storage extensions don't support 8/16-bit constants, so don't fold conversions |
| // to those types |
| if ((getArithemeticInt8Enabled() || !(convertTo == EbtInt8 || convertTo == EbtUint8)) && |
| (getArithemeticInt16Enabled() || !(convertTo == EbtInt16 || convertTo == EbtUint16)) && |
| (getArithemeticFloat16Enabled() || !(convertTo == EbtFloat16))) |
| #endif |
| { |
| TIntermTyped* folded = node->getAsConstantUnion()->fold(newOp, newType); |
| if (folded) |
| return folded; |
| } |
| } |
| |
| // Propagate specialization-constant-ness, if allowed |
| if (node->getType().getQualifier().isSpecConstant() && isSpecializationOperation(*newNode)) |
| newNode->getWritableType().getQualifier().makeSpecConstant(); |
| |
| return newNode; |
| } |
| |
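//
// Public entry point for a bare basic-type conversion: a thin wrapper over
// the createConversion() mechanism, applying no extra policy.
//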
| TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* node) const |
| { |
| return createConversion(convertTo, node); |
| } |
| |
// Convert a pair of operands to a binary operation to types compatible
// with each other, relative to the operation in 'op'.
// This does not cover assignment operations, which are asymmetric in that the
// left type is not changeable.
| // See addConversion(op, type, node) for assignments and unary operation |
| // conversions. |
| // |
| // Generally, this is focused on basic type conversion, not shape conversion. |
| // See addShapeConversion() for shape conversions. |
| // |
| // Returns the converted pair of nodes. |
| // Returns <nullptr, nullptr> when there is no conversion. |
| std::tuple<TIntermTyped*, TIntermTyped*> |
| TIntermediate::addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1) |
| { |
| if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1)) |
| return std::make_tuple(nullptr, nullptr); |
| |
| if (node0->getType() != node1->getType()) { |
| // If differing structure, then no conversions. |
| if (node0->isStruct() || node1->isStruct()) |
| return std::make_tuple(nullptr, nullptr); |
| |
| // If differing arrays, then no conversions. |
| if (node0->getType().isArray() || node1->getType().isArray()) |
| return std::make_tuple(nullptr, nullptr); |
| |
| // No implicit conversions for operations involving cooperative matrices |
| if (node0->getType().isCoopMat() || node1->getType().isCoopMat()) |
| return std::make_tuple(node0, node1); |
| } |
| |
| auto promoteTo = std::make_tuple(EbtNumTypes, EbtNumTypes); |
| |
| switch (op) { |
| // |
| // List all the binary ops that can implicitly convert one operand to the other's type; |
| // This implements the 'policy' for implicit type conversion. |
| // |
| case EOpLessThan: |
| case EOpGreaterThan: |
| case EOpLessThanEqual: |
| case EOpGreaterThanEqual: |
| case EOpEqual: |
| case EOpNotEqual: |
| |
| case EOpAdd: |
| case EOpSub: |
| case EOpMul: |
| case EOpDiv: |
| case EOpMod: |
| |
| case EOpVectorTimesScalar: |
| case EOpVectorTimesMatrix: |
| case EOpMatrixTimesVector: |
| case EOpMatrixTimesScalar: |
| |
| case EOpAnd: |
| case EOpInclusiveOr: |
| case EOpExclusiveOr: |
| |
| case EOpSequence: // used by ?: |
| |
| if (node0->getBasicType() == node1->getBasicType()) |
| return std::make_tuple(node0, node1); |
| |
| promoteTo = getConversionDestinatonType(node0->getBasicType(), node1->getBasicType(), op); |
| if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes) |
| return std::make_tuple(nullptr, nullptr); |
| |
| break; |
| |
| case EOpLogicalAnd: |
| case EOpLogicalOr: |
| case EOpLogicalXor: |
| if (getSource() == EShSourceHlsl) |
| promoteTo = std::make_tuple(EbtBool, EbtBool); |
| else |
| return std::make_tuple(node0, node1); |
| break; |
| |
| // There are no conversions needed for GLSL; the shift amount just needs to be an |
| // integer type, as does the base. |
| // HLSL can promote bools to ints to make this work. |
| case EOpLeftShift: |
| case EOpRightShift: |
| if (getSource() == EShSourceHlsl) { |
| TBasicType node0BasicType = node0->getBasicType(); |
| if (node0BasicType == EbtBool) |
| node0BasicType = EbtInt; |
| if (node1->getBasicType() == EbtBool) |
| promoteTo = std::make_tuple(node0BasicType, EbtInt); |
| else |
| promoteTo = std::make_tuple(node0BasicType, node1->getBasicType()); |
| } else { |
| if (isTypeInt(node0->getBasicType()) && isTypeInt(node1->getBasicType())) |
| return std::make_tuple(node0, node1); |
| else |
| return std::make_tuple(nullptr, nullptr); |
| } |
| break; |
| |
| default: |
| if (node0->getType() == node1->getType()) |
| return std::make_tuple(node0, node1); |
| |
| return std::make_tuple(nullptr, nullptr); |
| } |
| |
| TIntermTyped* newNode0; |
| TIntermTyped* newNode1; |
| |
| if (std::get<0>(promoteTo) != node0->getType().getBasicType()) { |
| if (node0->getAsConstantUnion()) |
| newNode0 = promoteConstantUnion(std::get<0>(promoteTo), node0->getAsConstantUnion()); |
| else |
| newNode0 = createConversion(std::get<0>(promoteTo), node0); |
| } else |
| newNode0 = node0; |
| |
| if (std::get<1>(promoteTo) != node1->getType().getBasicType()) { |
| if (node1->getAsConstantUnion()) |
| newNode1 = promoteConstantUnion(std::get<1>(promoteTo), node1->getAsConstantUnion()); |
| else |
| newNode1 = createConversion(std::get<1>(promoteTo), node1); |
| } else |
| newNode1 = node1; |
| |
| return std::make_tuple(newNode0, newNode1); |
| } |
| |
| // |
| // Convert the node's type to the given type, as allowed by the operation involved: 'op'. |
// For implicit conversions, 'op' is not the requested conversion; it is the
// explicit operation requiring the implicit conversion.
| // |
| // Binary operation conversions should be handled by addConversion(op, node, node), not here. |
| // |
| // Returns a node representing the conversion, which could be the same |
| // node passed in if no conversion was needed. |
| // |
| // Generally, this is focused on basic type conversion, not shape conversion. |
| // See addShapeConversion() for shape conversions. |
| // |
| // Return nullptr if a conversion can't be done. |
| // |
| TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TIntermTyped* node) |
| { |
| if (!isConversionAllowed(op, node)) |
| return nullptr; |
| |
| // Otherwise, if types are identical, no problem |
| if (type == node->getType()) |
| return node; |
| |
| // If one's a structure, then no conversions. |
| if (type.isStruct() || node->isStruct()) |
| return nullptr; |
| |
| // If one's an array, then no conversions. |
| if (type.isArray() || node->getType().isArray()) |
| return nullptr; |
| |
| // Note: callers are responsible for other aspects of shape, |
| // like vector and matrix sizes. |
| |
| TBasicType promoteTo; |
| // GL_EXT_shader_16bit_storage can't do OpConstantComposite with |
| // 16-bit types, so disable promotion for those types. |
| bool canPromoteConstant = true; |
| |
| switch (op) { |
| // |
| // Explicit conversions (unary operations) |
| // |
| case EOpConstructBool: |
| promoteTo = EbtBool; |
| break; |
| case EOpConstructFloat: |
| promoteTo = EbtFloat; |
| break; |
| case EOpConstructInt: |
| promoteTo = EbtInt; |
| break; |
| case EOpConstructUint: |
| promoteTo = EbtUint; |
| break; |
| #ifndef GLSLANG_WEB |
| case EOpConstructDouble: |
| promoteTo = EbtDouble; |
| break; |
| case EOpConstructFloat16: |
| promoteTo = EbtFloat16; |
| canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16); |
| break; |
| case EOpConstructInt8: |
| promoteTo = EbtInt8; |
| canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8); |
| break; |
| case EOpConstructUint8: |
| promoteTo = EbtUint8; |
| canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8); |
| break; |
| case EOpConstructInt16: |
| promoteTo = EbtInt16; |
| canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16); |
| break; |
| case EOpConstructUint16: |
| promoteTo = EbtUint16; |
| canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16); |
| break; |
| case EOpConstructInt64: |
| promoteTo = EbtInt64; |
| break; |
| case EOpConstructUint64: |
| promoteTo = EbtUint64; |
| break; |
| #endif |
| |
| case EOpLogicalNot: |
| |
| case EOpFunctionCall: |
| |
| case EOpReturn: |
| case EOpAssign: |
| case EOpAddAssign: |
| case EOpSubAssign: |
| case EOpMulAssign: |
| case EOpVectorTimesScalarAssign: |
| case EOpMatrixTimesScalarAssign: |
| case EOpDivAssign: |
| case EOpModAssign: |
| case EOpAndAssign: |
| case EOpInclusiveOrAssign: |
| case EOpExclusiveOrAssign: |
| |
| case EOpAtan: |
| case EOpClamp: |
| case EOpCross: |
| case EOpDistance: |
| case EOpDot: |
| case EOpDst: |
| case EOpFaceForward: |
| case EOpFma: |
| case EOpFrexp: |
| case EOpLdexp: |
| case EOpMix: |
| case EOpLit: |
| case EOpMax: |
| case EOpMin: |
| case EOpMod: |
| case EOpModf: |
| case EOpPow: |
| case EOpReflect: |
| case EOpRefract: |
| case EOpSmoothStep: |
| case EOpStep: |
| |
| case EOpSequence: |
| case EOpConstructStruct: |
| case EOpConstructCooperativeMatrix: |
| |
| if (type.isReference() || node->getType().isReference()) { |
| // types must match to assign a reference |
| if (type == node->getType()) |
| return node; |
| else |
| return nullptr; |
| } |
| |
| if (type.getBasicType() == node->getType().getBasicType()) |
| return node; |
| |
| if (canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op)) |
| promoteTo = type.getBasicType(); |
| else |
| return nullptr; |
| break; |
| |
| // For GLSL, there are no conversions needed; the shift amount just needs to be an |
| // integer type, as do the base/result. |
| // HLSL can convert the shift from a bool to an int. |
| case EOpLeftShiftAssign: |
| case EOpRightShiftAssign: |
| { |
| if (getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool) |
| promoteTo = type.getBasicType(); |
| else { |
| if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType())) |
| return node; |
| else |
| return nullptr; |
| } |
| break; |
| } |
| |
| default: |
| // default is to require a match; all exceptions should have case statements above |
| |
| if (type.getBasicType() == node->getType().getBasicType()) |
| return node; |
| else |
| return nullptr; |
| } |
| |
| if (canPromoteConstant && node->getAsConstantUnion()) |
| return promoteConstantUnion(promoteTo, node->getAsConstantUnion()); |
| |
| // |
    // Add a new node for the conversion.
| // |
| TIntermTyped* newNode = createConversion(promoteTo, node); |
| |
| return newNode; |
| } |
| |
// Convert the node's type shape to the given type, as allowed by the
// operation involved: 'op'. This is for situations where there is only one
// direction in which to consider doing the shape conversion.
| // |
// This implements the policy; it calls addShapeConversion() for the mechanism.
| // |
| // Generally, the AST represents allowed GLSL shapes, so this isn't needed |
| // for GLSL. Bad shapes are caught in conversion or promotion. |
| // |
| // Return 'node' if no conversion was done. Promotion handles final shape |
| // checking. |
| // |
| TIntermTyped* TIntermediate::addUniShapeConversion(TOperator op, const TType& type, TIntermTyped* node) |
| { |
| // some source languages don't do this |
| switch (getSource()) { |
| case EShSourceHlsl: |
| break; |
| case EShSourceGlsl: |
| default: |
| return node; |
| } |
| |
| // some operations don't do this |
| switch (op) { |
| case EOpFunctionCall: |
| case EOpReturn: |
| break; |
| |
| case EOpMulAssign: |
| // want to support vector *= scalar native ops in AST and lower, not smear, similarly for |
| // matrix *= scalar, etc. |
| |
| case EOpAddAssign: |
| case EOpSubAssign: |
| case EOpDivAssign: |
| case EOpAndAssign: |
| case EOpInclusiveOrAssign: |
| case EOpExclusiveOrAssign: |
| case EOpRightShiftAssign: |
| case EOpLeftShiftAssign: |
| if (node->getVectorSize() == 1) |
| return node; |
| break; |
| |
| case EOpAssign: |
| break; |
| |
| case EOpMix: |
| break; |
| |
| default: |
| return node; |
| } |
| |
| return addShapeConversion(type, node); |
| } |
| |
| // Convert the nodes' shapes to be compatible for the operation 'op'. |
| // |
// This implements the policy; it calls addShapeConversion() for the mechanism.
| // |
| // Generally, the AST represents allowed GLSL shapes, so this isn't needed |
| // for GLSL. Bad shapes are caught in conversion or promotion. |
| // |
| void TIntermediate::addBiShapeConversion(TOperator op, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode) |
| { |
| // some source languages don't do this |
| switch (getSource()) { |
| case EShSourceHlsl: |
| break; |
| case EShSourceGlsl: |
| default: |
| return; |
| } |
| |
| // some operations don't do this |
| // 'break' will mean attempt bidirectional conversion |
| switch (op) { |
| case EOpMulAssign: |
| case EOpAssign: |
| case EOpAddAssign: |
| case EOpSubAssign: |
| case EOpDivAssign: |
| case EOpAndAssign: |
| case EOpInclusiveOrAssign: |
| case EOpExclusiveOrAssign: |
| case EOpRightShiftAssign: |
| case EOpLeftShiftAssign: |
| // switch to unidirectional conversion (the lhs can't change) |
| rhsNode = addUniShapeConversion(op, lhsNode->getType(), rhsNode); |
| return; |
| |
| case EOpMul: |
| // matrix multiply does not change shapes |
| if (lhsNode->isMatrix() && rhsNode->isMatrix()) |
| return; |
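        // fall through: non-matrix multiplies follow the same scalar rules as below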
| case EOpAdd: |
| case EOpSub: |
| case EOpDiv: |
| // want to support vector * scalar native ops in AST and lower, not smear, similarly for |
| // matrix * vector, etc. |
| if (lhsNode->getVectorSize() == 1 || rhsNode->getVectorSize() == 1) |
| return; |
| break; |
| |
| case EOpRightShift: |
| case EOpLeftShift: |
| // can natively support the right operand being a scalar and the left a vector, |
| // but not the reverse |
| if (rhsNode->getVectorSize() == 1) |
| return; |
| break; |
| |
| case EOpLessThan: |
| case EOpGreaterThan: |
| case EOpLessThanEqual: |
| case EOpGreaterThanEqual: |
| |
| case EOpEqual: |
| case EOpNotEqual: |
| |
| case EOpLogicalAnd: |
| case EOpLogicalOr: |
| case EOpLogicalXor: |
| |
| case EOpAnd: |
| case EOpInclusiveOr: |
| case EOpExclusiveOr: |
| |
| case EOpMix: |
| break; |
| |
| default: |
| return; |
| } |
| |
| // Do bidirectional conversions |
| if (lhsNode->getType().isScalarOrVec1() || rhsNode->getType().isScalarOrVec1()) { |
| if (lhsNode->getType().isScalarOrVec1()) |
| lhsNode = addShapeConversion(rhsNode->getType(), lhsNode); |
| else |
| rhsNode = addShapeConversion(lhsNode->getType(), rhsNode); |
| } |
| lhsNode = addShapeConversion(rhsNode->getType(), lhsNode); |
| rhsNode = addShapeConversion(lhsNode->getType(), rhsNode); |
| } |
| |
// Convert the node's type shape to the given type, as allowed by the
// operation involved: 'op'.
| // |
| // Generally, the AST represents allowed GLSL shapes, so this isn't needed |
| // for GLSL. Bad shapes are caught in conversion or promotion. |
| // |
| // Return 'node' if no conversion was done. Promotion handles final shape |
| // checking. |
| // |
| TIntermTyped* TIntermediate::addShapeConversion(const TType& type, TIntermTyped* node) |
| { |
| // no conversion needed |
| if (node->getType() == type) |
| return node; |
| |
| // structures and arrays don't change shape, either to or from |
| if (node->getType().isStruct() || node->getType().isArray() || |
| type.isStruct() || type.isArray()) |
| return node; |
| |
| // The new node that handles the conversion |
| TOperator constructorOp = mapTypeToConstructorOp(type); |
| |
| if (getSource() == EShSourceHlsl) { |
| // HLSL rules for scalar, vector and matrix conversions: |
| // 1) scalar can become anything, initializing every component with its value |
        // 2) vector and matrix can become scalar, first element is used (warning: truncation)
        // 3) matrix can become matrix with fewer rows and/or columns (warning: truncation)
        // 4) vector can become a vector of smaller size (warning: truncation)
        // 5a) vector 4 can become 2x2 matrix (special case) (same packing layout, it's a reinterpret)
        // 5b) 2x2 matrix can become vector 4 (special case) (same packing layout, it's a reinterpret)
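        // For example (illustrative HLSL):
        //     float  f;
        //     float4 v = f;   // rule 1: f is replicated into all four components
        //     float  g = v;   // rule 2: first component is used (truncation warning)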
| |
| const TType &sourceType = node->getType(); |
| |
| // rule 1 for scalar to matrix is special |
| if (sourceType.isScalarOrVec1() && type.isMatrix()) { |
| |
| // HLSL semantics: the scalar (or vec1) is replicated to every component of the matrix. Left to its |
| // own devices, the constructor from a scalar would populate the diagonal. This forces replication |
| // to every matrix element. |
| |
| // Note that if the node is complex (e.g, a function call), we don't want to duplicate it here |
| // repeatedly, so we copy it to a temp, then use the temp. |
| const int matSize = type.computeNumComponents(); |
| TIntermAggregate* rhsAggregate = new TIntermAggregate(); |
| |
| const bool isSimple = (node->getAsSymbolNode() != nullptr) || (node->getAsConstantUnion() != nullptr); |
| |
| if (!isSimple) { |
| assert(0); // TODO: use node replicator service when available. |
| } |
| |
| for (int x = 0; x < matSize; ++x) |
| rhsAggregate->getSequence().push_back(node); |
| |
| return setAggregateOperator(rhsAggregate, constructorOp, type, node->getLoc()); |
| } |
| |
| // rule 1 and 2 |
| if ((sourceType.isScalar() && !type.isScalar()) || (!sourceType.isScalar() && type.isScalar())) |
| return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); |
| |
| // rule 3 and 5b |
| if (sourceType.isMatrix()) { |
| // rule 3 |
| if (type.isMatrix()) { |
| if ((sourceType.getMatrixCols() != type.getMatrixCols() || sourceType.getMatrixRows() != type.getMatrixRows()) && |
| sourceType.getMatrixCols() >= type.getMatrixCols() && sourceType.getMatrixRows() >= type.getMatrixRows()) |
| return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); |
| // rule 5b |
| } else if (type.isVector()) { |
| if (type.getVectorSize() == 4 && sourceType.getMatrixCols() == 2 && sourceType.getMatrixRows() == 2) |
| return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); |
| } |
| } |
| |
| // rule 4 and 5a |
| if (sourceType.isVector()) { |
| // rule 4 |
| if (type.isVector()) |
| { |
| if (sourceType.getVectorSize() > type.getVectorSize()) |
| return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); |
| // rule 5a |
| } else if (type.isMatrix()) { |
| if (sourceType.getVectorSize() == 4 && type.getMatrixCols() == 2 && type.getMatrixRows() == 2) |
| return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); |
| } |
| } |
| } |
| |
| // scalar -> vector or vec1 -> vector or |
| // vector -> scalar or |
| // bigger vector -> smaller vector |
| if ((node->getType().isScalarOrVec1() && type.isVector()) || |
| (node->getType().isVector() && type.isScalar()) || |
| (node->isVector() && type.isVector() && node->getVectorSize() > type.getVectorSize())) |
| return setAggregateOperator(makeAggregate(node), constructorOp, type, node->getLoc()); |
| |
| return node; |
| } |
| |
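// Integral promotions: 8-bit and 16-bit integer types promote to 'int'.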
| bool TIntermediate::isIntegralPromotion(TBasicType from, TBasicType to) const |
| { |
| // integral promotions |
| if (to == EbtInt) { |
| switch(from) { |
| case EbtInt8: |
| case EbtInt16: |
| case EbtUint8: |
| case EbtUint16: |
| return true; |
| default: |
| break; |
| } |
| } |
| return false; |
| } |
| |
| bool TIntermediate::isFPPromotion(TBasicType from, TBasicType to) const |
| { |
| // floating-point promotions |
| if (to == EbtDouble) { |
| switch(from) { |
| case EbtFloat16: |
| case EbtFloat: |
| return true; |
| default: |
| break; |
| } |
| } |
| return false; |
| } |
| |
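// Integral conversions beyond promotion: widening to larger integer types,
// plus int -> uint (desktop 4.00+, or HLSL).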
| bool TIntermediate::isIntegralConversion(TBasicType from, TBasicType to) const |
| { |
| #ifdef GLSLANG_WEB |
| return false; |
| #endif |
| |
| switch (from) { |
| case EbtInt: |
| switch(to) { |
| case EbtUint: |
| return version >= 400 || getSource() == EShSourceHlsl; |
| case EbtInt64: |
| case EbtUint64: |
| return true; |
| default: |
| break; |
| } |
| break; |
| case EbtUint: |
| switch(to) { |
| case EbtInt64: |
| case EbtUint64: |
| return true; |
| default: |
| break; |
| } |
| break; |
| case EbtInt8: |
| switch (to) { |
| case EbtUint8: |
| case EbtInt16: |
| case EbtUint16: |
| case EbtUint: |
| case EbtInt64: |
| case EbtUint64: |
| return true; |
| default: |
| break; |
| } |
| break; |
| case EbtUint8: |
| switch (to) { |
| case EbtInt16: |
| case EbtUint16: |
| case EbtUint: |
| case EbtInt64: |
| case EbtUint64: |
| return true; |
| default: |
| break; |
| } |
| break; |
| case EbtInt16: |
| switch(to) { |
| case EbtUint16: |
| case EbtUint: |
| case EbtInt64: |
| case EbtUint64: |
| return true; |
| default: |
| break; |
| } |
| break; |
| case EbtUint16: |
| switch(to) { |
| case EbtUint: |
| case EbtInt64: |
| case EbtUint64: |
| return true; |
| default: |
| break; |
| } |
| break; |
| case EbtInt64: |
| if (to == EbtUint64) { |
| return true; |
| } |
| break; |
| default: |
| break; |
| } |
| return false; |
| } |
| |
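// Floating-point conversions beyond promotion; currently only float16 -> float.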
| bool TIntermediate::isFPConversion(TBasicType from, TBasicType to) const |
| { |
| #ifdef GLSLANG_WEB |
| return false; |
| #endif |
| |
| if (to == EbtFloat && from == EbtFloat16) { |
| return true; |
| } else { |
| return false; |
| } |
| } |
| |
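// Floating-integral conversions: integer types converting to floating-point types.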
| bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const |
| { |
| switch (from) { |
| case EbtInt: |
| case EbtUint: |
| switch(to) { |
| case EbtFloat: |
| case EbtDouble: |
| return true; |
| default: |
| break; |
| } |
| break; |
| #ifndef GLSLANG_WEB |
| case EbtInt8: |
| case EbtUint8: |
| case EbtInt16: |
| case EbtUint16: |
| switch (to) { |
| case EbtFloat16: |
| case EbtFloat: |
| case EbtDouble: |
| return true; |
| default: |
| break; |
| } |
| break; |
| case EbtInt64: |
| case EbtUint64: |
| if (to == EbtDouble) { |
| return true; |
| } |
| break; |
| #endif |
| default: |
| break; |
| } |
| return false; |
| } |
| |
| // |
| // See if the 'from' type is allowed to be implicitly converted to the |
| // 'to' type. This is not about vector/array/struct, only about basic type. |
| // |
| bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const |
| { |
    if ((isEsProfile() && version < 310) || version == 110)
| return false; |
| |
| if (from == to) |
| return true; |
| |
| // TODO: Move more policies into language-specific handlers. |
| // Some languages allow more general (or potentially, more specific) conversions under some conditions. |
| if (getSource() == EShSourceHlsl) { |
| const bool fromConvertable = (from == EbtFloat || from == EbtDouble || from == EbtInt || from == EbtUint || from == EbtBool); |
| const bool toConvertable = (to == EbtFloat || to == EbtDouble || to == EbtInt || to == EbtUint || to == EbtBool); |
| |
| if (fromConvertable && toConvertable) { |
| switch (op) { |
| case EOpAndAssign: // assignments can perform arbitrary conversions |
| case EOpInclusiveOrAssign: // ... |
| case EOpExclusiveOrAssign: // ... |
| case EOpAssign: // ... |
| case EOpAddAssign: // ... |
| case EOpSubAssign: // ... |
| case EOpMulAssign: // ... |
| case EOpVectorTimesScalarAssign: // ... |
| case EOpMatrixTimesScalarAssign: // ... |
| case EOpDivAssign: // ... |
| case EOpModAssign: // ... |
| case EOpReturn: // function returns can also perform arbitrary conversions |
| case EOpFunctionCall: // conversion of a calling parameter |
| case EOpLogicalNot: |
| case EOpLogicalAnd: |
| case EOpLogicalOr: |
| case EOpLogicalXor: |
| case EOpConstructStruct: |
| return true; |
| default: |
| break; |
| } |
| } |
| } |
| |
| bool explicitTypesEnabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int32) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int64) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float32) || |
| extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float64); |
| |
| if (explicitTypesEnabled) { |
| // integral promotions |
| if (isIntegralPromotion(from, to)) { |
| return true; |
| } |
| |
| // floating-point promotions |
| if (isFPPromotion(from, to)) { |
| return true; |
| } |
| |
| // integral conversions |
| if (isIntegralConversion(from, to)) { |
| return true; |
| } |
| |
| // floating-point conversions |
| if (isFPConversion(from, to)) { |
| return true; |
| } |
| |
| // floating-integral conversions |
| if (isFPIntegralConversion(from, to)) { |
| return true; |
| } |
| |
| // hlsl supported conversions |
| if (getSource() == EShSourceHlsl) { |
| if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat)) |
| return true; |
| } |
| } else if (isEsProfile()) { |
| switch (to) { |
| case EbtFloat: |
| switch (from) { |
| case EbtInt: |
| case EbtUint: |
| return extensionRequested(E_GL_EXT_shader_implicit_conversions); |
| case EbtFloat: |
| return true; |
| default: |
| return false; |
| } |
| case EbtUint: |
| switch (from) { |
| case EbtInt: |
| return extensionRequested(E_GL_EXT_shader_implicit_conversions); |
| case EbtUint: |
| return true; |
| default: |
| return false; |
| } |
| default: |
| return false; |
| } |
| } else { |
| switch (to) { |
| case EbtDouble: |
| switch (from) { |
| case EbtInt: |
| case EbtUint: |
| case EbtInt64: |
| case EbtUint64: |
| case EbtFloat: |
| case EbtDouble: |
| return version >= 400 || extensionRequested(E_GL_ARB_gpu_shader_fp64); |
| case EbtInt16: |
| case EbtUint16: |
| return (version >= 400 || extensionRequested(E_GL_ARB_gpu_shader_fp64)) && |
| extensionRequested(E_GL_AMD_gpu_shader_int16); |
| case EbtFloat16: |
| return (version >= 400 || extensionRequested(E_GL_ARB_gpu_shader_fp64)) && |
| extensionRequested(E_GL_AMD_gpu_shader_half_float); |
| default: |
| return false; |
| } |
| case EbtFloat: |
| switch (from) { |
| case EbtInt: |
| case EbtUint: |
| case EbtFloat: |
| return true; |
| case EbtBool: |
| return getSource() == EShSourceHlsl; |
| case EbtInt16: |
| case EbtUint16: |
| return extensionRequested(E_GL_AMD_gpu_shader_int16); |
| case EbtFloat16: |
| return |
| extensionRequested(E_GL_AMD_gpu_shader_half_float) || getSource() == EShSourceHlsl; |
| default: |
| return false; |
| } |
| case EbtUint: |
| switch (from) { |
| case EbtInt: |
| return version >= 400 || getSource() == EShSourceHlsl; |
| case EbtUint: |
| return true; |
| case EbtBool: |
| return getSource() == EShSourceHlsl; |
| case EbtInt16: |
| case EbtUint16: |
| return extensionRequested(E_GL_AMD_gpu_shader_int16); |
| default: |
| return false; |
| } |
| case EbtInt: |
| switch (from) { |
| case EbtInt: |
| return true; |
| case EbtBool: |
| return getSource() == EShSourceHlsl; |
| case EbtInt16: |
| return extensionRequested(E_GL_AMD_gpu_shader_int16); |
| default: |
| return false; |
| } |
| case EbtUint64: |
| switch (from) { |
| case EbtInt: |
| case EbtUint: |
| case EbtInt64: |
| case EbtUint64: |
| return true; |
| case EbtInt16: |
| case EbtUint16: |
| return extensionRequested(E_GL_AMD_gpu_shader_int16); |
| default: |
| return false; |
| } |
| case EbtInt64: |
| switch (from) { |
| case EbtInt: |
| case EbtInt64: |
| return true; |
| case EbtInt16: |
| return extensionRequested(E_GL_AMD_gpu_shader_int16); |
| default: |
| return false; |
| } |
| case EbtFloat16: |
| switch (from) { |
| case EbtInt16: |
| case EbtUint16: |
| return extensionRequested(E_GL_AMD_gpu_shader_int16); |
| case EbtFloat16: |
| return extensionRequested(E_GL_AMD_gpu_shader_half_float); |
| default: |
| break; |
| } |
| return false; |
| case EbtUint16: |
| switch (from) { |
| case EbtInt16: |
| case EbtUint16: |
| return extensionRequested(E_GL_AMD_gpu_shader_int16); |
| default: |
| break; |
| } |
| return false; |
| default: |
| return false; |
| } |
| } |
| |
| return false; |
| } |
| |
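// Return true if every value of the given unsigned integer type is representable
// in the given signed integer type, i.e., when the signed type has strictly
// greater rank. Pairs that the conversion logic below never produces fall into
// the asserts.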
| static bool canSignedIntTypeRepresentAllUnsignedValues(TBasicType sintType, TBasicType uintType) |
| { |
| #ifdef GLSLANG_WEB |
| return false; |
| #endif |
| |
| switch(sintType) { |
| case EbtInt8: |
| switch(uintType) { |
| case EbtUint8: |
| case EbtUint16: |
| case EbtUint: |
| case EbtUint64: |
| return false; |
| default: |
| assert(false); |
| return false; |
| } |
| break; |
| case EbtInt16: |
| switch(uintType) { |
| case EbtUint8: |
| return true; |
| case EbtUint16: |
| case EbtUint: |
| case EbtUint64: |
| return false; |
| default: |
| assert(false); |
| return false; |
| } |
| break; |
| case EbtInt: |
| switch(uintType) { |
| case EbtUint8: |
| case EbtUint16: |
| return true; |
| case EbtUint: |
| return false; |
| default: |
| assert(false); |
| return false; |
| } |
| break; |
| case EbtInt64: |
| switch(uintType) { |
| case EbtUint8: |
| case EbtUint16: |
| case EbtUint: |
| return true; |
| case EbtUint64: |
| return false; |
| default: |
| assert(false); |
| return false; |
| } |
| break; |
| default: |
| assert(false); |
| return false; |
| } |
| } |
| |
| |
| static TBasicType getCorrespondingUnsignedType(TBasicType type) |
| { |
| #ifdef GLSLANG_WEB |
| assert(type == EbtInt); |
| return EbtUint; |
| #endif |
| |
| switch(type) { |
| case EbtInt8: |
| return EbtUint8; |
| case EbtInt16: |
| return EbtUint16; |
| case EbtInt: |
| return EbtUint; |
| case EbtInt64: |
| return EbtUint64; |
| default: |
| assert(false); |
| return EbtNumTypes; |
| } |
| } |
| |
// Implements the following rules:
| // - If either operand has type float64_t or derived from float64_t, |
| // the other shall be converted to float64_t or derived type. |
| // - Otherwise, if either operand has type float32_t or derived from |
| // float32_t, the other shall be converted to float32_t or derived type. |
| // - Otherwise, if either operand has type float16_t or derived from |
| // float16_t, the other shall be converted to float16_t or derived type. |
| // - Otherwise, if both operands have integer types the following rules |
| // shall be applied to the operands: |
| // - If both operands have the same type, no further conversion |
| // is needed. |
| // - Otherwise, if both operands have signed integer types or both |
| // have unsigned integer types, the operand with the type of lesser |
| // integer conversion rank shall be converted to the type of the |
| // operand with greater rank. |
| // - Otherwise, if the operand that has unsigned integer type has rank |
| // greater than or equal to the rank of the type of the other |
| // operand, the operand with signed integer type shall be converted |
| // to the type of the operand with unsigned integer type. |
| // - Otherwise, if the type of the operand with signed integer type can |
| // represent all of the values of the type of the operand with |
| // unsigned integer type, the operand with unsigned integer type |
| // shall be converted to the type of the operand with signed |
| // integer type. |
| // - Otherwise, both operands shall be converted to the unsigned |
| // integer type corresponding to the type of the operand with signed |
| // integer type. |
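//
// For example, under these rules (assuming the operand types are otherwise
// allowed to convert to each other):
//     float16_t op int32_t   ->  both operands become float16_t
//     int16_t   op int32_t   ->  both operands become int32_t   (greater rank)
//     int32_t   op uint32_t  ->  both operands become uint32_t  (equal rank; unsigned wins)
//     int64_t   op uint32_t  ->  both operands become int64_t   (int64_t holds all uint32_t values)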
| |
| std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const |
| { |
| TBasicType res0 = EbtNumTypes; |
| TBasicType res1 = EbtNumTypes; |
| |
| if ((isEsProfile() && |
| (version < 310 || !extensionRequested(E_GL_EXT_shader_implicit_conversions))) || |
| version == 110) |
| return std::make_tuple(res0, res1); |
| |
| if (getSource() == EShSourceHlsl) { |
| if (canImplicitlyPromote(type1, type0, op)) { |
| res0 = type0; |
| res1 = type0; |
| } else if (canImplicitlyPromote(type0, type1, op)) { |
| res0 = type1; |
| res1 = type1; |
| } |
| return std::make_tuple(res0, res1); |
| } |
| |
| if ((type0 == EbtDouble && canImplicitlyPromote(type1, EbtDouble, op)) || |
| (type1 == EbtDouble && canImplicitlyPromote(type0, EbtDouble, op)) ) { |
| res0 = EbtDouble; |
| res1 = EbtDouble; |
| } else if ((type0 == EbtFloat && canImplicitlyPromote(type1, EbtFloat, op)) || |
| (type1 == EbtFloat && canImplicitlyPromote(type0, EbtFloat, op)) ) { |
| res0 = EbtFloat; |
| res1 = EbtFloat; |
| } else if ((type0 == EbtFloat16 && canImplicitlyPromote(type1, EbtFloat16, op)) || |
| (type1 == EbtFloat16 && canImplicitlyPromote(type0, EbtFloat16, op)) ) { |
| res0 = EbtFloat16; |
| res1 = EbtFloat16; |
| } else if (isTypeInt(type0) && isTypeInt(type1) && |
| (canImplicitlyPromote(type0, type1, op) || canImplicitlyPromote(type1, type0, op))) { |
| if ((isTypeSignedInt(type0) && isTypeSignedInt(type1)) || |
| (isTypeUnsignedInt(type0) && isTypeUnsignedInt(type1))) { |
| if (getTypeRank(type0) < getTypeRank(type1)) { |
| res0 = type1; |
| res1 = type1; |
| } else { |
| res0 = type0; |
| res1 = type0; |
| } |
| } else if (isTypeUnsignedInt(type0) && (getTypeRank(type0) > getTypeRank(type1))) { |
| res0 = type0; |
| res1 = type0; |
| } else if (isTypeUnsignedInt(type1) && (getTypeRank(type1) > getTypeRank(type0))) { |
| res0 = type1; |
| res1 = type1; |
| } else if (isTypeSignedInt(type0)) { |
| if (canSignedIntTypeRepresentAllUnsignedValues(type0, type1)) { |
| res0 = type0; |
| res1 = type0; |
| } else { |
| res0 = getCorrespondingUnsignedType(type0); |
| res1 = getCorrespondingUnsignedType(type0); |
| } |
| } else if (isTypeSignedInt(type1)) { |
| if (canSignedIntTypeRepresentAllUnsignedValues(type1, type0)) { |
| res0 = type1; |
| res1 = type1; |
| } else { |
| res0 = getCorrespondingUnsignedType(type1); |
| res1 = getCorrespondingUnsignedType(type1); |
| } |
| } |
| } |
| |
| return std::make_tuple(res0, res1); |
| } |
| |
| // |
| // Given a type, find what operation would fully construct it. |
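//
// For example (an illustrative caller, using TType's basic-type/qualifier/
// vector-size constructor):
//
//     TType vec3Type(EbtFloat, EvqTemporary, 3);
//     TOperator op = intermediate.mapTypeToConstructorOp(vec3Type);  // EOpConstructVec3
//
// Returns EOpNull if no constructor matches the type.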
| // |
| TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const |
| { |
| TOperator op = EOpNull; |
| |
| if (type.getQualifier().isNonUniform()) |
| return EOpConstructNonuniform; |
| |
| if (type.isCoopMat()) |
| return EOpConstructCooperativeMatrix; |
| |
| switch (type.getBasicType()) { |
| case EbtStruct: |
| op = EOpConstructStruct; |
| break; |
| case EbtSampler: |
| if (type.getSampler().isCombined()) |
| op = EOpConstructTextureSampler; |
| break; |
| case EbtFloat: |
| if (type.isMatrix()) { |
| switch (type.getMatrixCols()) { |
| case 2: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructMat2x2; break; |
| case 3: op = EOpConstructMat2x3; break; |
| case 4: op = EOpConstructMat2x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 3: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructMat3x2; break; |
| case 3: op = EOpConstructMat3x3; break; |
| case 4: op = EOpConstructMat3x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 4: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructMat4x2; break; |
| case 3: op = EOpConstructMat4x3; break; |
| case 4: op = EOpConstructMat4x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| default: break; // some compilers want this |
| } |
| } else { |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructFloat; break; |
| case 2: op = EOpConstructVec2; break; |
| case 3: op = EOpConstructVec3; break; |
| case 4: op = EOpConstructVec4; break; |
| default: break; // some compilers want this |
| } |
| } |
| break; |
| case EbtInt: |
        if (type.isMatrix()) {
| switch (type.getMatrixCols()) { |
| case 2: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructIMat2x2; break; |
| case 3: op = EOpConstructIMat2x3; break; |
| case 4: op = EOpConstructIMat2x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 3: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructIMat3x2; break; |
| case 3: op = EOpConstructIMat3x3; break; |
| case 4: op = EOpConstructIMat3x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 4: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructIMat4x2; break; |
| case 3: op = EOpConstructIMat4x3; break; |
| case 4: op = EOpConstructIMat4x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| } |
| } else { |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructInt; break; |
| case 2: op = EOpConstructIVec2; break; |
| case 3: op = EOpConstructIVec3; break; |
| case 4: op = EOpConstructIVec4; break; |
| default: break; // some compilers want this |
| } |
| } |
| break; |
| case EbtUint: |
        if (type.isMatrix()) {
| switch (type.getMatrixCols()) { |
| case 2: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructUMat2x2; break; |
| case 3: op = EOpConstructUMat2x3; break; |
| case 4: op = EOpConstructUMat2x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 3: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructUMat3x2; break; |
| case 3: op = EOpConstructUMat3x3; break; |
| case 4: op = EOpConstructUMat3x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 4: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructUMat4x2; break; |
| case 3: op = EOpConstructUMat4x3; break; |
| case 4: op = EOpConstructUMat4x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| } |
| } else { |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructUint; break; |
| case 2: op = EOpConstructUVec2; break; |
| case 3: op = EOpConstructUVec3; break; |
| case 4: op = EOpConstructUVec4; break; |
| default: break; // some compilers want this |
| } |
| } |
| break; |
| case EbtBool: |
        if (type.isMatrix()) {
| switch (type.getMatrixCols()) { |
| case 2: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructBMat2x2; break; |
| case 3: op = EOpConstructBMat2x3; break; |
| case 4: op = EOpConstructBMat2x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 3: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructBMat3x2; break; |
| case 3: op = EOpConstructBMat3x3; break; |
| case 4: op = EOpConstructBMat3x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 4: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructBMat4x2; break; |
| case 3: op = EOpConstructBMat4x3; break; |
| case 4: op = EOpConstructBMat4x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| } |
| } else { |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructBool; break; |
| case 2: op = EOpConstructBVec2; break; |
| case 3: op = EOpConstructBVec3; break; |
| case 4: op = EOpConstructBVec4; break; |
| default: break; // some compilers want this |
| } |
| } |
| break; |
| #ifndef GLSLANG_WEB |
| case EbtDouble: |
        if (type.isMatrix()) {
| switch (type.getMatrixCols()) { |
| case 2: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructDMat2x2; break; |
| case 3: op = EOpConstructDMat2x3; break; |
| case 4: op = EOpConstructDMat2x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 3: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructDMat3x2; break; |
| case 3: op = EOpConstructDMat3x3; break; |
| case 4: op = EOpConstructDMat3x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 4: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructDMat4x2; break; |
| case 3: op = EOpConstructDMat4x3; break; |
| case 4: op = EOpConstructDMat4x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| } |
| } else { |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructDouble; break; |
| case 2: op = EOpConstructDVec2; break; |
| case 3: op = EOpConstructDVec3; break; |
| case 4: op = EOpConstructDVec4; break; |
| default: break; // some compilers want this |
| } |
| } |
| break; |
| case EbtFloat16: |
        if (type.isMatrix()) {
| switch (type.getMatrixCols()) { |
| case 2: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructF16Mat2x2; break; |
| case 3: op = EOpConstructF16Mat2x3; break; |
| case 4: op = EOpConstructF16Mat2x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 3: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructF16Mat3x2; break; |
| case 3: op = EOpConstructF16Mat3x3; break; |
| case 4: op = EOpConstructF16Mat3x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case 4: |
| switch (type.getMatrixRows()) { |
| case 2: op = EOpConstructF16Mat4x2; break; |
| case 3: op = EOpConstructF16Mat4x3; break; |
| case 4: op = EOpConstructF16Mat4x4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| } |
| } |
| else { |
| switch (type.getVectorSize()) { |
| case 1: op = EOpConstructFloat16; break; |
| case 2: op = EOpConstructF16Vec2; break; |
| case 3: op = EOpConstructF16Vec3; break; |
| case 4: op = EOpConstructF16Vec4; break; |
| default: break; // some compilers want this |
| } |
| } |
| break; |
| case EbtInt8: |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructInt8; break; |
| case 2: op = EOpConstructI8Vec2; break; |
| case 3: op = EOpConstructI8Vec3; break; |
| case 4: op = EOpConstructI8Vec4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case EbtUint8: |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructUint8; break; |
| case 2: op = EOpConstructU8Vec2; break; |
| case 3: op = EOpConstructU8Vec3; break; |
| case 4: op = EOpConstructU8Vec4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case EbtInt16: |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructInt16; break; |
| case 2: op = EOpConstructI16Vec2; break; |
| case 3: op = EOpConstructI16Vec3; break; |
| case 4: op = EOpConstructI16Vec4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case EbtUint16: |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructUint16; break; |
| case 2: op = EOpConstructU16Vec2; break; |
| case 3: op = EOpConstructU16Vec3; break; |
| case 4: op = EOpConstructU16Vec4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case EbtInt64: |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructInt64; break; |
| case 2: op = EOpConstructI64Vec2; break; |
| case 3: op = EOpConstructI64Vec3; break; |
| case 4: op = EOpConstructI64Vec4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case EbtUint64: |
| switch(type.getVectorSize()) { |
| case 1: op = EOpConstructUint64; break; |
| case 2: op = EOpConstructU64Vec2; break; |
| case 3: op = EOpConstructU64Vec3; break; |
| case 4: op = EOpConstructU64Vec4; break; |
| default: break; // some compilers want this |
| } |
| break; |
| case EbtReference: |
| op = EOpConstructReference; |
| break; |
| #endif |
| default: |
| break; |
| } |
| |
| return op; |
| } |
| |
| // |
// Safe way to combine two nodes into an aggregate. Works with null pointers,
// a node that's not an aggregate yet, etc.
| // |
| // Returns the resulting aggregate, unless nullptr was passed in for |
| // both existing nodes. |
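//
// For example, building a statement list incrementally (illustrative caller):
//
//     TIntermNode* seq = nullptr;
//     seq = intermediate.growAggregate(seq, stmt1);  // wraps stmt1 in a new aggregate
//     seq = intermediate.growAggregate(seq, stmt2);  // appends stmt2 to the same aggregate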
| // |
| TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right) |
| { |
| if (left == nullptr && right == nullptr) |
| return nullptr; |
| |
| TIntermAggregate* aggNode = nullptr; |
| if (left != nullptr) |
| aggNode = left->getAsAggregate(); |
| if (aggNode == nullptr || aggNode->getOp() != EOpNull) { |
| aggNode = new TIntermAggregate; |
| if (left != nullptr) |
| aggNode->getSequence().push_back(left); |
| } |
| |
| if (right != nullptr) |
| aggNode->getSequence().push_back(right); |
| |
| return aggNode; |
| } |
| |
| TIntermAggregate* TIntermediate::growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc& loc) |
| { |
| TIntermAggregate* aggNode = growAggregate(left, right); |
| if (aggNode) |
| aggNode->setLoc(loc); |
| |
| return aggNode; |
| } |
| |
| // |
| // Turn an existing node into an aggregate. |
| // |
| // Returns an aggregate, unless nullptr was passed in for the existing node. |
| // |
| TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node) |
| { |
| if (node == nullptr) |
| return nullptr; |
| |
| TIntermAggregate* aggNode = new TIntermAggregate; |
| aggNode->getSequence().push_back(node); |
| aggNode->setLoc(node->getLoc()); |
| |
| return aggNode; |
| } |
| |
| TIntermAggregate* TIntermediate::makeAggregate(TIntermNode* node, const TSourceLoc& loc) |
| { |
| if (node == nullptr) |
| return nullptr; |
| |
| TIntermAggregate* aggNode = new TIntermAggregate; |
| aggNode->getSequence().push_back(node); |
| aggNode->setLoc(loc); |
| |
| return aggNode; |
| } |
| |
| // |
| // Make an aggregate with an empty sequence. |
| // |
| TIntermAggregate* TIntermediate::makeAggregate(const TSourceLoc& loc) |
| { |
| TIntermAggregate* aggNode = new TIntermAggregate; |
| aggNode->setLoc(loc); |
| |
| return aggNode; |
| } |
| |
| // |
| // For "if" test nodes. There are three children; a condition, |
| // a true path, and a false path. The two paths are in the |
| // nodePair. |
| // |
| // Returns the selection node created. |
| // |
| TIntermSelection* TIntermediate::addSelection(TIntermTyped* cond, TIntermNodePair nodePair, const TSourceLoc& loc) |
| { |
| // |
| // Don't prune the false path for compile-time constants; it's needed |
| // for static access analysis. |
| // |
| |
| TIntermSelection* node = new TIntermSelection(cond, nodePair.node1, nodePair.node2); |
| node->setLoc(loc); |
| |
| return node; |
| } |
| |
| TIntermTyped* TIntermediate::addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc) |
| { |
    // The GLSL specification says "the lowest precedence operators of the sequence
    // operator ( , ) and the assignment operators ... are not included in the
    // operators that can create a constant expression." So a comma expression is
    // never folded to a constant, and this fold stays disabled:
    //
    // if (left->getType().getQualifier().storage == EvqConst &&
    //     right->getType().getQualifier().storage == EvqConst) {
    //     return right;
    // }
| |
    TIntermTyped* commaAggregate = growAggregate(left, right, loc);
| commaAggregate->getAsAggregate()->setOperator(EOpComma); |
| commaAggregate->setType(right->getType()); |
| commaAggregate->getWritableType().getQualifier().makeTemporary(); |
| |
| return commaAggregate; |
| } |
| |
| TIntermTyped* TIntermediate::addMethod(TIntermTyped* object, const TType& type, const TString* name, const TSourceLoc& loc) |
| { |
| TIntermMethod* method = new TIntermMethod(object, type, *name); |
| method->setLoc(loc); |
| |
| return method; |
| } |
| |
| // |
| // For "?:" test nodes. There are three children; a condition, |
| // a true path, and a false path. The two paths are specified |
| // as separate parameters. For vector 'cond', the true and false |
| // are not paths, but vectors to mix. |
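// For example, with a 'bvec3' condition the result is built as the equivalent
// of mix(falseExpr, trueExpr, cond), selecting trueExpr components where the
// condition is true.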
| // |
| // Specialization constant operations include |
| // - The ternary operator ( ? : ) |
| // |
| // Returns the selection node created, or nullptr if one could not be. |
| // |
| TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, |
| const TSourceLoc& loc) |
| { |
    // If both sides are void, this is a statement-level if-then-else rather than
    // a value-producing ?:, so use the two-path addSelection() above.
| if (trueBlock->getBasicType() == EbtVoid && falseBlock->getBasicType() == EbtVoid) { |
| TIntermNodePair pair = { trueBlock, falseBlock }; |
| TIntermSelection* selection = addSelection(cond, pair, loc); |
| if (getSource() == EShSourceHlsl) |
| selection->setNoShortCircuit(); |
| |
| return selection; |
| } |
| |
| // |
| // Get compatible types. |
| // |
| auto children = addConversion(EOpSequence, trueBlock, falseBlock); |
| trueBlock = std::get<0>(children); |
| falseBlock = std::get<1>(children); |
| |
| if (trueBlock == nullptr || falseBlock == nullptr) |
| return nullptr; |
| |
| // Handle a vector condition as a mix |
| if (!cond->getType().isScalarOrVec1()) { |
| TType targetVectorType(trueBlock->getType().getBasicType(), EvqTemporary, |
| cond->getType().getVectorSize()); |
| // smear true/false operands as needed |
| trueBlock = addUniShapeConversion(EOpMix, targetVectorType, trueBlock); |
| falseBlock = addUniShapeConversion(EOpMix, targetVectorType, falseBlock); |
| |
| // After conversion, types have to match. |
| if (falseBlock->getType() != trueBlock->getType()) |
| return nullptr; |
| |
| // make the mix operation |
| TIntermAggregate* mix = makeAggregate(loc); |
| mix = growAggregate(mix, falseBlock); |
| mix = growAggregate(mix, trueBlock); |
| mix = growAggregate(mix, cond); |
| mix->setType(targetVectorType); |
| mix->setOp(EOpMix); |
| |
| return mix; |
| } |
| |
| // Now have a scalar condition... |
| |
| // Convert true and false expressions to matching types |
| addBiShapeConversion(EOpMix, trueBlock, falseBlock); |
| |
| // After conversion, types have to match. |
| if (falseBlock->getType() != trueBlock->getType()) |
| return nullptr; |
| |
| // Eliminate the selection when the condition is a scalar and all operands are constant. |
| if (cond->getAsConstantUnion() && trueBlock->getAsConstantUnion() && falseBlock->getAsConstantUnion()) { |
| if (cond->getAsConstantUnion()->getConstArray()[0].getBConst()) |
| return trueBlock; |
| else |
| return falseBlock; |
| } |
| |
| // |
| // Make a selection node. |
| // |
| TIntermSelection* node = new TIntermSelection(cond, trueBlock, falseBlock, trueBlock->getType()); |
| node->setLoc(loc); |
| node->getQualifier().precision = std::max(trueBlock->getQualifier().precision, falseBlock->getQualifier().precision); |
| |
| if ((cond->getQualifier().isConstant() && specConstantPropagates(*trueBlock, *falseBlock)) || |
| (cond->getQualifier().isSpecConstant() && trueBlock->getQualifier().isConstant() && |
| falseBlock->getQualifier().isConstant())) |
| node->getQualifier().makeSpecConstant(); |
| else |
| node->getQualifier().makeTemporary(); |
| |
| if (getSource() == EShSourceHlsl) |
| node->setNoShortCircuit(); |
| |
| return node; |
| } |
| |
| // |
// Constant terminal nodes. Each holds a union containing bool, float, or int constants.
| // |
| // Returns the constant union node created. |
| // |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(const TConstUnionArray& unionArray, const TType& t, const TSourceLoc& loc, bool literal) const |
| { |
| TIntermConstantUnion* node = new TIntermConstantUnion(unionArray, t); |
| node->getQualifier().storage = EvqConst; |
| node->setLoc(loc); |
| if (literal) |
| node->setLiteral(); |
| |
| return node; |
| } |
| TIntermConstantUnion* TIntermediate::addConstantUnion(signed char i8, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setI8Const(i8); |
| |
| return addConstantUnion(unionArray, TType(EbtInt8, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned char u8, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
    unionArray[0].setU8Const(u8);
| |
| return addConstantUnion(unionArray, TType(EbtUint8, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(signed short i16, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setI16Const(i16); |
| |
| return addConstantUnion(unionArray, TType(EbtInt16, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned short u16, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setU16Const(u16); |
| |
| return addConstantUnion(unionArray, TType(EbtUint16, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(int i, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setIConst(i); |
| |
| return addConstantUnion(unionArray, TType(EbtInt, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned int u, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setUConst(u); |
| |
| return addConstantUnion(unionArray, TType(EbtUint, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(long long i64, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setI64Const(i64); |
| |
| return addConstantUnion(unionArray, TType(EbtInt64, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(unsigned long long u64, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setU64Const(u64); |
| |
| return addConstantUnion(unionArray, TType(EbtUint64, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(bool b, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setBConst(b); |
| |
| return addConstantUnion(unionArray, TType(EbtBool, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(double d, TBasicType baseType, const TSourceLoc& loc, bool literal) const |
| { |
| assert(baseType == EbtFloat || baseType == EbtDouble || baseType == EbtFloat16); |
| |
| TConstUnionArray unionArray(1); |
| unionArray[0].setDConst(d); |
| |
| return addConstantUnion(unionArray, TType(baseType, EvqConst), loc, literal); |
| } |
| |
| TIntermConstantUnion* TIntermediate::addConstantUnion(const TString* s, const TSourceLoc& loc, bool literal) const |
| { |
| TConstUnionArray unionArray(1); |
| unionArray[0].setSConst(s); |
| |
| return addConstantUnion(unionArray, TType(EbtString, EvqConst), loc, literal); |
| } |
| |
| // Put vector swizzle selectors onto the given sequence |
| void TIntermediate::pushSelector(TIntermSequence& sequence, const TVectorSelector& selector, const TSourceLoc& loc) |
| { |
| TIntermConstantUnion* constIntNode = addConstantUnion(selector, loc); |
| sequence.push_back(constIntNode); |
| } |
| |
| // Put matrix swizzle selectors onto the given sequence |
| void TIntermediate::pushSelector(TIntermSequence& sequence, const TMatrixSelector& selector, const TSourceLoc& loc) |
| { |
| TIntermConstantUnion* constIntNode = addConstantUnion(selector.coord1, loc); |
| sequence.push_back(constIntNode); |
| constIntNode = addConstantUnion(selector.coord2, loc); |
| sequence.push_back(constIntNode); |
| } |
| |
| // Make an aggregate node that has a sequence of all selectors. |
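// For example, the vector swizzle '.yzx' becomes an EOpSequence aggregate whose
// children are the int constants 1, 2, 0.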
| template TIntermTyped* TIntermediate::addSwizzle<TVectorSelector>(TSwizzleSelectors<TVectorSelector>& selector, const TSourceLoc& loc); |
| template TIntermTyped* TIntermediate::addSwizzle<TMatrixSelector>(TSwizzleSelectors<TMatrixSelector>& selector, const TSourceLoc& loc); |
| template<typename selectorType> |
| TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selector, const TSourceLoc& loc) |
| { |
| TIntermAggregate* node = new TIntermAggregate(EOpSequence); |
| |
| node->setLoc(loc); |
| TIntermSequence &sequenceVector = node->getSequence(); |
| |
| for (int i = 0; i < selector.size(); i++) |
| pushSelector(sequenceVector, selector[i], loc); |
| |
| return node; |
| } |
| |
| // |
| // Follow the left branches down to the root of an l-value |
| // expression (just "." and []). |
| // |
// Return the base of the l-value (the node at which the indexing chain stops).
| // Return nullptr if a chain following dereferences cannot be followed. |
| // |
| // 'swizzleOkay' says whether or not it is okay to consider a swizzle |
| // a valid part of the dereference chain. |
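//
// For example, for the l-value expression 'a.b[i].c' the returned base is the
// node for 'a'.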
| // |
| const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay) |
| { |
| do { |
| const TIntermBinary* binary = node->getAsBinaryNode(); |
| if (binary == nullptr) |
| return node; |
| TOperator op = binary->getOp(); |
| if (op != EOpIndexDirect && op != EOpIndexIndirect && op != EOpIndexDirectStruct && op != EOpVectorSwizzle && op != EOpMatrixSwizzle) |
| return nullptr; |
| if (! swizzleOkay) { |
| if (op == EOpVectorSwizzle || op == EOpMatrixSwizzle) |
| return nullptr; |
| if ((op == EOpIndexDirect || op == EOpIndexIndirect) && |
| (binary->getLeft()->getType().isVector() || binary->getLeft()->getType().isScalar()) && |
| ! binary->getLeft()->getType().isArray()) |
| return nullptr; |
| } |
| node = node->getAsBinaryNode()->getLeft(); |
| } while (true); |
| } |
| |
| // |
| // Create while and do-while loop nodes. |
| // |
| TIntermLoop* TIntermediate::addLoop(TIntermNode* body, TIntermTyped* test, TIntermTyped* terminal, bool testFirst, |
| const TSourceLoc& loc) |
| { |
| TIntermLoop* node = new TIntermLoop(body, test, terminal, testFirst); |
| node->setLoc(loc); |
| |
| return node; |
| } |
| |
| // |
| // Create a for-loop sequence. |
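//
// Models 'for (initializer; test; terminal) body' as an EOpSequence aggregate
// holding the initializer followed by the loop node; the loop node itself is
// also returned through 'node'.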
| // |
| TIntermAggregate* TIntermediate::addForLoop(TIntermNode* body, TIntermNode* initializer, TIntermTyped* test, |
| TIntermTyped* terminal, bool testFirst, const TSourceLoc& loc, TIntermLoop*& node) |
| { |
| node = new TIntermLoop(body, test, terminal, testFirst); |
| node->setLoc(loc); |
| |
| // make a sequence of the initializer and statement, but try to reuse the |
| // aggregate already created for whatever is in the initializer, if there is one |
| TIntermAggregate* loopSequence = (initializer == nullptr || |
| initializer->getAsAggregate() == nullptr) ? makeAggregate(initializer, loc) |
| : initializer->getAsAggregate(); |
| if (loopSequence != nullptr && loopSequence->getOp() == EOpSequence) |
| loopSequence->setOp(EOpNull); |
| loopSequence = growAggregate(loopSequence, node); |
| loopSequence->setOperator(EOpSequence); |
| |
| return loopSequence; |
| } |
| |
| // |
| // Add branches. |
| // |
| TIntermBranch* TIntermediate::addBranch(TOperator branchOp, const TSourceLoc& loc) |
| { |
| return addBranch(branchOp, nullptr, loc); |
| } |
| |
| TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expression, const TSourceLoc& loc) |
| { |
| TIntermBranch* node = new TIntermBranch(branchOp, expression); |
| node->setLoc(loc); |
| |
| return node; |
| } |
| |
| // Propagate precision from formal function return type to actual return type, |
| // and on to its subtree. |
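// For example, in a 'mediump float' function, a returned expression whose own
// precision is still EpqNone picks up mediump from the declared return type.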
| void TIntermBranch::updatePrecision(TPrecisionQualifier parentPrecision) |
| { |
| TIntermTyped* exp = getExpression(); |
| if (exp == nullptr) |
| return; |
| |
| if (exp->getBasicType() == EbtInt || exp->getBasicType() == EbtUint || |
| exp->getBasicType() == EbtFloat || exp->getBasicType() == EbtFloat16) { |
| if (parentPrecision != EpqNone && exp->getQualifier().precision == EpqNone) { |
| exp->propagatePrecision(parentPrecision); |
| } |
| } |
| } |
| |
| // |
| // This is to be executed after the final root is put on top by the parsing |
| // process. |
| // |
| bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/) |
| { |
| if (root == nullptr) |
| return true; |
| |
| // Finish off the top-level sequence |
| TIntermAggregate* aggRoot = root->getAsAggregate(); |
| if (aggRoot && aggRoot->getOp() == EOpNull) |
| aggRoot->setOperator(EOpSequence); |
| |
| #ifndef GLSLANG_WEB |
|