Merge pull request #21651 from compnerd/parse-ordering
Parse: explicitly state ordering of parsing (NFC)
diff --git a/cmake/modules/AddSwift.cmake b/cmake/modules/AddSwift.cmake
index e8862af..18f844d 100644
--- a/cmake/modules/AddSwift.cmake
+++ b/cmake/modules/AddSwift.cmake
@@ -2330,31 +2330,28 @@
endmacro()
function(add_swift_host_tool executable)
- set(ADDSWIFTHOSTTOOL_multiple_parameter_options
- SWIFT_COMPONENT)
+ set(options)
+ set(single_parameter_options SWIFT_COMPONENT)
+ set(multiple_parameter_options)
- cmake_parse_arguments(
- ADDSWIFTHOSTTOOL # prefix
- "" # options
- "" # single-value args
- "${ADDSWIFTHOSTTOOL_multiple_parameter_options}" # multi-value args
- ${ARGN})
+ cmake_parse_arguments(ASHT
+ "${options}"
+ "${single_parameter_options}"
+ "${multiple_parameter_options}"
+ ${ARGN})
- precondition(ADDSWIFTHOSTTOOL_SWIFT_COMPONENT
+ precondition(ASHT_SWIFT_COMPONENT
MESSAGE "Swift Component is required to add a host tool")
# Create the executable rule.
- add_swift_executable(
- ${executable}
- ${ADDSWIFTHOSTTOOL_UNPARSED_ARGUMENTS}
- )
+ add_swift_executable(${executable}
+ ${ASHT_UNPARSED_ARGUMENTS})
- swift_install_in_component(${ADDSWIFTHOSTTOOL_SWIFT_COMPONENT}
+ swift_install_in_component(${ASHT_SWIFT_COMPONENT}
TARGETS ${executable}
RUNTIME DESTINATION bin)
- swift_is_installing_component(${ADDSWIFTHOSTTOOL_SWIFT_COMPONENT}
- is_installing)
+ swift_is_installing_component(${ASHT_SWIFT_COMPONENT} is_installing)
if(NOT is_installing)
set_property(GLOBAL APPEND PROPERTY SWIFT_BUILDTREE_EXPORTS ${executable})
diff --git a/include/swift/SIL/SILConstants.h b/include/swift/SIL/SILConstants.h
index a6c9a29..6fc7913 100644
--- a/include/swift/SIL/SILConstants.h
+++ b/include/swift/SIL/SILConstants.h
@@ -268,6 +268,9 @@
SymbolicValueMemoryObject *
getAddressValue(SmallVectorImpl<unsigned> &accessPath) const;
+ /// Return just the memory object for an address value.
+ SymbolicValueMemoryObject *getAddressValueMemoryObject() const;
+
//===--------------------------------------------------------------------===//
// Helpers
@@ -312,6 +315,24 @@
static SymbolicValueMemoryObject *create(Type type, SymbolicValue value,
ASTContext &astContext);
+ /// Given that this memory object contains an aggregate value like
+ /// {{1, 2}, 3}, and given an access path like [0,1], return the indexed
+ /// element, e.g. "2" in this case.
+ ///
+ /// Returns uninit memory if the access path points at or into uninit memory.
+ ///
+ /// Precondition: The access path must be valid for this memory object's type.
+ SymbolicValue getIndexedElement(ArrayRef<unsigned> accessPath);
+
+ /// Given that this memory object contains an aggregate value like
+ /// {{1, 2}, 3}, given an access path like [0,1], and given a new element like
+ /// "4", set the indexed element to the specified scalar, producing {{1, 4},
+ /// 3} in this case.
+ ///
+ /// Precondition: The access path must be valid for this memory object's type.
+ void setIndexedElement(ArrayRef<unsigned> accessPath,
+ SymbolicValue newElement, ASTContext &astCtx);
+
private:
const Type type;
SymbolicValue value;
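
To make the access-path convention above concrete: an access path is a list of
child indices walked from the root of a nested aggregate value. A minimal,
self-contained C++ sketch (a hypothetical Node type, not the actual
SymbolicValue representation):

  #include <cassert>
  #include <vector>

  // Hypothetical stand-in for an aggregate symbolic value: a leaf carries an
  // integer, an interior node carries children.
  struct Node {
    int leaf;
    std::vector<Node> children;
  };

  // Walk an access path like [0, 1] from the root: take child 0, then that
  // child's element 1.
  static const Node &getIndexed(const Node &root,
                                const std::vector<unsigned> &path) {
    const Node *cur = &root;
    for (unsigned idx : path) {
      assert(idx < cur->children.size() && "access path invalid for value");
      cur = &cur->children[idx];
    }
    return *cur;
  }

  int main() {
    // {{1, 2}, 3} with access path [0, 1] yields the element 2.
    Node root{0, {Node{0, {Node{1, {}}, Node{2, {}}}}, Node{3, {}}}};
    assert(getIndexed(root, {0, 1}).leaf == 2);
  }
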
diff --git a/lib/AST/GenericSignatureBuilder.cpp b/lib/AST/GenericSignatureBuilder.cpp
index 67c70df..e1d1723 100644
--- a/lib/AST/GenericSignatureBuilder.cpp
+++ b/lib/AST/GenericSignatureBuilder.cpp
@@ -6835,6 +6835,14 @@
auto locA = (*a)->constraint.source->getLoc();
auto locB = (*b)->constraint.source->getLoc();
+ // Put invalid locations after valid ones.
+ if (locA.isInvalid() || locB.isInvalid()) {
+ if (locA.isInvalid() != locB.isInvalid())
+ return locA.isInvalid() ? 1 : -1;
+
+ return 0;
+ }
+
auto bufferA = sourceMgr.findBufferContainingLoc(locA);
auto bufferB = sourceMgr.findBufferContainingLoc(locB);
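
The added branch is the usual three-way-comparator pattern for sorting
"missing" keys past all present ones. A minimal sketch of the same ordering
rule over std::optional (hypothetical, not the SourceLoc/SourceManager API):

  #include <optional>

  // Orders present values ascending and places empty optionals after all
  // present ones, mirroring the isInvalid() handling above.
  static int compareMissingLast(std::optional<int> a, std::optional<int> b) {
    if (!a || !b) {
      if (a.has_value() != b.has_value())
        return !a ? 1 : -1; // the missing key sorts later
      return 0;             // both missing: treat as equal
    }
    return (*a > *b) - (*a < *b);
  }
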
diff --git a/lib/SIL/SILConstants.cpp b/lib/SIL/SILConstants.cpp
index 2666f45..94ea0dc 100644
--- a/lib/SIL/SILConstants.cpp
+++ b/lib/SIL/SILConstants.cpp
@@ -404,6 +404,14 @@
return dav->memoryObject;
}
+/// Return just the memory object for an address value.
+SymbolicValueMemoryObject *SymbolicValue::getAddressValueMemoryObject() const {
+ if (representationKind == RK_DirectAddress)
+ return value.directAddress;
+ assert(representationKind == RK_DerivedAddress);
+ return value.derivedAddress->memoryObject;
+}
+
//===----------------------------------------------------------------------===//
// Higher level code
//===----------------------------------------------------------------------===//
@@ -521,3 +529,124 @@
emittedFirstNote = true;
}
}
+
+/// Returns the element of `aggregate` specified by the access path.
+///
+/// This is a helper for `SymbolicValueMemoryObject::getIndexedElement`. See
+/// there for more detailed documentation.
+static SymbolicValue getIndexedElement(SymbolicValue aggregate,
+ ArrayRef<unsigned> accessPath,
+ Type type) {
+ // We're done if we've run out of access path.
+ if (accessPath.empty())
+ return aggregate;
+
+ // Everything inside uninit memory is uninit memory.
+ if (aggregate.getKind() == SymbolicValue::UninitMemory)
+ return SymbolicValue::getUninitMemory();
+
+ assert(aggregate.getKind() == SymbolicValue::Aggregate &&
+ "the accessPath is invalid for this type");
+
+ unsigned elementNo = accessPath.front();
+
+ SymbolicValue elt = aggregate.getAggregateValue()[elementNo];
+ Type eltType;
+ if (auto *decl = type->getStructOrBoundGenericStruct()) {
+ auto it = decl->getStoredProperties().begin();
+ std::advance(it, elementNo);
+ eltType = (*it)->getType();
+ } else if (auto tuple = type->getAs<TupleType>()) {
+ assert(elementNo < tuple->getNumElements() && "invalid index");
+ eltType = tuple->getElement(elementNo).getType();
+ } else {
+ llvm_unreachable("the accessPath is invalid for this type");
+ }
+
+ return getIndexedElement(elt, accessPath.drop_front(), eltType);
+}
+
+/// Given that this memory object contains an aggregate value like
+/// {{1, 2}, 3}, and given an access path like [0,1], return the indexed
+/// element, e.g. "2" in this case.
+///
+/// Returns uninit memory if the access path points at or into uninit memory.
+///
+/// Precondition: The access path must be valid for this memory object's type.
+SymbolicValue
+SymbolicValueMemoryObject::getIndexedElement(ArrayRef<unsigned> accessPath) {
+ return ::getIndexedElement(value, accessPath, type);
+}
+
+/// Returns `aggregate` with the element specified by the access path set to
+/// `newElement`.
+///
+/// This is a helper for `SymbolicValueMemoryObject::setIndexedElement`. See
+/// there for more detailed documentation.
+static SymbolicValue setIndexedElement(SymbolicValue aggregate,
+ ArrayRef<unsigned> accessPath,
+ SymbolicValue newElement, Type type,
+ ASTContext &astCtx) {
+ // We're done if we've run out of access path.
+ if (accessPath.empty())
+ return newElement;
+
+ // If we have an uninit memory, then scalarize it into an aggregate to
+ // continue. This happens when memory objects are initialized piecewise.
+ if (aggregate.getKind() == SymbolicValue::UninitMemory) {
+ unsigned numMembers;
+ // We need to have either a struct or a tuple type.
+ if (auto *decl = type->getStructOrBoundGenericStruct()) {
+ numMembers = std::distance(decl->getStoredProperties().begin(),
+ decl->getStoredProperties().end());
+ } else if (auto tuple = type->getAs<TupleType>()) {
+ numMembers = tuple->getNumElements();
+ } else {
+ llvm_unreachable("the accessPath is invalid for this type");
+ }
+
+ SmallVector<SymbolicValue, 4> newElts(numMembers,
+ SymbolicValue::getUninitMemory());
+ aggregate = SymbolicValue::getAggregate(newElts, astCtx);
+ }
+
+ assert(aggregate.getKind() == SymbolicValue::Aggregate &&
+ "the accessPath is invalid for this type");
+
+ unsigned elementNo = accessPath.front();
+
+ ArrayRef<SymbolicValue> oldElts = aggregate.getAggregateValue();
+ Type eltType;
+ if (auto *decl = type->getStructOrBoundGenericStruct()) {
+ auto it = decl->getStoredProperties().begin();
+ std::advance(it, elementNo);
+ eltType = (*it)->getType();
+ } else if (auto tuple = type->getAs<TupleType>()) {
+ assert(elementNo < tuple->getNumElements() && "invalid index");
+ eltType = tuple->getElement(elementNo).getType();
+ } else {
+ llvm_unreachable("the accessPath is invalid for this type");
+ }
+
+ // Update the indexed element of the aggregate.
+ SmallVector<SymbolicValue, 4> newElts(oldElts.begin(), oldElts.end());
+ newElts[elementNo] = setIndexedElement(newElts[elementNo],
+ accessPath.drop_front(), newElement,
+ eltType, astCtx);
+
+ aggregate = SymbolicValue::getAggregate(newElts, astCtx);
+
+ return aggregate;
+}
+
+/// Given that this memory object contains an aggregate value like
+/// {{1, 2}, 3}, given an access path like [0,1], and given a new element like
+/// "4", set the indexed element to the specified scalar, producing {{1, 4},
+/// 3} in this case.
+///
+/// Precondition: The access path must be valid for this memory object's type.
+void SymbolicValueMemoryObject::setIndexedElement(
+ ArrayRef<unsigned> accessPath, SymbolicValue newElement,
+ ASTContext &astCtx) {
+ value = ::setIndexedElement(value, accessPath, newElement, type, astCtx);
+}
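
The scalarize-on-write step in setIndexedElement, which expands uninit memory
into an aggregate of uninit slots before recursing, can be pictured with a
small stand-in. A hedged sketch that assumes a fixed arity per level instead
of deriving it from a struct or tuple type:

  #include <cassert>
  #include <vector>

  // Hypothetical cell: uninitialized memory, a scalar, or an aggregate.
  struct Cell {
    enum Kind { Uninit, Scalar, Aggregate } kind;
    int scalar;
    std::vector<Cell> elements;
  };

  // Write `value` at `path`. Writing through uninit memory first materializes
  // an aggregate of uninit slots, then recurses; the real code derives `arity`
  // from the memory object's struct or tuple type.
  static void setIndexed(Cell &cell, const std::vector<unsigned> &path,
                         unsigned depth, int value, unsigned arity) {
    if (depth == path.size()) {
      cell.kind = Cell::Scalar;
      cell.scalar = value;
      return;
    }
    if (cell.kind == Cell::Uninit) {
      cell.kind = Cell::Aggregate;
      cell.elements.assign(arity, Cell{Cell::Uninit, 0, {}});
    }
    assert(cell.kind == Cell::Aggregate && "access path invalid for value");
    setIndexed(cell.elements[path[depth]], path, depth + 1, value, arity);
  }
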
diff --git a/lib/SILOptimizer/Transforms/PerformanceInliner.cpp b/lib/SILOptimizer/Transforms/PerformanceInliner.cpp
index 39ff21e..c34a123 100644
--- a/lib/SILOptimizer/Transforms/PerformanceInliner.cpp
+++ b/lib/SILOptimizer/Transforms/PerformanceInliner.cpp
@@ -105,7 +105,7 @@
/// The benefit of inlining an exclusivity-containing callee.
/// The exclusivity must be dynamic,
/// have no nested conflict, and address known storage.
- ExclusivityBenefit = RemovedCallBenefit + 300,
+ ExclusivityBenefit = RemovedCallBenefit + 125,
/// The benefit of inlining class methods with -Osize.
/// We only inline very small class methods with -Osize.
diff --git a/lib/SILOptimizer/Utils/ConstExpr.cpp b/lib/SILOptimizer/Utils/ConstExpr.cpp
index aa900c4..57d6a84 100644
--- a/lib/SILOptimizer/Utils/ConstExpr.cpp
+++ b/lib/SILOptimizer/Utils/ConstExpr.cpp
@@ -20,6 +20,7 @@
#include "swift/SIL/FormalLinkage.h"
#include "swift/SIL/SILBuilder.h"
#include "swift/SIL/SILConstants.h"
+#include "swift/SILOptimizer/Utils/Devirtualize.h"
#include "swift/Serialization/SerializedSILLoader.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/Support/TrailingObjects.h"
@@ -59,9 +60,6 @@
/// substitutionMap specifies a mapping from all of the protocol and type
/// requirements in the generic signature down to concrete conformances and
/// concrete types.
- /// TODO(constexpr patch): I have intentionally included this even though it's
- /// unused, so that I don't have to add it back to all the function signatures
- /// when I start using it.
SubstitutionMap substitutionMap;
/// This keeps track of the number of instructions we've evaluated. If this
@@ -117,10 +115,15 @@
llvm::Optional<SymbolicValue> computeOpaqueCallResult(ApplyInst *apply,
SILFunction *callee);
+ SymbolicValue getSingleWriterAddressValue(SILValue addr);
SymbolicValue getConstAddrAndLoadResult(SILValue addr);
SymbolicValue loadAddrValue(SILValue addr, SymbolicValue addrVal);
llvm::Optional<SymbolicValue> computeFSStore(SymbolicValue storedCst,
SILValue dest);
+
+private:
+ llvm::Optional<SymbolicValue>
+ initializeAddressFromSingleWriter(SILValue addr);
};
} // end anonymous namespace
@@ -134,6 +137,14 @@
SymbolicValue ConstExprFunctionState::computeConstantValue(SILValue value) {
assert(!calculatedValues.count(value));
+ // If the client is asking for the value of a stack object that hasn't been
+ // computed, and if fn is null, then we are in top-level code, and the
+ // stack object must be a single-store value. Since this is a very different
+ // computation, split it out to its own path.
+ if (!fn && value->getType().isAddress() && isa<AllocStackInst>(value)) {
+ return getSingleWriterAddressValue(value);
+ }
+
// If this a trivial constant instruction that we can handle, then fold it
// immediately.
if (auto *ili = dyn_cast<IntegerLiteralInst>(value))
@@ -221,6 +232,27 @@
if (auto li = dyn_cast<LoadInst>(value))
return getConstAddrAndLoadResult(li->getOperand());
+ // Try to resolve a witness method against our known conformances.
+ if (auto *wmi = dyn_cast<WitnessMethodInst>(value)) {
+ auto confResult = substitutionMap.lookupConformance(
+ wmi->getLookupType(), wmi->getConformance().getRequirement());
+ if (!confResult)
+ return evaluator.getUnknown(value, UnknownReason::Default);
+ auto conf = confResult.getValue();
+ auto &module = wmi->getModule();
+
+ // Look up the conformance's witness table and find the member in it.
+ SILFunction *fn =
+ module.lookUpFunctionInWitnessTable(conf, wmi->getMember()).first;
+ // If we were able to resolve it, then we can proceed.
+ if (fn)
+ return SymbolicValue::getFunction(fn);
+
+ LLVM_DEBUG(llvm::dbgs()
+ << "ConstExpr Unresolved witness: " << *value << "\n");
+ return evaluator.getUnknown(value, UnknownReason::Default);
+ }
+
if (auto *builtin = dyn_cast<BuiltinInst>(value))
return computeConstantValueBuiltin(builtin);
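
Conceptually, the witness-method resolution added above is a two-level table
lookup: the conformance selects a witness table, and the requirement selects
the implementing function in it. A toy sketch under that assumption
(hypothetical names and string keys, not the SILModule API):

  #include <map>
  #include <optional>
  #include <string>

  // Toy witness tables: for each conformance, map each protocol requirement
  // to the function that implements it.
  using WitnessTable = std::map<std::string, std::string>;
  static std::map<std::string, WitnessTable> witnessTables = {
      {"SubstitutionsY: SubstitutionsP", {{"get", "SubstitutionsY.get"}}},
  };

  // Resolve a requirement to its witness, or fail like getUnknown() above.
  static std::optional<std::string> resolveWitness(const std::string &conf,
                                                   const std::string &member) {
    auto table = witnessTables.find(conf);
    if (table == witnessTables.end())
      return std::nullopt; // unknown conformance: give up
    auto fn = table->second.find(member);
    if (fn == table->second.end())
      return std::nullopt; // unresolved witness: give up
    return fn->second;     // resolved: proceed like getFunction()
  }
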
@@ -519,11 +551,66 @@
paramConstants.push_back(argValue);
}
- // TODO(constexpr patch): This is currently unused, so we don't need to
- // calculate the correct value. Eventually, include code that calculates the
- // correct value.
+ // If we reached an external function that hasn't been deserialized yet, make
+ // sure to pull it in so we can see its body. If that fails, then we can't
+ // analyze the function.
+ if (callee->isExternalDeclaration()) {
+ callee->getModule().loadFunction(callee);
+ if (callee->isExternalDeclaration())
+ return computeOpaqueCallResult(apply, callee);
+ }
+
+ // Compute the substitution map for the callee, which maps from all of its
+ // generic requirements to concrete conformances and concrete types.
SubstitutionMap calleeSubMap;
+ auto calleeFnType = callee->getLoweredFunctionType();
+ assert((!calleeFnType->hasSelfParam() ||
+ !calleeFnType->getSelfInstanceType()->getClassOrBoundGenericClass()) &&
+ "class methods are not supported");
+ if (calleeFnType->getGenericSignature()) {
+ // Get the substitution map of the call. This maps from the callee's space
+ // into the caller's world. Witness methods require additional work to
+ // compute a mapping that is valid for the callee.
+ SubstitutionMap callSubMap;
+
+ if (calleeFnType->getRepresentation() ==
+ SILFunctionType::Representation::WitnessMethod) {
+ auto protocol =
+ calleeFnType->getWitnessMethodConformance().getRequirement();
+ // Compute a mapping that maps the Self type of the protocol given by
+ // 'requirement' to the concrete type available in the substitutionMap.
+ auto protoSelfToConcreteType =
+ apply->getSubstitutionMap().subst(substitutionMap);
+ // Get a concrete protocol conformance by using the mapping for the
+ // Self type of the requirement.
+ auto conf = protoSelfToConcreteType.lookupConformance(
+ protocol->getSelfInterfaceType()->getCanonicalType(), protocol);
+ if (!conf.hasValue())
+ return evaluator.getUnknown((SILInstruction *)apply,
+ UnknownReason::Default);
+
+ callSubMap = getWitnessMethodSubstitutions(
+ apply->getModule(), ApplySite(apply), callee, conf.getValue());
+
+ /// Remark: If we ever start to care about evaluating classes,
+ /// getSubstitutionsForCallee() is the analogous mapping function we
+ /// should use to get correct mapping from caller to callee namespace.
+ /// Ideally, the function would then be renamed to
+ /// getClassMethodSubstitutions().
+ } else {
+ callSubMap = apply->getSubstitutionMap();
+ }
+
+ // The substitution map for the callee is the composition of the caller's
+ // substitution map, which always maps down to concrete types and
+ // conformances, with the mapping introduced by the call itself. This
+ // ensures that the callee's substitution map can map from its own type
+ // namespace back to concrete types and conformances.
+ calleeSubMap = callSubMap.subst(substitutionMap);
+ }
+
// Now that we have successfully folded all of the parameters, we can evaluate
// the call.
evaluator.pushCallStack(apply->getLoc().getSourceLoc());
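
The composition performed by `callSubMap.subst(substitutionMap)` is ordinary
map composition: the call's map sends callee parameters into the caller's
namespace, and the caller's map then sends any remaining caller parameters to
concrete types. A minimal sketch over plain strings (hypothetical, not the
SubstitutionMap API):

  #include <cassert>
  #include <map>
  #include <string>

  using Subst = std::map<std::string, std::string>;

  // Compose two substitution maps: `call` maps callee parameters into the
  // caller's namespace; `caller` then maps any caller parameters that remain
  // down to concrete types.
  static Subst compose(const Subst &call, const Subst &caller) {
    Subst result;
    for (const auto &entry : call) {
      auto it = caller.find(entry.second);
      result[entry.first] = (it != caller.end()) ? it->second : entry.second;
    }
    return result;
  }

  int main() {
    // The call maps callee parameter T to caller parameter U; the caller's
    // own map sends U to Int, so the composition sends T to Int.
    Subst callee = compose({{"T", "U"}}, {{"U", "Int"}});
    assert(callee["T"] == "Int");
  }
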
@@ -572,84 +659,283 @@
return result;
}
-/// Given an aggregate value like {{1, 2}, 3} and an access path like [0,1], and
-/// a new element like 4, return the aggregate value with the indexed element
-/// replaced with the new element, producing {{1, 4}, 3} in this case.
-/// If `writeOnlyOnce` is true, and the target aggregate element to update
-/// already has a constant value, fail on the update.
+/// This is a helper function for `getSingleWriterAddressValue`. Callers should
+/// use `getSingleWriterAddressValue`.
///
-/// This returns true on failure and false on success.
+/// If `addr` has no writing uses, returns None.
///
-static bool updateIndexedElement(SymbolicValue &aggregate,
- ArrayRef<unsigned> indices,
- SymbolicValue newElement, Type type,
- bool writeOnlyOnce,
- ASTContext &astContext) {
- // We're done if we've run out of indices.
- if (indices.empty()) {
- aggregate = newElement;
- return false;
- }
+/// If the following conditions hold:
+/// * `addr` points at uninitialized memory;
+/// * there are write(s) to `addr` that, taken together, set the memory
+/// exactly once (e.g. a single "store" to `addr` OR multiple "store"s to
+/// different "tuple_element_addr"s of `addr`); and
+/// * the writes' value(s) can be const-evaluated;
+/// Then: initializes the memory at `addr` and returns None.
+///
+/// Otherwise, sets the memory at `addr` to an unknown SymbolicValue, and
+/// returns the unknown SymbolicValue.
+///
+/// Additional side effects: In all cases, this function might cache address
+/// values for `addr` and for addresses derived from `addr`.
+///
+/// Precondition: An address for `addr`, or an address that `addr` is derived
+/// from, must be cached in `calculatedValues`.
+llvm::Optional<SymbolicValue>
+ConstExprFunctionState::initializeAddressFromSingleWriter(SILValue addr) {
+ LLVM_DEBUG(llvm::dbgs() << "ConstExpr: initializeAddressFromSingleWriter "
+ << addr);
- // If we have an uninit memory, then scalarize it into an aggregate to
- // continue. This happens when memory objects are initialized piecewise.
- if (aggregate.getKind() == SymbolicValue::UninitMemory) {
- unsigned numMembers;
- // We need to have either a struct or a tuple type.
- if (auto *decl = type->getStructOrBoundGenericStruct()) {
- numMembers = std::distance(decl->getStoredProperties().begin(),
- decl->getStoredProperties().end());
- } else if (auto tuple = type->getAs<TupleType>()) {
- numMembers = tuple->getNumElements();
- } else {
- return true;
+ SmallVector<unsigned, 4> accessPath;
+ auto *memoryObject = getConstantValue(addr).getAddressValue(accessPath);
+
+ // If we detect instructions that initialize an aggregate piecewise, then we
+ // set this flag, which tells us to verify that the entire aggregate has been
+ // initialized.
+ bool mustCheckAggregateInitialized = false;
+
+ // Sets the pointed-at memory to `value`.
+ auto setMemoryValue = [&](SymbolicValue value) {
+ memoryObject->setIndexedElement(accessPath, value,
+ evaluator.getASTContext());
+ };
+
+ // Gets the pointed-at memory value.
+ auto getMemoryValue = [&]() -> SymbolicValue {
+ return memoryObject->getIndexedElement(accessPath);
+ };
+
+ // Performs all error-condition side effects, and returns the appropriate
+ // error result.
+ // Precondition: `unknown` must be an unknown SymbolicValue.
+ auto error = [&](SymbolicValue unknown) -> SymbolicValue {
+ assert(unknown.getKind() == SymbolicValue::Unknown);
+ setMemoryValue(unknown);
+ return unknown;
+ };
+
+ // Checks that the pointed-at aggregate is fully initialized.
+ // Precondition: The pointed-at memory value is uninit memory or an
+ // aggregate.
+ auto checkAggregateInitialized = [&]() -> bool {
+ auto memoryValue = getMemoryValue();
+ return memoryValue.getKind() != SymbolicValue::UninitMemory &&
+ llvm::all_of(memoryValue.getAggregateValue(),
+ [](SymbolicValue v) { return v.isConstant(); });
+ };
+
+ // Okay, check out all of the users of this value looking for semantic stores
+ // into the address. If we find more than one, then this was a var or
+ // something else we can't handle.
+ // We must iterate over all uses to make sure there is a single initializer.
+ // The only permitted early exit is when we know for sure that we have failed.
+ for (auto *use : addr->getUses()) {
+ auto user = use->getUser();
+
+ // Ignore markers, loads, and other things that aren't stores to this stack
+ // value.
+ if (isa<LoadInst>(user) || isa<DeallocStackInst>(user) ||
+ isa<DestroyAddrInst>(user) || isa<DebugValueAddrInst>(user))
+ continue;
+
+ // TODO: Allow BeginAccess/EndAccess users.
+
+ // If this is a store *to* the memory, analyze the input value.
+ if (auto *si = dyn_cast<StoreInst>(user)) {
+ if (use->getOperandNumber() == 1) {
+ // Forbid multiple assignment.
+ if (getMemoryValue().getKind() != SymbolicValue::UninitMemory)
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
+
+ auto result = getConstantValue(si->getOperand(0));
+ if (!result.isConstant())
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
+
+ setMemoryValue(result);
+ continue;
+ }
}
- SmallVector<SymbolicValue, 4> newElts(numMembers,
- SymbolicValue::getUninitMemory());
- aggregate = SymbolicValue::getAggregate(newElts, astContext);
+ if (auto *cai = dyn_cast<CopyAddrInst>(user)) {
+ // If this is a copy_addr *from* the memory, then it is a load; ignore it.
+ if (use->getOperandNumber() == 0)
+ continue;
+
+ // If this is a copy_addr *to* the memory, analyze the input value.
+ assert(use->getOperandNumber() == 1 && "copy_addr has two operands");
+
+ // Forbid multiple assignment.
+ if (getMemoryValue().getKind() != SymbolicValue::UninitMemory)
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
+
+ auto result = getConstAddrAndLoadResult(cai->getOperand(0));
+ if (!result.isConstant())
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
+
+ setMemoryValue(result);
+ continue;
+ }
+
+ // If this is an apply_inst passing the memory address as an indirect
+ // result operand, then we have a call that fills in this result.
+ if (auto *apply = dyn_cast<ApplyInst>(user)) {
+ auto conventions = apply->getSubstCalleeConv();
+
+ // If this is an out-parameter, it is like a store. If not, this is an
+ // indirect read which is ok.
+ unsigned numIndirectResults = conventions.getNumIndirectSILResults();
+ unsigned opNum = use->getOperandNumber() - 1;
+ if (opNum >= numIndirectResults)
+ continue;
+
+ // Forbid multiple assignment.
+ if (getMemoryValue().getKind() != SymbolicValue::UninitMemory)
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
+
+ // The callee needs to be a direct call to a constant expression.
+ auto callResult = computeCallResult(apply);
+
+ // If the call failed, we're done.
+ if (callResult.hasValue())
+ return error(*callResult);
+
+ // computeCallResult will have figured out the result and cached it for
+ // us.
+ assert(getMemoryValue().isConstant());
+ continue;
+ }
+
+ // If it is an index_addr, make sure it is a different address from the base.
+ if (auto *iai = dyn_cast<IndexAddrInst>(user)) {
+ assert(use->get() == iai->getBase());
+ if (auto *ili = dyn_cast<IntegerLiteralInst>(iai->getIndex())) {
+ if (ili->getValue().getLimitedValue() != 0)
+ continue;
+ }
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
+ }
+
+ if (auto *teai = dyn_cast<TupleElementAddrInst>(user)) {
+ // Try finding a writer among the users of `teai`. For example:
+ // %179 = alloc_stack $(Int32, Int32, Int32, Int32)
+ // %183 = tuple_element_addr %179 : $*(Int32, Int32, Int32, Int32), 3
+ // copy_addr %114 to [initialization] %183 : $*Int32
+ // %191 = tuple_element_addr %179 : $*(Int32, Int32, Int32, Int32), 3
+ // copy_addr [take] %191 to [initialization] %178 : $*Int32
+ //
+ // The workflow is: when const-evaluating %178, we const-evaluate %191,
+ // which in turn triggers const-evaluating %179, thereby entering this
+ // function, where `addr` is %179. Among its users, %191 is not an
+ // initializer, so we skip it (`initializeAddressFromSingleWriter(teai)`
+ // below will act as a no-op on it). %183 is a good initializer and can
+ // be const-evaluated (by const-evaluating %114).
+
+ // We can't forbid multiple assignment here by checking for uninit memory,
+ // because previous TupleElementAddrInsts may have already partially
+ // initialized the memory. However, the recursive call to
+ // `initializeAddressFromSingleWriter` below detects and forbids multiple
+ // assignment, so we don't need to do it here.
+
+ if (auto failure = initializeAddressFromSingleWriter(teai))
+ return error(*failure);
+
+ // If this instruction partially initialized the memory, then we must
+ // remember to check later that the memory has been fully initialized.
+ if (getMemoryValue().getKind() != SymbolicValue::UninitMemory)
+ mustCheckAggregateInitialized = true;
+
+#ifndef NDEBUG
+ // If all aggregate elements are const, we have successfully
+ // const-evaluated the entire tuple!
+ if (checkAggregateInitialized())
+ LLVM_DEBUG(llvm::dbgs() << "Const-evaluated the entire tuple: ";
+ getMemoryValue().dump());
+#endif // NDEBUG
+ continue;
+ }
+
+ LLVM_DEBUG(llvm::dbgs()
+ << "Unknown SingleStore ConstExpr user: " << *user << "\n");
+
+ // If this is some other user that we don't know about, then we should
+ // treat it conservatively, because it could store into the address.
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
}
- unsigned elementNo = indices.front();
+ if (mustCheckAggregateInitialized && !checkAggregateInitialized())
+ return error(evaluator.getUnknown(addr, UnknownReason::Default));
- // If we have a non-aggregate then fail.
- if (aggregate.getKind() != SymbolicValue::Aggregate)
- return true;
+ return None;
+}
- ArrayRef<SymbolicValue> oldElts;
- Type eltType;
+/// Find the initializer (single writer) of `addr` among its users,
+/// const-evaluate it and store the result into a memory object.
+///
+/// Side effects: Creates a fully-initialized memory object (on success), or a
+/// memory object containing an unknown (on failure). Inserts the address of
+/// that memory object into `calculatedValues`, with key `addr`.
+///
+/// Returns the address of the memory object on success. Returns the unknown on
+/// failure.
+///
+/// Some use cases are:
+/// 1. When analyzing the top-level code involved in a constant expression, we
+/// can end up demanding values that are returned by address. Handle this by
+/// finding the temporary stack value (an alloc_stack inst), and calling this
+/// method on it.
+/// 2. When const-evaluating an array via decodeAllocUninitializedArray(), we
+/// handle this by const-evaluating the writers of individual array elements.
+///
+/// There are a few forms of writers, such as:
+/// - store %3 to %4 ...
+/// - %8 = pointer_to_address %7 : $Builtin.RawPointer to [strict] $*Int32
+/// - %14 = index_addr %9 : $*Int32, %13 : $Builtin.Word
+/// - %180 = tuple_element_addr %179 : $*(Int32, Int32, Int32, Int32), 3
+///
+/// Note that unlike getConstAddrAndLoadResult(), this method does *not*
+/// const-evaluate the input `addr` by evaluating its operand first, such as %7
+/// above. Instead, it finds a user of %8 that is the initializer, and uses that
+/// to set the const value for %7. In other words, this method propagates const
+/// info from result to operand (e.g. from %8 to %7), while
+/// getConstAddrAndLoadResult() propagates const info from operand to result.
+///
+/// As such, when const-evaluating an address-typed inst such as
+/// pointer_to_address, if the address is to be written to, the caller should
+/// call this method (e.g. a[3] = 17). If the address is to be read (e.g. let v =
+/// a[3]), call getConstAddrAndLoadResult().
+SymbolicValue
+ConstExprFunctionState::getSingleWriterAddressValue(SILValue addr) {
+ // Check to see if we already have an answer.
+ auto it = calculatedValues.find(addr);
+ if (it != calculatedValues.end())
+ return it->second;
- // We need to have a struct or a tuple type.
- oldElts = aggregate.getAggregateValue();
+ assert(addr->getType().isAddress());
+ auto *addrInst = dyn_cast<SingleValueInstruction>(addr);
+ if (!addrInst)
+ return evaluator.getUnknown(addr, UnknownReason::Default);
- if (auto *decl = type->getStructOrBoundGenericStruct()) {
- auto it = decl->getStoredProperties().begin();
- std::advance(it, elementNo);
- eltType = (*it)->getType();
- } else if (auto tuple = type->getAs<TupleType>()) {
- assert(elementNo < tuple->getNumElements() && "invalid index");
- eltType = tuple->getElement(elementNo).getType();
- } else {
- return true;
+ // Create a memory object to initialize, and point `addr` at it.
+ auto memoryAddress =
+ createMemoryObject(addr, SymbolicValue::getUninitMemory());
+ auto *memoryObject = memoryAddress.getAddressValueMemoryObject();
+
+ if (auto failure = initializeAddressFromSingleWriter(addr)) {
+ assert(failure->getKind() == SymbolicValue::Unknown);
+ memoryObject->setValue(*failure);
+ return *failure;
+ }
+ if (!memoryObject->getValue().isConstant()) {
+ auto unknown = evaluator.getUnknown(addr, UnknownReason::Default);
+ memoryObject->setValue(unknown);
+ return unknown;
}
- if (writeOnlyOnce &&
- oldElts[elementNo].getKind() != SymbolicValue::UninitMemory) {
- // Cannot overwrite an existing constant.
- return true;
- }
-
- // Update the indexed element of the aggregate.
- SmallVector<SymbolicValue, 4> newElts(oldElts.begin(), oldElts.end());
- if (updateIndexedElement(newElts[elementNo], indices.drop_front(), newElement,
- eltType, writeOnlyOnce, astContext))
- return true;
-
- aggregate = SymbolicValue::getAggregate(newElts, astContext);
- return false;
+ return memoryAddress;
}
/// Given the operand to a load, resolve it to a constant if possible.
+/// Also see the comments on getSingleWriterAddressValue() to contrast these
+/// two APIs.
SymbolicValue ConstExprFunctionState::getConstAddrAndLoadResult(SILValue addr) {
auto addrVal = getConstantValue(addr);
if (!addrVal.isConstant())
@@ -698,14 +984,8 @@
SmallVector<unsigned, 4> accessPath;
auto *memoryObject = it->second.getAddressValue(accessPath);
- auto objectVal = memoryObject->getValue();
- auto objectType = memoryObject->getType();
-
- if (updateIndexedElement(objectVal, accessPath, storedCst, objectType,
- /*writeOnlyOnce*/ false, evaluator.getASTContext()))
- return evaluator.getUnknown(dest, UnknownReason::Default);
-
- memoryObject->setValue(objectVal);
+ memoryObject->setIndexedElement(accessPath, storedCst,
+ evaluator.getASTContext());
return None;
}
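
The single-writer analysis above boils down to one loop shape: visit every use
of the address, skip read-like uses, fold each recognized write (but only into
still-uninitialized memory), and bail conservatively on anything unknown. A
compressed sketch of that control flow (hypothetical UseKind classification,
with an int standing in for a folded constant):

  #include <optional>
  #include <utility>
  #include <vector>

  enum class UseKind { Read, Write, Unknown };

  // Scan every use of an address; succeed only if recognized writes
  // initialize the memory exactly once and nothing unknown touches it.
  static std::optional<int>
  evaluateSingleWriter(const std::vector<std::pair<UseKind, int>> &uses) {
    std::optional<int> memory; // nullopt models uninit memory
    for (const auto &use : uses) {
      switch (use.first) {
      case UseKind::Read:
        continue; // loads, dealloc_stack, etc. are ignored
      case UseKind::Write:
        if (memory)
          return std::nullopt; // forbid multiple assignment
        memory = use.second;
        continue;
      case UseKind::Unknown:
        return std::nullopt; // could store into the address: bail
      }
    }
    return memory; // nullopt if the memory was never initialized
  }
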
diff --git a/lib/Sema/TypeCheckProtocol.cpp b/lib/Sema/TypeCheckProtocol.cpp
index b5616e7..133ddda 100644
--- a/lib/Sema/TypeCheckProtocol.cpp
+++ b/lib/Sema/TypeCheckProtocol.cpp
@@ -967,9 +967,12 @@
assert(requirement->isProtocolRequirement());
auto *PD = cast<ProtocolDecl>(requirement->getDeclContext());
if (auto A = witness->getAttrs().getAttribute<ImplementsAttr>()) {
- Type T = A->getProtocolType().getType();
- if (T->castTo<ProtocolType>()->getDecl() == PD) {
- return A->getMemberName() == requirement->getFullName();
+ if (Type T = A->getProtocolType().getType()) {
+ if (auto ProtoTy = T->getAs<ProtocolType>()) {
+ if (ProtoTy->getDecl() == PD) {
+ return A->getMemberName() == requirement->getFullName();
+ }
+ }
}
}
return false;
diff --git a/test/SILOptimizer/pound_assert.sil b/test/SILOptimizer/pound_assert.sil
index 930cea4..ee56609 100644
--- a/test/SILOptimizer/pound_assert.sil
+++ b/test/SILOptimizer/pound_assert.sil
@@ -30,9 +30,9 @@
return undef : $()
}
-// Tests that piecewise initialization of memory works, by piecewise
-// initializing a tuple.
-sil @piecewiseInit : $@convention(thin) () -> Bool {
+// Tests that piecewise initialization of memory works during flow-sensitive
+// evaluation, by piecewise initializing a tuple in a function.
+sil @piecewiseInitFlowSensitive : $@convention(thin) () -> Bool {
bb0:
// Allocate and initialize the tuple to (1, 2).
%0 = alloc_stack $(Int64, Int64), var, name "tup"
@@ -61,6 +61,16 @@
return %15 : $Bool
}
+sil @invokePiecewiseInitFlowSensitiveTest : $@convention(thin) () -> () {
+ %0 = function_ref @piecewiseInitFlowSensitive : $@convention(thin) () -> Bool
+ %1 = apply %0() : $@convention(thin) () -> Bool
+ %2 = struct_extract %1 : $Bool, #Bool._value
+ %3 = string_literal utf8 ""
+ %4 = builtin "poundAssert"(%2 : $Builtin.Int1, %3 : $Builtin.RawPointer) : $()
+ %ret = tuple ()
+ return %ret : $()
+}
+
// Tests copy_addr interpretation.
sil @copyAddr : $@convention(thin) () -> Bool {
// Allocate and initialize an Int64 to 1.
@@ -87,19 +97,169 @@
return %10 : $Bool
}
-sil @invokeTests : $@convention(thin) () -> () {
- %0 = function_ref @piecewiseInit : $@convention(thin) () -> Bool
+sil @invokeCopyAddrTest : $@convention(thin) () -> () {
+ %0 = function_ref @copyAddr : $@convention(thin) () -> Bool
%1 = apply %0() : $@convention(thin) () -> Bool
%2 = struct_extract %1 : $Bool, #Bool._value
%3 = string_literal utf8 ""
%4 = builtin "poundAssert"(%2 : $Builtin.Int1, %3 : $Builtin.RawPointer) : $()
+ %ret = tuple ()
+ return %ret : $()
+}
- %5 = function_ref @copyAddr : $@convention(thin) () -> Bool
- %6 = apply %5() : $@convention(thin) () -> Bool
- %7 = struct_extract %6 : $Bool, #Bool._value
+// A function with @out result to help with some tests.
+sil @setInt64To1 : $@convention(thin) () -> (@out Int64) {
+bb0(%0 : $*Int64):
+ %1 = integer_literal $Builtin.Int64, 1
+ %2 = struct $Int64 (%1 : $Builtin.Int64)
+ store %2 to %0 : $*Int64
+ %ret = tuple ()
+ return %ret : $()
+}
+
+// Tests that initialization of memory using `store` works during top-level
+// evaluation.
+sil @storeInitTopLevel : $@convention(thin) () -> () {
+ %0 = alloc_stack $Int64
+ %1 = integer_literal $Builtin.Int64, 1
+ %2 = struct $Int64 (%1 : $Builtin.Int64)
+ store %2 to %0 : $*Int64
+ %4 = load %0 : $*Int64
+ %5 = struct_extract %4 : $Int64, #Int64._value
+ %6 = builtin "cmp_eq_Int64"(%1 : $Builtin.Int64, %5 : $Builtin.Int64) : $Builtin.Int1
+ %7 = string_literal utf8 ""
+ %8 = builtin "poundAssert"(%6 : $Builtin.Int1, %7 : $Builtin.RawPointer) : $()
+ dealloc_stack %0 : $*Int64
+ %ret = tuple ()
+ return %ret : $()
+}
+
+// Tests that initialization of memory using `copy_addr` works during top-level
+// evaluation.
+sil @copyInitTopLevel : $@convention(thin) () -> () {
+ %0 = alloc_stack $Int64
+ %1 = alloc_stack $Int64
+ %2 = integer_literal $Builtin.Int64, 1
+ %3 = struct $Int64 (%2 : $Builtin.Int64)
+ store %3 to %0 : $*Int64
+ copy_addr %0 to %1 : $*Int64
+ %6 = load %1 : $*Int64
+ %7 = struct_extract %6 : $Int64, #Int64._value
+ %8 = builtin "cmp_eq_Int64"(%2 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
+ %9 = string_literal utf8 ""
+ %10 = builtin "poundAssert"(%8 : $Builtin.Int1, %9 : $Builtin.RawPointer) : $()
+ dealloc_stack %1 : $*Int64
+ dealloc_stack %0 : $*Int64
+ %ret = tuple ()
+ return %ret : $()
+}
+
+// Tests that initialization of memory using `apply` works during top-level
+// evaluation.
+sil @applyInitTopLevel : $@convention(thin) () -> () {
+ %0 = alloc_stack $Int64
+ %1 = function_ref @setInt64To1: $@convention(thin) () -> (@out Int64)
+ %2 = apply %1(%0) : $@convention(thin) () -> (@out Int64)
+ %3 = load %0 : $*Int64
+ %4 = struct_extract %3 : $Int64, #Int64._value
+ %5 = integer_literal $Builtin.Int64, 1
+ %6 = builtin "cmp_eq_Int64"(%4 : $Builtin.Int64, %5 : $Builtin.Int64) : $Builtin.Int1
+ %7 = string_literal utf8 ""
+ %8 = builtin "poundAssert"(%6 : $Builtin.Int1, %7 : $Builtin.RawPointer) : $()
+ dealloc_stack %0 : $*Int64
+ %ret = tuple ()
+ return %ret : $()
+}
+
+// Tests that piecewise initialization of tuple memory works during top-level
+// evaluation.
+sil @piecewiseInitTopLevel : $@convention(thin) () -> () {
+bb0:
+ // Allocate and initialize the tuple to (1, 2).
+ %0 = alloc_stack $(Int64, Int64), var, name "tup"
+ %1 = tuple_element_addr %0 : $*(Int64, Int64), 0
+ %2 = tuple_element_addr %0 : $*(Int64, Int64), 1
+ %3 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%3 : $Builtin.Int64)
+ store %4 to %1 : $*Int64
+ %6 = integer_literal $Builtin.Int64, 2
+ %7 = struct $Int64 (%6 : $Builtin.Int64)
+ store %7 to %2 : $*Int64
+
+ // Read the first element from the tuple.
+ // TODO: Allow `begin_access` in top level initialization.
+ // %9 = begin_access [read] [static] %0 : $*(Int64, Int64)
+ %10 = tuple_element_addr %0 : $*(Int64, Int64), 0
+ %11 = load %10 : $*Int64
+ // end_access %9 : $*(Int64, Int64)
+
+ // Check that the first element is what we put in.
+ %13 = struct_extract %11 : $Int64, #Int64._value
+ %14 = builtin "cmp_eq_Int64"(%3 : $Builtin.Int64, %13 : $Builtin.Int64) : $Builtin.Int1
+ %15 = string_literal utf8 ""
+ %16 = builtin "poundAssert"(%14 : $Builtin.Int1, %15 : $Builtin.RawPointer) : $()
+
+ // Deallocate and return.
+ dealloc_stack %0 : $*(Int64, Int64)
+ %ret = tuple ()
+ return %ret : $()
+}
+
+// Tests that top-level evaluation detects memory that gets written to twice.
+sil @doubleWriteTopLevel : $@convention(thin) () -> () {
+ // expected-note @+1 {{could not fold operation}}
+ %0 = alloc_stack $Int64
+ %1 = integer_literal $Builtin.Int64, 1
+ %2 = struct $Int64 (%1 : $Builtin.Int64)
+ store %2 to %0 : $*Int64
+ store %2 to %0 : $*Int64
+ %5 = load %0 : $*Int64
+ %6 = struct_extract %5 : $Int64, #Int64._value
+ %7 = builtin "cmp_eq_Int64"(%1 : $Builtin.Int64, %6 : $Builtin.Int64) : $Builtin.Int1
%8 = string_literal utf8 ""
+ // expected-error @+1 {{#assert condition not constant}}
%9 = builtin "poundAssert"(%7 : $Builtin.Int1, %8 : $Builtin.RawPointer) : $()
+ dealloc_stack %0 : $*Int64
+ %ret = tuple ()
+ return %ret : $()
+}
+
+// There was a bug where the evaluator would not detect a double-write to a
+// tuple element at the top level if one of the writes writes an unknown value.
+sil @doubleWriteTupleElement : $@convention(thin) (Int64) -> () {
+bb0(%arg : $Int64):
+ // Allocate and initialize the tuple to (1, 2).
+ %0 = alloc_stack $(Int64, Int64), var, name "tup"
+ %1 = tuple_element_addr %0 : $*(Int64, Int64), 0
+ %2 = tuple_element_addr %0 : $*(Int64, Int64), 1
+ %3 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%3 : $Builtin.Int64)
+ store %4 to %1 : $*Int64
+ %6 = integer_literal $Builtin.Int64, 2
+ %7 = struct $Int64 (%6 : $Builtin.Int64)
+ store %7 to %2 : $*Int64
+
+ // Store %arg, whose value is unknown, to the first element of the tuple.
+ // expected-note @+1 {{could not fold operation}}
+ %addr = tuple_element_addr %0 : $*(Int64, Int64), 0
+ store %arg to %addr : $*Int64
+
+ // Read the first element from the tuple.
+ // TODO: Allow `begin_access` in top level initialization.
+ // %9 = begin_access [read] [static] %0 : $*(Int64, Int64)
+ %10 = tuple_element_addr %0 : $*(Int64, Int64), 0
+ %11 = load %10 : $*Int64
+ // end_access %9 : $*(Int64, Int64)
+
+ // Check that the first element is what we put in.
+ %13 = struct_extract %11 : $Int64, #Int64._value
+ %14 = builtin "cmp_eq_Int64"(%3 : $Builtin.Int64, %13 : $Builtin.Int64) : $Builtin.Int1
+ %15 = string_literal utf8 ""
+ // expected-error @+1 {{#assert condition not constant}}
+ %16 = builtin "poundAssert"(%14 : $Builtin.Int1, %15 : $Builtin.RawPointer) : $()
+
+ // Deallocate and return.
+ dealloc_stack %0 : $*(Int64, Int64)
%ret = tuple ()
return %ret : $()
}
diff --git a/test/SILOptimizer/pound_assert.swift b/test/SILOptimizer/pound_assert.swift
index 297aa1f..dc3d66c 100644
--- a/test/SILOptimizer/pound_assert.swift
+++ b/test/SILOptimizer/pound_assert.swift
@@ -223,3 +223,235 @@
#assert(replaceAggregate())
#assert(shouldNotAlias())
}
+
+//===----------------------------------------------------------------------===//
+// Evaluating generic functions
+//===----------------------------------------------------------------------===//
+
+func genericAdd<T: Numeric>(_ a: T, _ b: T) -> T {
+ return a + b
+}
+
+func test_genericAdd() {
+ #assert(genericAdd(1, 1) == 2)
+}
+
+func test_tupleAsGeneric() {
+ func identity<T>(_ t: T) -> T {
+ return t
+ }
+ #assert(identity((1, 2)) == (1, 2))
+}
+
+//===----------------------------------------------------------------------===//
+// Reduced testcase propagating substitutions around.
+//===----------------------------------------------------------------------===//
+protocol SubstitutionsP {
+ init<T: SubstitutionsP>(something: T)
+
+ func get() -> Int
+}
+
+struct SubstitutionsX : SubstitutionsP {
+ var state : Int
+ init<T: SubstitutionsP>(something: T) {
+ state = something.get()
+ }
+ func get() -> Int {
+ fatalError()
+ }
+
+ func getState() -> Int {
+ return state
+ }
+}
+
+struct SubstitutionsY : SubstitutionsP {
+ init() {}
+ init<T: SubstitutionsP>(something: T) {
+ }
+
+ func get() -> Int {
+ return 123
+ }
+}
+func substitutionsF<T: SubstitutionsP>(_: T.Type) -> T {
+ return T(something: SubstitutionsY())
+}
+
+func testProto() {
+ #assert(substitutionsF(SubstitutionsX.self).getState() == 123)
+}
+
+//===----------------------------------------------------------------------===//
+// Structs with generics
+//===----------------------------------------------------------------------===//
+
+// Test 1
+struct S<X, Y> {
+ func method<Z>(_ z: Z) -> Int {
+ return 0
+ }
+}
+
+func callerOfSMethod<U, V, W>(_ s: S<U, V>, _ w: W) -> Int {
+ return s.method(w)
+}
+
+func toplevel() {
+ let s = S<Int, Float>()
+ #assert(callerOfSMethod(s, -1) == 0)
+}
+
+// Test 2: test a struct method returning its generic argument.
+struct S2<X> {
+ func method<Z>(_ z: Z) -> Z {
+ return z
+ }
+}
+
+func callerOfS2Method<U, V>(_ s: S2<U>, _ v: V) -> V {
+ return s.method(v)
+}
+
+func testStructMethodReturningGenericParam() {
+ let s = S2<Float>()
+ #assert(callerOfS2Method(s, -1) == -1)
+}
+
+//===----------------------------------------------------------------------===//
+// Test that the order in which the generic parameters are declared doesn't
+// affect the interpreter.
+//===----------------------------------------------------------------------===//
+
+protocol Proto {
+ func amethod<U>(_ u: U) -> Int
+}
+
+func callMethod<U, T: Proto>(_ a: T, _ u: U) -> Int {
+ return a.amethod(u)
+}
+
+// Test 1
+struct Sp : Proto {
+ func amethod<U>(_ u: U) -> Int {
+ return 0
+ }
+}
+
+func testProtocolMethod() {
+ let s = Sp()
+ #assert(callMethod(s, 10) == 0)
+}
+
+// Test 2
+struct GenericS<P>: Proto {
+ func amethod<U>(_ u: U) -> Int {
+ return 12
+ }
+}
+
+func testProtocolMethodForGenericStructs() {
+ let s = GenericS<Int>()
+ #assert(callMethod(s, 10) == 12)
+}
+
+// Test 3 (with generic fields)
+struct GenericS2<P: Equatable>: Proto {
+ var fld1: P
+ var fld2: P
+
+ init(_ p: P, _ q: P) {
+ fld1 = p
+ fld2 = q
+ }
+
+ func amethod<U>(_ u: U) -> Int {
+ if (fld1 == fld2) {
+ return 15
+ }
+ return 0
+ }
+}
+
+func testProtocolMethodForStructsWithGenericFields() {
+ let s = GenericS2<Int>(1, 1)
+ #assert(callMethod(s, 10) == 15)
+}
+
+//===----------------------------------------------------------------------===//
+// Structs with generics and protocols with associated types.
+//===----------------------------------------------------------------------===//
+
+protocol ProtoWithAssocType {
+ associatedtype U
+
+ func amethod(_ u: U) -> U
+}
+
+struct St<X, Y> : ProtoWithAssocType {
+ typealias U = X
+
+ func amethod(_ x: X) -> X {
+ return x
+ }
+}
+
+func callerOfStMethod<P, Q>(_ s: St<P, Q>, _ p: P) -> P {
+ return s.amethod(p)
+}
+
+func testProtoWithAssocTypes() {
+ let s = St<Int, Float>()
+ #assert(callerOfStMethod(s, 11) == 11)
+}
+
+// Test 2: test a protocol method returning its generic argument.
+protocol ProtoWithGenericMethod {
+ func amethod<U>(_ u: U) -> U
+}
+
+struct SProtoWithGenericMethod<X> : ProtoWithGenericMethod {
+ func amethod<Z>(_ z: Z) -> Z {
+ return z
+ }
+}
+
+func callerOfGenericProtoMethod<S: ProtoWithGenericMethod, V>(_ s: S,
+ _ v: V) -> V {
+ return s.amethod(v)
+}
+
+func testProtoWithGenericMethod() {
+ let s = SProtoWithGenericMethod<Float>()
+ #assert(callerOfGenericProtoMethod(s, -1) == -1)
+}
+
+//===----------------------------------------------------------------------===//
+// Converting a struct instance to protocol instance is not supported yet.
+// This requires handling init_existential_addr instruction. Once they are
+// supported, the following static assert must pass. For now, a workaround is
+// to use generic parameters with protocol constraints in the interpretable
+// code fragments.
+//===----------------------------------------------------------------------===//
+
+protocol ProtoSimple {
+ func amethod() -> Int
+}
+
+func callProtoSimpleMethod(_ p: ProtoSimple) -> Int {
+ return p.amethod()
+}
+
+struct SPsimp : ProtoSimple {
+ func amethod() -> Int {
+ return 0
+ }
+}
+
+func testStructPassedAsProtocols() {
+ let s = SPsimp()
+ #assert(callProtoSimpleMethod(s) == 0) // expected-error {{#assert condition not constant}}
+ // expected-note@-1 {{could not fold operation}}
+}
diff --git a/validation-test/compiler_crashers_2_fixed/0187-rdar46678653.swift b/validation-test/compiler_crashers_2_fixed/0187-rdar46678653.swift
new file mode 100644
index 0000000..0cb046d
--- /dev/null
+++ b/validation-test/compiler_crashers_2_fixed/0187-rdar46678653.swift
@@ -0,0 +1,15 @@
+// RUN: not %target-swift-frontend -typecheck %s
+
+protocol P: class { }
+
+protocol Q {
+ func g()
+}
+
+protocol P { }
+
+struct S : Q {
+ @_implements(P, g())
+ func h() {}
+}
+
diff --git a/validation-test/compiler_crashers_2_fixed/0188-sr9496.swift b/validation-test/compiler_crashers_2_fixed/0188-sr9496.swift
new file mode 100644
index 0000000..73f5bd4
--- /dev/null
+++ b/validation-test/compiler_crashers_2_fixed/0188-sr9496.swift
@@ -0,0 +1,23 @@
+// RUN: %target-swift-frontend -typecheck %s
+
+protocol P1 {
+ associatedtype A1
+}
+
+protocol P2 {
+ associatedtype A2
+}
+
+struct S1<G1: P1, G2: P1>: P1 where G1.A1 == G2.A1 {
+ typealias A1 = G1.A1
+}
+
+struct S2<G1: P1, G2: P2>: P2 where G1.A1 == G2.A2 {
+ typealias A2 = G2.A2
+}
+
+struct S3<G1: P1, G2: P2> where G1.A1 == G2.A2 {
+ func f<G: P1>(_: G) -> S3<S1<G, G1>, S2<G, G2>> {
+ fatalError()
+ }
+}