Merge pull request #22695 from milseman/5_0_en_gadus_offset
[5.0][String.Index] Deprecate encodedOffset var/init
diff --git a/lib/IDE/CodeCompletion.cpp b/lib/IDE/CodeCompletion.cpp
index 8bbc5c5..3e5e424 100644
--- a/lib/IDE/CodeCompletion.cpp
+++ b/lib/IDE/CodeCompletion.cpp
@@ -5409,7 +5409,11 @@
// We cannot analyze without target.
if (!ParsedExpr)
return false;
- DC->walkContext(Finder);
+
+ // For 'Initializer' context, we need to look into its parent because it
+ // might constrain the initializer's type.
+ auto analyzeDC = isa<Initializer>(DC) ? DC->getParent() : DC;
+ analyzeDC->walkContext(Finder);
for (auto It = Finder.Ancestors.rbegin(); It != Finder.Ancestors.rend();
++ It) {
diff --git a/lib/IRGen/ClassLayout.cpp b/lib/IRGen/ClassLayout.cpp
index 4347de7..82583ff 100644
--- a/lib/IRGen/ClassLayout.cpp
+++ b/lib/IRGen/ClassLayout.cpp
@@ -28,6 +28,7 @@
bool isFixedSize,
bool metadataRequiresInitialization,
bool metadataRequiresRelocation,
+ bool classHasObjCAncestry,
llvm::Type *classTy,
ArrayRef<VarDecl *> allStoredProps,
ArrayRef<FieldAccess> allFieldAccesses,
@@ -38,21 +39,29 @@
IsFixedSize(isFixedSize),
MetadataRequiresInitialization(metadataRequiresInitialization),
MetadataRequiresRelocation(metadataRequiresRelocation),
+ ClassHasObjCAncestry(classHasObjCAncestry),
Ty(classTy),
AllStoredProperties(allStoredProps),
AllFieldAccesses(allFieldAccesses),
AllElements(allElements) { }
Size ClassLayout::getInstanceStart() const {
- if (AllElements.empty())
- return getSize();
+ ArrayRef<ElementLayout> elements = AllElements;
+ while (!elements.empty()) {
+ auto element = elements.front();
+ elements = elements.drop_front();
- auto element = AllElements[0];
- if (element.getKind() == ElementLayout::Kind::Fixed ||
- element.getKind() == ElementLayout::Kind::Empty) {
- // FIXME: assumes layout is always sequential!
- return element.getByteOffset();
+ // Ignore empty elements.
+ if (element.isEmpty()) {
+ continue;
+ } else if (element.hasByteOffset()) {
+ // FIXME: assumes layout is always sequential!
+ return element.getByteOffset();
+ } else {
+ return Size(0);
+ }
}
- return Size(0);
+ // If there are no non-empty elements, just return the computed size.
+ return getSize();
}
diff --git a/lib/IRGen/ClassLayout.h b/lib/IRGen/ClassLayout.h
index bbacb99..b728c55 100644
--- a/lib/IRGen/ClassLayout.h
+++ b/lib/IRGen/ClassLayout.h
@@ -63,6 +63,9 @@
/// Does the class metadata require relocation?
bool MetadataRequiresRelocation;
+ /// Does the class have ObjC ancestry?
+ bool ClassHasObjCAncestry;
+
/// The LLVM type for instances of this class.
llvm::Type *Ty;
@@ -82,6 +85,7 @@
bool isFixedSize,
bool metadataRequiresInitialization,
bool metadataRequiresRelocation,
+ bool classHasObjCAncestry,
llvm::Type *classTy,
ArrayRef<VarDecl *> allStoredProps,
ArrayRef<FieldAccess> allFieldAccesses,
@@ -98,6 +102,15 @@
bool isFixedSize() const { return IsFixedSize; }
+ /// Returns true if the runtime may attempt to assign non-zero offsets to
+ /// empty fields for this class. The ObjC runtime will do this if it
+ /// decides it needs to slide ivars. This is the one exception to the
+ /// general rule that the runtime will not try to assign a different offset
+ /// than was computed statically for a field with a fixed offset.
+ bool mayRuntimeAssignNonZeroOffsetsToEmptyFields() const {
+ return ClassHasObjCAncestry;
+ }
+
bool doesMetadataRequireInitialization() const {
return MetadataRequiresInitialization;
}
diff --git a/lib/IRGen/GenClass.cpp b/lib/IRGen/GenClass.cpp
index 15b0b7f..c6fa64d 100644
--- a/lib/IRGen/GenClass.cpp
+++ b/lib/IRGen/GenClass.cpp
@@ -293,6 +293,7 @@
isFixedSize(),
doesMetadataRequireInitialization(),
doesMetadataRequireRelocation(),
+ ClassHasObjCAncestry,
classTy,
allStoredProps,
allFieldAccesses,
@@ -393,7 +394,7 @@
}
auto element = ElementLayout::getIncomplete(*eltType);
- addField(element, LayoutStrategy::Universal);
+ bool isKnownEmpty = !addField(element, LayoutStrategy::Universal);
// The 'Elements' list only contains superclass fields when we're
// building a layout for tail allocation.
@@ -402,7 +403,7 @@
if (!superclass) {
AllStoredProperties.push_back(var);
- AllFieldAccesses.push_back(getFieldAccess());
+ AllFieldAccesses.push_back(getFieldAccess(isKnownEmpty));
}
}
@@ -459,7 +460,12 @@
}
}
- FieldAccess getFieldAccess() {
+ FieldAccess getFieldAccess(bool isKnownEmpty) {
+ // If the field is known empty, then its access pattern is always
+ // constant-direct.
+ if (isKnownEmpty)
+ return FieldAccess::ConstantDirect;
+
// If the layout so far has a fixed size, the field offset is known
// statically.
if (isFixedSize())
@@ -635,8 +641,7 @@
auto fieldInfo = classLayout.getFieldAccessAndElement(field);
auto element = fieldInfo.second;
- assert(element.getKind() == ElementLayout::Kind::Fixed ||
- element.getKind() == ElementLayout::Kind::Empty);
+ assert(element.hasByteOffset());
return element.getByteOffset();
}
diff --git a/lib/IRGen/GenMeta.cpp b/lib/IRGen/GenMeta.cpp
index 2d6e1aa..b3b15b2 100644
--- a/lib/IRGen/GenMeta.cpp
+++ b/lib/IRGen/GenMeta.cpp
@@ -2318,7 +2318,7 @@
llvm::Constant *fieldOffsetOrZero;
- if (element.getKind() == ElementLayout::Kind::Fixed) {
+ if (element.hasByteOffset()) {
// Use a fixed offset if we have one.
fieldOffsetOrZero = IGM.getSize(element.getByteOffset());
} else {
@@ -2346,11 +2346,21 @@
// If it is constant in the fragile layout only, newer Objective-C
// runtimes will still update them in place, so make sure to check the
// correct layout.
+ //
+ // The one exception to this rule is with empty fields with
+ // ObjC-resilient heritage. The ObjC runtime will attempt to slide
+ // these offsets if it slides the rest of the class, and in doing so
+ // it will compute a different offset than we computed statically.
+ // But this is ultimately unimportant because we do not care about the
+ // offset of an empty field.
auto resilientInfo = resilientLayout.getFieldAccessAndElement(prop);
- if (resilientInfo.first == FieldAccess::ConstantDirect) {
+ if (resilientInfo.first == FieldAccess::ConstantDirect &&
+ (!resilientInfo.second.isEmpty() ||
+ !resilientLayout.mayRuntimeAssignNonZeroOffsetsToEmptyFields())) {
// If it is constant in the resilient layout, it should be constant in
// the fragile layout also.
assert(access == FieldAccess::ConstantDirect);
+ assert(element.hasByteOffset());
offsetVar->setConstant(true);
}
diff --git a/lib/IRGen/GenProto.cpp b/lib/IRGen/GenProto.cpp
index abb2f68..eb438f7 100644
--- a/lib/IRGen/GenProto.cpp
+++ b/lib/IRGen/GenProto.cpp
@@ -947,23 +947,35 @@
/// Return true if the witness table requires runtime instantiation to
/// handle resiliently-added requirements with default implementations.
-static bool isResilientConformance(const NormalProtocolConformance *conformance) {
+bool IRGenModule::isResilientConformance(
+ const NormalProtocolConformance *conformance) {
// If the protocol is not resilient, the conformance is not resilient
// either.
if (!conformance->getProtocol()->isResilient())
return false;
- // If the protocol is in the same module as the conformance, we're
- // not resilient.
- if (conformance->getDeclContext()->getParentModule()
- == conformance->getProtocol()->getParentModule())
+ auto *conformanceModule = conformance->getDeclContext()->getParentModule();
+
+ // If the protocol and the conformance are both in the current module,
+ // they're not resilient.
+ if (conformanceModule == getSwiftModule() &&
+ conformanceModule == conformance->getProtocol()->getParentModule())
+ return false;
+
+ // If the protocol and the conformance are in the same module and the
+ // conforming type is not generic, they're not resilient.
+ //
+ // This is an optimization -- a conformance of a non-generic type cannot
+ // resiliently become dependent.
+ if (!conformance->getDeclContext()->isGenericContext() &&
+ conformanceModule == conformance->getProtocol()->getParentModule())
return false;
// We have a resilient conformance.
return true;
}
-static bool isResilientConformance(const RootProtocolConformance *root) {
+bool IRGenModule::isResilientConformance(const RootProtocolConformance *root) {
if (auto normal = dyn_cast<NormalProtocolConformance>(root))
return isResilientConformance(normal);
// Self-conformances never require this.
@@ -996,6 +1008,7 @@
}
static bool isDependentConformance(
+ IRGenModule &IGM,
const RootProtocolConformance *rootConformance,
bool considerResilience,
llvm::SmallPtrSet<const NormalProtocolConformance *, 4> &visited){
@@ -1010,7 +1023,7 @@
return false;
// If the conformance is resilient, this is always true.
- if (considerResilience && isResilientConformance(conformance))
+ if (considerResilience && IGM.isResilientConformance(conformance))
return true;
// Check whether any of the conformances are dependent.
@@ -1026,7 +1039,8 @@
auto assocConformance =
conformance->getAssociatedConformance(req.getFirstType(), assocProtocol);
if (assocConformance.isAbstract() ||
- isDependentConformance(assocConformance.getConcrete()
+ isDependentConformance(IGM,
+ assocConformance.getConcrete()
->getRootConformance(),
considerResilience,
visited))
@@ -1044,10 +1058,12 @@
/// Is there anything about the given conformance that requires witness
/// tables to be dependently-generated?
-static bool isDependentConformance(const RootProtocolConformance *conformance,
- bool considerResilience) {
+bool IRGenModule::isDependentConformance(
+ const RootProtocolConformance *conformance,
+ bool considerResilience) {
llvm::SmallPtrSet<const NormalProtocolConformance *, 4> visited;
- return ::isDependentConformance(conformance, considerResilience, visited);
+ return ::isDependentConformance(*this, conformance, considerResilience,
+ visited);
}
static bool isSynthesizedNonUnique(const RootProtocolConformance *conformance) {
@@ -1285,7 +1301,7 @@
Conformance.getDeclContext())),
SILEntries(SILWT->getEntries()),
SILConditionalConformances(SILWT->getConditionalConformances()),
- ResilientConformance(isResilientConformance(&Conformance)),
+ ResilientConformance(IGM.isResilientConformance(&Conformance)),
PI(IGM.getProtocolInfo(SILWT->getConformance()->getProtocol(),
(ResilientConformance
? ProtocolInfoKind::RequirementSignature
diff --git a/lib/IRGen/GenRecord.h b/lib/IRGen/GenRecord.h
index 9bdb2ef..697742a 100644
--- a/lib/IRGen/GenRecord.h
+++ b/lib/IRGen/GenRecord.h
@@ -82,6 +82,10 @@
ElementLayout::Kind getKind() const {
return Layout.getKind();
}
+
+ bool hasFixedByteOffset() const {
+ return Layout.hasByteOffset();
+ }
Size getFixedByteOffset() const {
return Layout.getByteOffset();
@@ -763,7 +767,7 @@
Explosion &src,
unsigned startOffset) const override {
for (auto &field : getFields()) {
- if (field.getKind() != ElementLayout::Kind::Empty) {
+ if (!field.isEmpty()) {
unsigned offset = field.getFixedByteOffset().getValueInBits()
+ startOffset;
cast<LoadableTypeInfo>(field.getTypeInfo())
@@ -776,7 +780,7 @@
Explosion &dest, unsigned startOffset)
const override {
for (auto &field : getFields()) {
- if (field.getKind() != ElementLayout::Kind::Empty) {
+ if (!field.isEmpty()) {
unsigned offset = field.getFixedByteOffset().getValueInBits()
+ startOffset;
cast<LoadableTypeInfo>(field.getTypeInfo())
diff --git a/lib/IRGen/GenStruct.cpp b/lib/IRGen/GenStruct.cpp
index 036afaa..5fda66b 100644
--- a/lib/IRGen/GenStruct.cpp
+++ b/lib/IRGen/GenStruct.cpp
@@ -172,8 +172,7 @@
llvm::Constant *getConstantFieldOffset(IRGenModule &IGM,
VarDecl *field) const {
auto &fieldInfo = getFieldInfo(field);
- if (fieldInfo.getKind() == ElementLayout::Kind::Fixed
- || fieldInfo.getKind() == ElementLayout::Kind::Empty) {
+ if (fieldInfo.hasFixedByteOffset()) {
return llvm::ConstantInt::get(
IGM.Int32Ty, fieldInfo.getFixedByteOffset().getValue());
}
@@ -791,8 +790,7 @@
ElementLayout layout = ElementLayout::getIncomplete(fieldType);
auto isEmpty = fieldType.isKnownEmpty(ResilienceExpansion::Maximal);
if (isEmpty)
- layout.completeEmpty(fieldType.isPOD(ResilienceExpansion::Maximal),
- NextOffset);
+ layout.completeEmpty(fieldType.isPOD(ResilienceExpansion::Maximal));
else
layout.completeFixed(fieldType.isPOD(ResilienceExpansion::Maximal),
NextOffset, LLVMFields.size());
diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h
index 8ffd3cc..3365cd4 100644
--- a/lib/IRGen/IRGenModule.h
+++ b/lib/IRGen/IRGenModule.h
@@ -793,6 +793,11 @@
ResilienceExpansion getResilienceExpansionForLayout(NominalTypeDecl *decl);
ResilienceExpansion getResilienceExpansionForLayout(SILGlobalVariable *var);
+ bool isResilientConformance(const NormalProtocolConformance *conformance);
+ bool isResilientConformance(const RootProtocolConformance *root);
+ bool isDependentConformance(const RootProtocolConformance *conformance,
+ bool considerResilience);
+
Alignment getCappedAlignment(Alignment alignment);
SpareBitVector getSpareBitsForType(llvm::Type *scalarTy, Size size);
diff --git a/lib/IRGen/StructLayout.cpp b/lib/IRGen/StructLayout.cpp
index 016fd31..37cdbb1 100644
--- a/lib/IRGen/StructLayout.cpp
+++ b/lib/IRGen/StructLayout.cpp
@@ -304,8 +304,7 @@
/// Add an empty element to the aggregate.
void StructLayoutBuilder::addEmptyElement(ElementLayout &elt) {
- elt.completeEmpty(elt.getType().isPOD(ResilienceExpansion::Maximal),
- CurSize);
+ elt.completeEmpty(elt.getType().isPOD(ResilienceExpansion::Maximal));
}
/// Add an element at the fixed offset of the current end of the
diff --git a/lib/IRGen/StructLayout.h b/lib/IRGen/StructLayout.h
index 668b661..a4a00c7 100644
--- a/lib/IRGen/StructLayout.h
+++ b/lib/IRGen/StructLayout.h
@@ -81,6 +81,7 @@
public:
enum class Kind {
/// The element is known to require no storage in the aggregate.
+ /// Its offset in the aggregate is always statically zero.
Empty,
/// The element can be positioned at a fixed offset within the
@@ -95,10 +96,12 @@
/// offset zero. This is necessary because LLVM forbids even a
/// 'gep 0' on an unsized type.
InitialNonFixedSize
+
+ // IncompleteKind comes here
};
private:
- enum : unsigned { IncompleteKind = 4 };
+ enum : unsigned { IncompleteKind = unsigned(Kind::InitialNonFixedSize) + 1 };
/// The swift type information for this element's layout.
const TypeInfo *Type;
@@ -137,19 +140,17 @@
Index = other.Index;
}
- void completeEmpty(IsPOD_t isPOD, Size byteOffset) {
+ void completeEmpty(IsPOD_t isPOD) {
TheKind = unsigned(Kind::Empty);
IsPOD = unsigned(isPOD);
- // We still want to give empty fields an offset for use by things like
- // ObjC ivar emission. We use the first field in a class layout as the
- // instanceStart.
- ByteOffset = byteOffset.getValue();
+ ByteOffset = 0;
Index = 0; // make a complete write of the bitfield
}
void completeInitialNonFixedSize(IsPOD_t isPOD) {
TheKind = unsigned(Kind::InitialNonFixedSize);
IsPOD = unsigned(isPOD);
+ ByteOffset = 0;
Index = 0; // make a complete write of the bitfield
}
@@ -189,10 +190,25 @@
return IsPOD_t(IsPOD);
}
+ /// Can we access this element at a static offset?
+ bool hasByteOffset() const {
+ switch (getKind()) {
+ case Kind::Empty:
+ case Kind::Fixed:
+ return true;
+
+ // FIXME: InitialNonFixedSize should go in the above, but I'm being
+ // paranoid about changing behavior.
+ case Kind::InitialNonFixedSize:
+ case Kind::NonFixed:
+ return false;
+ }
+ llvm_unreachable("bad kind");
+ }
+
/// Given that this element has a fixed offset, return that offset in bytes.
Size getByteOffset() const {
- assert(isCompleted() &&
- (getKind() == Kind::Fixed || getKind() == Kind::Empty));
+ assert(isCompleted() && hasByteOffset());
return Size(ByteOffset);
}
diff --git a/lib/SIL/SILProfiler.cpp b/lib/SIL/SILProfiler.cpp
index 84a0d6c..88f3d3e 100644
--- a/lib/SIL/SILProfiler.cpp
+++ b/lib/SIL/SILProfiler.cpp
@@ -42,6 +42,10 @@
/// Check whether a root AST node is unmapped, i.e not profiled.
static bool isUnmapped(ASTNode N) {
+ // Do not map AST nodes with invalid source locations.
+ if (N.getStartLoc().isInvalid() || N.getEndLoc().isInvalid())
+ return true;
+
if (auto *E = N.dyn_cast<Expr *>()) {
auto *CE = dyn_cast<AbstractClosureExpr>(E);
@@ -396,7 +400,12 @@
public:
SourceMappingRegion(ASTNode Node, CounterExpr &Count,
Optional<SourceLoc> StartLoc, Optional<SourceLoc> EndLoc)
- : Node(Node), Count(&Count), StartLoc(StartLoc), EndLoc(EndLoc) {}
+ : Node(Node), Count(&Count), StartLoc(StartLoc), EndLoc(EndLoc) {
+ assert((!StartLoc || StartLoc->isValid()) &&
+ "Expected start location to be valid");
+ assert((!EndLoc || EndLoc->isValid()) &&
+ "Expected end location to be valid");
+ }
SourceMappingRegion(SourceMappingRegion &&Region) = default;
SourceMappingRegion &operator=(SourceMappingRegion &&RHS) = default;
diff --git a/lib/SILOptimizer/Transforms/AccessEnforcementOpts.cpp b/lib/SILOptimizer/Transforms/AccessEnforcementOpts.cpp
index 6c8c761..352dc5b 100644
--- a/lib/SILOptimizer/Transforms/AccessEnforcementOpts.cpp
+++ b/lib/SILOptimizer/Transforms/AccessEnforcementOpts.cpp
@@ -988,6 +988,12 @@
}
static bool canMergeEnd(BeginAccessInst *parentIns, BeginAccessInst *childIns) {
+ // A [read] access cannot be converted to a [modify] without potentially
+ // introducing new conflicts that were previously ignored. Merging read/modify
+ // will require additional data flow information.
+ if (childIns->getAccessKind() != parentIns->getAccessKind())
+ return false;
+
auto *endP = getSingleEndAccess(parentIns);
if (!endP)
return false;
@@ -1087,13 +1093,6 @@
LLVM_DEBUG(llvm::dbgs()
<< "Merging: " << *childIns << " into " << *parentIns << "\n");
- // Change the type of access of parent:
- // should be the worse between it and child
- auto childAccess = childIns->getAccessKind();
- if (parentIns->getAccessKind() < childAccess) {
- parentIns->setAccessKind(childAccess);
- }
-
// Change the no nested conflict of parent:
// should be the worst case scenario: we might merge to non-conflicting
// scopes to a conflicting one. f the new result does not conflict,
diff --git a/test/IDE/complete_in_accessors.swift b/test/IDE/complete_in_accessors.swift
index dd08693..64f90b2 100644
--- a/test/IDE/complete_in_accessors.swift
+++ b/test/IDE/complete_in_accessors.swift
@@ -150,7 +150,7 @@
// WITH_MEMBER_DECLS_INIT: Begin completions
// WITH_MEMBER_DECLS_INIT-DAG: Decl[Struct]/CurrModule: FooStruct[#FooStruct#]{{; name=.+$}}
-// WITH_MEMBER_DECLS_INIT-DAG: Decl[FreeFunction]/CurrModule: returnsInt()[#Int#]{{; name=.+$}}
+// WITH_MEMBER_DECLS_INIT-DAG: Decl[FreeFunction]/CurrModule/TypeRelation[Identical]: returnsInt()[#Int#]{{; name=.+$}}
// WITH_MEMBER_DECLS_INIT-DAG: Decl[InstanceMethod]/CurrNominal: instanceFunc({#self: MemberAccessors#})[#(Int) -> Float#]{{; name=.+$}}
// WITH_MEMBER_DECLS_INIT: End completions
diff --git a/test/IDE/complete_unresolved_members.swift b/test/IDE/complete_unresolved_members.swift
index fc2de9f..2cb6a1b 100644
--- a/test/IDE/complete_unresolved_members.swift
+++ b/test/IDE/complete_unresolved_members.swift
@@ -78,6 +78,8 @@
// RUN: %target-swift-ide-test -code-completion -source-filename %s -code-completion-token=OVERLOADED_INIT_1 | %FileCheck %s -check-prefix=OVERLOADED_METHOD_1
// RUN: %target-swift-ide-test -code-completion -source-filename %s -code-completion-token=OVERLOADED_INIT_2 | %FileCheck %s -check-prefix=OVERLOADED_METHOD_1
+// RUN: %target-swift-ide-test -code-completion -source-filename %s -code-completion-token=DECL_MEMBER_INIT_1 | %FileCheck %s -check-prefix=UNRESOLVED_3
+
enum SomeEnum1 {
case South
case North
@@ -535,3 +537,7 @@
let _ = HasOverloaded(e: .#^OVERLOADED_INIT_2^#)
// Same as OVERLOADED_METHOD_1.
}
+
+struct TestingStruct {
+ var value: SomeEnum1 = .#^DECL_MEMBER_INIT_1^#
+}
diff --git a/test/IRGen/class_resilience.swift b/test/IRGen/class_resilience.swift
index b2ad2fe..3124031 100644
--- a/test/IRGen/class_resilience.swift
+++ b/test/IRGen/class_resilience.swift
@@ -16,6 +16,9 @@
// CHECK: @"$s16class_resilience21ResilientGenericChildCMo" = {{(protected )?}}{{(dllexport )?}}global [[BOUNDS:{ (i32|i64), i32, i32 }]] zeroinitializer
+// CHECK: @"$s16class_resilience27ClassWithEmptyThenResilientC9resilient0H7_struct0G3IntVvpWvd" = hidden global [[INT]] 0,
+// CHECK: @"$s16class_resilience27ClassWithResilientThenEmptyC9resilient0H7_struct0E3IntVvpWvd" = hidden global [[INT]] 0,
+
// CHECK: @"$s16class_resilience26ClassWithResilientPropertyCMo" = {{(protected )?}}{{(dllexport )?}}constant [[BOUNDS]]
// CHECK-SAME-32: { [[INT]] 52, i32 2, i32 13 }
// CHECK-SAME-64: { [[INT]] 80, i32 2, i32 10 }
@@ -113,6 +116,9 @@
// CHECK-SAME-32: { [[INT]] 64, i32 2, i32 16 }
// CHECK-SAME-64: { [[INT]] 104, i32 2, i32 13 }
+// CHECK: @"$s16class_resilience27ClassWithEmptyThenResilientC5emptyAA0E0VvpWvd" = hidden constant [[INT]] 0,
+// CHECK: @"$s16class_resilience27ClassWithResilientThenEmptyC5emptyAA0G0VvpWvd" = hidden constant [[INT]] 0,
+
// CHECK: @"$s16class_resilience14ResilientChildC5fields5Int32VvgTq" = {{(protected )?}}{{(dllexport )?}}alias %swift.method_descriptor, getelementptr inbounds
// CHECK: @"$s16class_resilience14ResilientChildC5fields5Int32VvsTq" = {{(protected )?}}{{(dllexport )?}}alias %swift.method_descriptor, getelementptr inbounds
// CHECK: @"$s16class_resilience14ResilientChildC5fields5Int32VvMTq" = {{(protected )?}}{{(dllexport )?}}alias %swift.method_descriptor, getelementptr inbounds
@@ -244,6 +250,34 @@
}
}
+// rdar://48031465
+// Field offsets for empty fields in resilient classes should be initialized
+// to their best-known value and made non-constant if that value might
+// disagree with the dynamic value.
+
+@_fixed_layout
+public struct Empty {}
+
+public class ClassWithEmptyThenResilient {
+ public let empty: Empty
+ public let resilient: ResilientInt
+
+ public init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
+
+public class ClassWithResilientThenEmpty {
+ public let resilient: ResilientInt
+ public let empty: Empty
+
+ public init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
+
// ClassWithResilientProperty.color getter
// CHECK-LABEL: define{{( dllexport)?}}{{( protected)?}} swiftcc i32 @"$s16class_resilience26ClassWithResilientPropertyC5colors5Int32Vvg"(%T16class_resilience26ClassWithResilientPropertyC* swiftself)
diff --git a/test/IRGen/class_resilience_objc.swift b/test/IRGen/class_resilience_objc.swift
index 236ecbc..96d2188 100644
--- a/test/IRGen/class_resilience_objc.swift
+++ b/test/IRGen/class_resilience_objc.swift
@@ -1,10 +1,17 @@
-// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) -enable-objc-interop -emit-ir -o - -primary-file %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize
+// RUN: %empty-directory(%t)
+// RUN: %target-swift-frontend -emit-module -enable-resilience -enable-class-resilience -emit-module-path=%t/resilient_struct.swiftmodule -module-name=resilient_struct %S/../Inputs/resilient_struct.swift
+// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) -I %t -enable-resilience -enable-class-resilience -enable-objc-interop -emit-ir -o - -primary-file %s | %FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-%target-ptrsize -DINT=i%target-ptrsize
// XFAIL: CPU=armv7k
-// CHECK: %swift.type = type { [[INT:i32|i64]] }
-
import Foundation
+import resilient_struct
+
+// Note that these are all mutable to allow for the runtime to slide them.
+// CHECK: @"$s21class_resilience_objc27ClassWithEmptyThenResilientC9resilient0I7_struct0H3IntVvpWvd" = hidden global [[INT]] 0,
+// CHECK: @"$s21class_resilience_objc27ClassWithResilientThenEmptyC9resilient0I7_struct0F3IntVvpWvd" = hidden global [[INT]] 0,
+// CHECK: @"$s21class_resilience_objc27ClassWithEmptyThenResilientC5emptyAA0F0VvpWvd" = hidden global [[INT]] 0,
+// CHECK: @"$s21class_resilience_objc27ClassWithResilientThenEmptyC5emptyAA0H0VvpWvd" = hidden global [[INT]] 0,
public class FixedLayoutObjCSubclass : NSObject {
// This field could use constant direct access because NSObject has
@@ -89,3 +96,26 @@
// because the field offset vector only contains Swift field offsets.
o.field = 10
}
+
+@_fixed_layout
+public struct Empty {}
+
+public class ClassWithEmptyThenResilient : DummyClass {
+ public let empty: Empty
+ public let resilient: ResilientInt
+
+ public init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
+
+public class ClassWithResilientThenEmpty : DummyClass {
+ public let resilient: ResilientInt
+ public let empty: Empty
+
+ public init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
diff --git a/test/IRGen/conformance_resilience.swift b/test/IRGen/conformance_resilience.swift
new file mode 100644
index 0000000..1d29073
--- /dev/null
+++ b/test/IRGen/conformance_resilience.swift
@@ -0,0 +1,28 @@
+// RUN: %empty-directory(%t)
+// RUN: %target-swift-frontend -emit-module -enable-resilience -emit-module-path=%t/resilient_protocol.swiftmodule -module-name=resilient_protocol %S/../Inputs/resilient_protocol.swift
+// RUN: %target-swift-frontend -I %t -emit-ir -enable-resilience %s | %FileCheck %s -DINT=i%target-ptrsize
+// RUN: %target-swift-frontend -I %t -emit-ir -enable-resilience -O %s
+
+import resilient_protocol
+
+// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$s22conformance_resilience14useConformanceyyx18resilient_protocol22OtherResilientProtocolRzlF"(%swift.opaque* noalias nocapture, %swift.type* %T, i8** %T.OtherResilientProtocol)
+public func useConformance<T : OtherResilientProtocol>(_: T) {}
+
+// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$s22conformance_resilience14getConformanceyy18resilient_protocol7WrapperVyxGlF"(%swift.opaque* noalias nocapture, %swift.type* %T)
+public func getConformance<T>(_ w: Wrapper<T>) {
+ // CHECK: [[RESPONSE:%.*]] = call swiftcc %swift.metadata_response @"$s18resilient_protocol7WrapperVMa"([[INT]] 0, %swift.type* %T)
+ // CHECK: [[META:%.*]] = extractvalue %swift.metadata_response [[RESPONSE]], 0
+ // CHECK: [[WTABLE:%.*]] = call i8** @swift_getWitnessTable(%swift.protocol_conformance_descriptor* @"$s18resilient_protocol7WrapperVyxGAA22OtherResilientProtocolAAMc", %swift.type* [[META]], i8*** undef)
+ // CHECK: call swiftcc void @"$s22conformance_resilience14useConformanceyyx18resilient_protocol22OtherResilientProtocolRzlF"(%swift.opaque* noalias nocapture %0, %swift.type* [[META]], i8** [[WTABLE]])
+ // CHECK: ret void
+ useConformance(w)
+}
+
+// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$s22conformance_resilience14getConformanceyy18resilient_protocol15ConcreteWrapperVF"(%swift.opaque* noalias nocapture)
+public func getConformance(_ w: ConcreteWrapper) {
+ // CHECK: [[RESPONSE:%.*]] = call swiftcc %swift.metadata_response @"$s18resilient_protocol15ConcreteWrapperVMa"([[INT]] 0)
+ // CHECK: [[META:%.*]] = extractvalue %swift.metadata_response [[RESPONSE]], 0
+ // CHECK: call swiftcc void @"$s22conformance_resilience14useConformanceyyx18resilient_protocol22OtherResilientProtocolRzlF"(%swift.opaque* noalias nocapture %0, %swift.type* [[META]], i8** @"$s18resilient_protocol15ConcreteWrapperVAA22OtherResilientProtocolAAWP")
+ // CHECK: ret void
+ useConformance(w)
+}
\ No newline at end of file
diff --git a/test/Inputs/resilient_protocol.swift b/test/Inputs/resilient_protocol.swift
index 9a93674..f6dfe51 100644
--- a/test/Inputs/resilient_protocol.swift
+++ b/test/Inputs/resilient_protocol.swift
@@ -30,6 +30,8 @@
public struct Wrapper<T>: OtherResilientProtocol { }
+public struct ConcreteWrapper: OtherResilientProtocol { }
+
public protocol ProtocolWithAssocTypeDefaults {
associatedtype T1 = Self
associatedtype T2: OtherResilientProtocol = Wrapper<T1>
diff --git a/test/Interpreter/class_resilience.swift b/test/Interpreter/class_resilience.swift
index 7cdba43..06c44b2 100644
--- a/test/Interpreter/class_resilience.swift
+++ b/test/Interpreter/class_resilience.swift
@@ -94,7 +94,6 @@
expectEqual(1, c.laziestNumber)
}
-
// Generic class with resilient stored property
public class GenericClassWithResilientProperty<T> {
@@ -289,5 +288,40 @@
== ChildOfOutsideParentWithResilientStoredProperty.self)
}
+@_fixed_layout
+public struct Empty {}
+
+// rdar://48031465
+public class ClassWithEmptyThenResilient {
+ public let empty: Empty
+ public let resilient: ResilientInt
+
+ public init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
+
+ResilientClassTestSuite.test("EmptyThenResilient") {
+ let c = ClassWithEmptyThenResilient(empty: Empty(),
+ resilient: ResilientInt(i: 17))
+ expectEqual(c.resilient.i, 17)
+}
+
+public class ClassWithResilientThenEmpty {
+ public let resilient: ResilientInt
+ public let empty: Empty
+
+ public init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
+
+ResilientClassTestSuite.test("ResilientThenEmpty") {
+ let c = ClassWithResilientThenEmpty(empty: Empty(),
+ resilient: ResilientInt(i: 17))
+ expectEqual(c.resilient.i, 17)
+}
runAllTests()
diff --git a/test/Interpreter/objc_class_resilience.swift b/test/Interpreter/objc_class_resilience.swift
index f652567..df3d6b9 100644
--- a/test/Interpreter/objc_class_resilience.swift
+++ b/test/Interpreter/objc_class_resilience.swift
@@ -1,9 +1,10 @@
// RUN: %empty-directory(%t)
+// RUN: %target-clang -fobjc-arc %S/Inputs/ObjCClasses/ObjCClasses.m -c -o %t/ObjCClasses.o
// RUN: %target-build-swift-dylib(%t/libresilient_struct.%target-dylib-extension) -Xfrontend -enable-resilience -Xfrontend -enable-class-resilience %S/../Inputs/resilient_struct.swift -emit-module -emit-module-path %t/resilient_struct.swiftmodule -module-name resilient_struct
// RUN: %target-codesign %t/libresilient_struct.%target-dylib-extension
-// RUN: %target-build-swift %s -L %t -I %t -lresilient_struct -o %t/main -Xlinker -rpath -Xlinker %t
+// RUN: %target-build-swift %s -L %t -I %t -lresilient_struct -I %S/Inputs/ObjCClasses/ -Xlinker %t/ObjCClasses.o -o %t/main -Xlinker -rpath -Xlinker %t
// RUN: %target-codesign %t/main
// RUN: %target-run %t/main %t/libresilient_struct.%target-dylib-extension
@@ -14,6 +15,7 @@
import StdlibUnittest
import Foundation
import resilient_struct
+import ObjCClasses
var ResilientClassTestSuite = TestSuite("ResilientClass")
@@ -49,4 +51,55 @@
_blackHole(ResilientSubclass())
}
+// rdar://48031465 - Make sure we handle sliding empty ivars properly.
+struct Empty {}
+
+class ClassWithEmptyThenResilient : HasHiddenIvars {
+ let empty: Empty
+ let resilient: ResilientInt
+
+ init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
+
+ResilientClassTestSuite.test("EmptyThenResilient") {
+ let c = ClassWithEmptyThenResilient(empty: Empty(),
+ resilient: ResilientInt(i: 17))
+ c.x = 100
+ c.y = 2000
+ c.z = 30000
+ c.t = 400000
+ expectEqual(c.resilient.i, 17)
+ expectEqual(c.x, 100)
+ expectEqual(c.y, 2000)
+ expectEqual(c.z, 30000)
+ expectEqual(c.t, 400000)
+}
+
+class ClassWithResilientThenEmpty : HasHiddenIvars {
+ let resilient: ResilientInt
+ let empty: Empty
+
+ init(empty: Empty, resilient: ResilientInt) {
+ self.empty = empty
+ self.resilient = resilient
+ }
+}
+
+ResilientClassTestSuite.test("ResilientThenEmpty") {
+ let c = ClassWithResilientThenEmpty(empty: Empty(),
+ resilient: ResilientInt(i: 17))
+ c.x = 100
+ c.y = 2000
+ c.z = 30000
+ c.t = 400000
+ expectEqual(c.resilient.i, 17)
+ expectEqual(c.x, 100)
+ expectEqual(c.y, 2000)
+ expectEqual(c.z, 30000)
+ expectEqual(c.t, 400000)
+}
+
runAllTests()
diff --git a/test/Profiler/coverage_subscript_autoclosure.swift b/test/Profiler/coverage_subscript_autoclosure.swift
new file mode 100644
index 0000000..0e521cc
--- /dev/null
+++ b/test/Profiler/coverage_subscript_autoclosure.swift
@@ -0,0 +1,15 @@
+// RUN: %target-swift-frontend -Xllvm -sil-full-demangle -enable-testing -profile-generate -profile-coverage-mapping -emit-sil -module-name coverage_subscript_autoclosure %s | %FileCheck %s
+
+struct S {
+ subscript(i: Int, autoclosure: @autoclosure () -> Int) -> Int {
+ // CHECK-LABEL: sil_coverage_map {{.*}}S.subscript.getter
+ get { // CHECK-NEXT: [[@LINE]]:9 -> [[@LINE+2]]:6 : 0
+ return 0
+ }
+
+ // CHECK-LABEL: sil_coverage_map {{.*}}S.subscript.setter
+ set { // CHECK-NEXT: [[@LINE]]:9 -> [[@LINE+2]]:6 : 0
+
+ }
+ }
+}
diff --git a/test/SILOptimizer/access_enforcement_opts.sil b/test/SILOptimizer/access_enforcement_opts.sil
index b8f2fc0..9849947 100644
--- a/test/SILOptimizer/access_enforcement_opts.sil
+++ b/test/SILOptimizer/access_enforcement_opts.sil
@@ -506,14 +506,19 @@
// }
// Preserve the scope of the outer inout access. Runtime trap expected.
//
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// CHECK-LABEL: sil @$s17enforce_with_opts24testInoutWriteEscapeReadyyF : $@convention(thin) () -> () {
// CHECK: [[BOX:%.*]] = alloc_box ${ var Int64 }, var, name "x"
// CHECK: [[BOXADR:%.*]] = project_box [[BOX]] : ${ var Int64 }, 0
// CHECK: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [[BOXADR]] : $*Int64
// CHECK: apply
-// CHECK-NOT: begin_access
// CHECK: end_access [[BEGIN]]
-// CHECK-NOT: begin_access
+// CHECK: [[BEGIN2:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[BOXADR]] : $*Int64
+// CHECK: load [[BEGIN2]]
+// CHECK: end_access [[BEGIN2]]
// CHECK-LABEL: } // end sil function '$s17enforce_with_opts24testInoutWriteEscapeReadyyF'
sil @$s17enforce_with_opts24testInoutWriteEscapeReadyyF : $@convention(thin) () -> () {
bb0:
@@ -579,14 +584,19 @@
// }
// Preserve the scope of the outer inout access. Runtime trap expected.
//
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// CHECK-LABEL: sil @$s17enforce_with_opts020testInoutWriteEscapeF0yyF : $@convention(thin) () -> () {
// CHECK: [[BOX:%.*]] = alloc_box ${ var Int64 }, var, name "x"
// CHECK: [[BOXADR:%.*]] = project_box [[BOX]] : ${ var Int64 }, 0
// CHECK: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [[BOXADR]] : $*Int64
// CHECK: apply
-// CHECK-NOT: begin_access
// CHECK: end_access [[BEGIN]]
-// CHECK-NOT: begin_access
+// CHECK: [[BEGIN2:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[BOXADR]] : $*Int64
+// CHECK: load [[BEGIN2]]
+// CHECK: end_access [[BEGIN2]]
// CHECK-LABEL: } // end sil function '$s17enforce_with_opts020testInoutWriteEscapeF0yyF'
sil @$s17enforce_with_opts020testInoutWriteEscapeF0yyF : $@convention(thin) () -> () {
bb0:
@@ -999,14 +1009,21 @@
// public func testOldToNewMapWrite) {
// Checks merging of 3 scopes resulting in a larger modify scope
//
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// CHECK-LABEL: sil @testOldToNewMapWrite : $@convention(thin) () -> () {
// CHECK: [[GLOBAL:%.*]] = global_addr @globalX : $*X
-// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [[GLOBAL]] : $*X
-// CHECK-NEXT: load [[BEGIN]] : $*X
-// CHECK: store {{.*}} to [[BEGIN]] : $*X
+// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
// CHECK-NEXT: load [[BEGIN]] : $*X
// CHECK-NEXT: end_access [[BEGIN]] : $*X
-// CHECK-NOT: begin_access
+// CHECK: [[BEGIN2:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
+// CHECK-NEXT: store {{.*}} to [[BEGIN2]] : $*X
+// CHECK-NEXT: end_access [[BEGIN2]] : $*X
+// CHECK: [[BEGIN3:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
+// CHECK-NEXT: load [[BEGIN3]] : $*X
+// CHECK-NEXT: end_access [[BEGIN3]] : $*X
// CHECK-LABEL: } // end sil function 'testOldToNewMapWrite'
sil @testOldToNewMapWrite : $@convention(thin) () -> () {
bb0:
@@ -1031,15 +1048,20 @@
// public func testDataFlowAcrossBBs() {
// Checks merging of scopes across basic blocks - propagating that information
//
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// CHECK-LABEL: sil @testDataFlowAcrossBBs : $@convention(thin) () -> () {
// CHECK: [[GLOBAL:%.*]] = global_addr @globalX : $*X
-// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [[GLOBAL]] : $*X
+// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
// CHECK-NEXT: load [[BEGIN]] : $*X
+// CHECK-NEXT: end_access [[BEGIN]] : $*X
// CHECK-NEXT: br bb1
// CHECK: br bb2
-// CHECK: load [[BEGIN]] : $*X
-// CHECK-NEXT: end_access [[BEGIN]] : $*X
-// CHECK-NOT: begin_access
+// CHECK: [[BEGIN2:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
+// CHECK-NEXT: load [[BEGIN2]] : $*X
+// CHECK-NEXT: end_access [[BEGIN2]] : $*X
// CHECK-LABEL: } // end sil function 'testDataFlowAcrossBBs'
sil @testDataFlowAcrossBBs : $@convention(thin) () -> () {
bb0:
@@ -1063,15 +1085,20 @@
// public func testDataFlowAcrossInnerLoop() {
// Checks merging of scopes across an inner loop
//
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// CHECK-LABEL: sil @testDataFlowAcrossInnerLoop : $@convention(thin) () -> () {
// CHECK: [[GLOBAL:%.*]] = global_addr @globalX : $*X
-// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [[GLOBAL]] : $*X
+// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
// CHECK-NEXT: load [[BEGIN]] : $*X
+// CHECK-NEXT: end_access [[BEGIN]] : $*X
// CHECK-NEXT: br bb1
// CHECK: cond_br {{.*}}, bb1, bb2
-// CHECK: load [[BEGIN]] : $*X
-// CHECK-NEXT: end_access [[BEGIN]] : $*X
-// CHECK-NOT: begin_access
+// CHECK: [[BEGIN2:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
+// CHECK-NEXT: load [[BEGIN2]] : $*X
+// CHECK-NEXT: end_access [[BEGIN2]] : $*X
// CHECK-LABEL: } // end sil function 'testDataFlowAcrossInnerLoop'
sil @testDataFlowAcrossInnerLoop : $@convention(thin) () -> () {
bb0:
@@ -1248,13 +1275,19 @@
// Checks detection of irreducible control flow / bail + parts that we *can* merge
// See disableCrossBlock in the algorithm: this detects this corner case
//
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// CHECK-LABEL: sil @testIrreducibleGraph2 : $@convention(thin) () -> () {
// CHECK: [[GLOBAL:%.*]] = global_addr @globalX : $*X
-// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [[GLOBAL]] : $*X
+// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
// CHECK-NEXT: load [[BEGIN]] : $*X
-// CHECK-NEXT: br bb1
-// CHECK: load [[BEGIN]] : $*X
// CHECK-NEXT: end_access [[BEGIN]] : $*X
+// CHECK-NEXT: br bb1
+// CHECK: [[BEGIN1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
+// CHECK-NEXT: load [[BEGIN1]] : $*X
+// CHECK-NEXT: end_access [[BEGIN1]] : $*X
// CHECK: cond_br {{.*}}, bb2, bb3
// CHECK: [[BEGIN2:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
// CHECK-NEXT: load [[BEGIN2]] : $*X
@@ -1390,13 +1423,19 @@
// During the merge optimization,
// Check that we don't merge cross strongly component boundaries for now
//
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// CHECK-LABEL: sil @testStronglyConnectedComponent : $@convention(thin) () -> () {
// CHECK: [[GLOBAL:%.*]] = global_addr @globalX : $*X
-// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [modify] [dynamic] [[GLOBAL]] : $*X
+// CHECK-NEXT: [[BEGIN:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
// CHECK-NEXT: load [[BEGIN]] : $*X
-// CHECK-NEXT: br bb1
-// CHECK: load [[BEGIN]] : $*X
// CHECK-NEXT: end_access [[BEGIN]] : $*X
+// CHECK-NEXT: br bb1
+// CHECK: [[BEGIN1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
+// CHECK-NEXT: load [[BEGIN1]] : $*X
+// CHECK-NEXT: end_access [[BEGIN1]] : $*X
// CHECK: cond_br {{.*}}, bb2, bb3
// CHECK: [[BEGIN2:%.*]] = begin_access [read] [dynamic] [no_nested_conflict] [[GLOBAL]] : $*X
// CHECK-NEXT: load [[BEGIN2]] : $*X
@@ -1555,3 +1594,59 @@
return %10 : $()
}
+// --- rdar://48239213: Fatal access conflict detected.
+//
+// The read/modify pair of accesses in testReadModifyConflictPair
+// cannot be merged without introducing a false conflict.
+
+public class TestClass {
+ @_hasStorage @_hasInitialValue var flags: Int64 { get set }
+}
+
+// CHECK-LABEL: sil hidden [noinline] @readFlags : $@convention(method) (Int64, @guaranteed TestClass) -> Bool {
+// CHECK: bb0(%0 : $Int64, %1 : $TestClass):
+// CHECK: [[ADR:%.*]] = ref_element_addr %1 : $TestClass, #TestClass.flags
+// CHECK: begin_access [read] [dynamic] [no_nested_conflict] [[ADR]] : $*Int64
+// CHECK: load %4 : $*Builtin.Int64
+// CHECK: end_access
+// CHECK-LABEL: } // end sil function 'readFlags'
+sil hidden [noinline] @readFlags : $@convention(method) (Int64, @guaranteed TestClass) -> Bool {
+bb0(%0 : $Int64, %1 : $TestClass):
+ %2 = ref_element_addr %1 : $TestClass, #TestClass.flags
+ %3 = begin_access [read] [dynamic] [no_nested_conflict] %2 : $*Int64
+ %4 = struct_element_addr %3 : $*Int64, #Int64._value
+ %5 = load %4 : $*Builtin.Int64
+ end_access %3 : $*Int64
+ %7 = struct_extract %0 : $Int64, #Int64._value
+ %8 = builtin "cmp_eq_Int64"(%5 : $Builtin.Int64, %7 : $Builtin.Int64) : $Builtin.Int1
+ %9 = struct $Bool (%8 : $Builtin.Int1)
+ return %9 : $Bool
+}
+
+// CHECK-LABEL: sil @testReadModifyConflictPair : $@convention(method) (@guaranteed TestClass) -> () {
+// CHECK: bb0(%0 : $TestClass):
+// CHECK: [[ADR:%.*]] = ref_element_addr %0 : $TestClass, #TestClass.flags
+// CHECK: begin_access [read] [dynamic] [no_nested_conflict] [[ADR]] : $*Int64
+// CHECK: load
+// CHECK: end_access
+// CHECK: apply {{.*}} : $@convention(method) (Int64, @guaranteed TestClass) -> Bool
+// CHECK: begin_access [modify] [dynamic] [no_nested_conflict] [[ADR]] : $*Int64
+// CHECK: store
+// CHECK: end_access
+// CHECK-LABEL: } // end sil function 'testReadModifyConflictPair'
+sil @testReadModifyConflictPair : $@convention(method) (@guaranteed TestClass) -> () {
+bb0(%0 : $TestClass):
+ %1 = ref_element_addr %0 : $TestClass, #TestClass.flags
+ %2 = begin_access [read] [dynamic] %1 : $*Int64
+ %3 = load %2 : $*Int64
+ end_access %2 : $*Int64
+ %5 = function_ref @readFlags : $@convention(method) (Int64, @guaranteed TestClass) -> Bool
+ %6 = apply %5(%3, %0) : $@convention(method) (Int64, @guaranteed TestClass) -> Bool
+ %7 = integer_literal $Builtin.Int64, 3
+ %8 = struct $Int64 (%7 : $Builtin.Int64)
+ %9 = begin_access [modify] [dynamic] %1 : $*Int64
+ store %8 to %9 : $*Int64
+ end_access %9 : $*Int64
+ %12 = tuple ()
+ return %12 : $()
+}
diff --git a/test/SILOptimizer/merge_exclusivity.swift b/test/SILOptimizer/merge_exclusivity.swift
index 5aa674d..9e995df 100644
--- a/test/SILOptimizer/merge_exclusivity.swift
+++ b/test/SILOptimizer/merge_exclusivity.swift
@@ -9,26 +9,36 @@
return x &+ y
}
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest1yySiF : $@convention(thin)
// TESTSIL: bb0
// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
// TESTSIL: end_access [[B1]]
// TESTSIL: bb5
-// TESTSIL: [[B2:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B2]]
-// TESTSIL: store {{.*}} to [[B2]]
-// TESTSIL: end_access [[B2]]
+// TESTSIL: [[B2a:%.*]] = begin_access [read] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL-NEXT: load [[B2a]]
+// TESTSIL: end_access [[B2a]]
+// TESTSIL: [[B2b:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL: store {{.*}} to [[B2b]]
+// TESTSIL: end_access [[B2b]]
// TESTSIL: bb6
-// TESTSIL: [[B3:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B3]]
-// TESTSIL: store {{.*}} to [[B3]]
-// TESTSIL: end_access [[B3]]
+// TESTSIL: [[B3a:%.*]] = begin_access [read] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL-NEXT: load [[B3a]]
+// TESTSIL: end_access [[B3a]]
+// TESTSIL: [[B3b:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL: store {{.*}} to [[B3b]]
+// TESTSIL: end_access [[B3b]]
// TESTSIL: bb7
-// TESTSIL: [[B4:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B4]]
-// TESTSIL: store {{.*}} to [[B4]]
-// TESTSIL: end_access [[B4]]
+// TESTSIL: [[B4a:%.*]] = begin_access [read] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL-NEXT: load [[B4a]]
+// TESTSIL: end_access [[B4a]]
+// TESTSIL: [[B4b:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL: store {{.*}} to [[B4b]]
+// TESTSIL: end_access [[B4b]]
// TESTSIL-NOT: begin_access
// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest1yySiF'
@inline(never)
@@ -48,21 +58,29 @@
}
}
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest2yySiF : $@convention(thin)
// TESTSIL: bb0
// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
// TESTSIL: end_access [[B1]]
// TESTSIL: bb6
-// TESTSIL: [[B2:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B2]]
-// TESTSIL: store {{.*}} to [[B2]]
-// TESTSIL: end_access [[B2]]
+// TESTSIL: [[B2a:%.*]] = begin_access [read] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL-NEXT: load [[B2a]]
+// TESTSIL: end_access [[B2a]]
+// TESTSIL: [[B2b:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL: store {{.*}} to [[B2b]]
+// TESTSIL: end_access [[B2b]]
// TESTSIL: bb7
-// TESTSIL: [[B3:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B3]]
-// TESTSIL: store {{.*}} to [[B3]]
-// TESTSIL: end_access [[B3]]
+// TESTSIL: [[B3a:%.*]] = begin_access [read] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL-NEXT: load [[B3a]]
+// TESTSIL: end_access [[B3a]]
+// TESTSIL: [[B3b:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// TESTSIL: store {{.*}} to [[B3b]]
+// TESTSIL: end_access [[B3b]]
// TESTSIL-NOT: begin_access
// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest2yySiF'
@inline(never)
@@ -81,13 +99,17 @@
}
}
-// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest3yySiF : $@convention(thin)
-// TESTSIL: bb0
-// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
-// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL: end_access [[B1]]
-// TESTSIL-NOT: begin_access
-// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest3yySiF'
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
+// FIXME_TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest3yySiF : $@convention(thin)
+// FIXME_TESTSIL: bb0
+// FIXME_TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
+// FIXME_TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL: end_access [[B1]]
+// FIXME_TESTSIL-NOT: begin_access
+// FIXME_TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest3yySiF'
@inline(never)
public func MergeTest3(_ N: Int) {
let range = 0..<10000
@@ -99,23 +121,27 @@
}
}
-// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest4yySiF : $@convention(thin)
-// TESTSIL: bb0
-// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
-// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL: end_access [[B1]]
-// TESTSIL: bb7
-// TESTSIL: [[B2:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B2]]
-// TESTSIL: store {{.*}} to [[B2]]
-// TESTSIL: end_access [[B2]]
-// TESTSIL: bb8
-// TESTSIL: [[B3:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B3]]
-// TESTSIL: store {{.*}} to [[B3]]
-// TESTSIL: end_access [[B3]]
-// TESTSIL-NOT: begin_access
-// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest4yySiF'
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
+// FIXME_TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest4yySiF : $@convention(thin)
+// FIXME_TESTSIL: bb0
+// FIXME_TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
+// FIXME_TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL: end_access [[B1]]
+// FIXME_TESTSIL: bb7
+// FIXME_TESTSIL: [[B2:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL-NEXT: load [[B2]]
+// FIXME_TESTSIL: store {{.*}} to [[B2]]
+// FIXME_TESTSIL: end_access [[B2]]
+// FIXME_TESTSIL: bb8
+// FIXME_TESTSIL: [[B3:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL-NEXT: load [[B3]]
+// FIXME_TESTSIL: store {{.*}} to [[B3]]
+// FIXME_TESTSIL: end_access [[B3]]
+// FIXME_TESTSIL-NOT: begin_access
+// FIXME_TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest4yySiF'
@inline(never)
public func MergeTest4(_ N: Int) {
let range = 0..<10000
@@ -130,28 +156,32 @@
}
}
-// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest5yySiF : $@convention(thin)
-// TESTSIL: bb0
-// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
-// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL: end_access [[B1]]
-// TESTSIL: bb6
-// TESTSIL: [[B2:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B2]]
-// TESTSIL: store {{.*}} to [[B2]]
-// TESTSIL: end_access [[B2]]
-// TESTSIL: bb7
-// TESTSIL: [[B3:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B3]]
-// TESTSIL: store {{.*}} to [[B3]]
-// TESTSIL: end_access [[B3]]
-// TESTSIL: bb8
-// TESTSIL: [[B4:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL-NEXT: load [[B4]]
-// TESTSIL: store {{.*}} to [[B4]]
-// TESTSIL: end_access [[B4]]
-// TESTSIL-NOT: begin_access
-// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest5yySiF'
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
+// FIXME_TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest5yySiF : $@convention(thin)
+// FIXME_TESTSIL: bb0
+// FIXME_TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
+// FIXME_TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL: end_access [[B1]]
+// FIXME_TESTSIL: bb6
+// FIXME_TESTSIL: [[B2:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL-NEXT: load [[B2]]
+// FIXME_TESTSIL: store {{.*}} to [[B2]]
+// FIXME_TESTSIL: end_access [[B2]]
+// FIXME_TESTSIL: bb7
+// FIXME_TESTSIL: [[B3:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL-NEXT: load [[B3]]
+// FIXME_TESTSIL: store {{.*}} to [[B3]]
+// FIXME_TESTSIL: end_access [[B3]]
+// FIXME_TESTSIL: bb8
+// FIXME_TESTSIL: [[B4:%.*]] = begin_access [modify] [static] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL-NEXT: load [[B4]]
+// FIXME_TESTSIL: store {{.*}} to [[B4]]
+// FIXME_TESTSIL: end_access [[B4]]
+// FIXME_TESTSIL-NOT: begin_access
+// FIXME_TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest5yySiF'
@inline(never)
public func MergeTest5(_ N: Int) {
let range = 0..<10000
@@ -169,13 +199,17 @@
}
}
-// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest6yySiF : $@convention(thin)
-// TESTSIL: bb0
-// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
-// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL: end_access [[B1]]
-// TESTSIL-NOT: begin_access
-// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest6yySiF'
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
+// FIXME_TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest6yySiF : $@convention(thin)
+// FIXME_TESTSIL: bb0
+// FIXME_TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
+// FIXME_TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL: end_access [[B1]]
+// FIXME_TESTSIL-NOT: begin_access
+// FIXME_TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest6yySiF'
@inline(never)
public func MergeTest6(_ N: Int) {
let range = 0..<10000
@@ -195,13 +229,17 @@
public func foo() {
}
-// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest7yySiF : $@convention(thin)
-// TESTSIL: bb0
-// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
-// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL: end_access [[B1]]
-// TESTSIL-NOT: begin_access
-// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest7yySiF'
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
+// FIXME_TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest7yySiF : $@convention(thin)
+// FIXME_TESTSIL: bb0
+// FIXME_TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
+// FIXME_TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL: end_access [[B1]]
+// FIXME_TESTSIL-NOT: begin_access
+// FIXME_TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest7yySiF'
@inline(never)
public func MergeTest7(_ N: Int) {
let range = 0..<10000
@@ -217,13 +255,17 @@
}
}
-// TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest8yySiF : $@convention(thin)
-// TESTSIL: bb0
-// TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
-// TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
-// TESTSIL: end_access [[B1]]
-// TESTSIL-NOT: begin_access
-// TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest8yySiF'
+// FIXME: The optimization should be able to merge these accesses, but
+// it must first prove that no other conflicting read accesses occur
+// within the existing read access scopes.
+//
+// FIXME_TESTSIL-LABEL: sil [noinline] @$s17merge_exclusivity10MergeTest8yySiF : $@convention(thin)
+// FIXME_TESTSIL: bb0
+// FIXME_TESTSIL: [[GLOBALVAR:%.*]] = global_addr @$s17merge_exclusivity5checks6UInt64Vvp
+// FIXME_TESTSIL: [[B1:%.*]] = begin_access [modify] [dynamic] [no_nested_conflict] [[GLOBALVAR]]
+// FIXME_TESTSIL: end_access [[B1]]
+// FIXME_TESTSIL-NOT: begin_access
+// FIXME_TESTSIL-LABEL: } // end sil function '$s17merge_exclusivity10MergeTest8yySiF'
@inline(never)
public func MergeTest8(_ N: Int) {
let range = 0..<10000
diff --git a/utils/build-presets.ini b/utils/build-presets.ini
index bffca2a..a169975 100644
--- a/utils/build-presets.ini
+++ b/utils/build-presets.ini
@@ -1778,3 +1778,14 @@
swift-include-tests=0
darwin-deployment-version-ios=10.0
darwin-crash-reporter-client=1
+
+#===------------------------------------------------------------------------===#
+# Linux corelibs foundation preset
+#===------------------------------------------------------------------------===#
+[preset: buildbot_linux,foundation=debug]
+mixin-preset=buildbot_linux
+
+debug-foundation
+
+[preset: buildbot_linux,foundation=release]
+mixin-preset=buildbot_linux
diff --git a/validation-test/Evolution/Inputs/conformance_reference.swift b/validation-test/Evolution/Inputs/conformance_reference.swift
new file mode 100644
index 0000000..1c49c87
--- /dev/null
+++ b/validation-test/Evolution/Inputs/conformance_reference.swift
@@ -0,0 +1,36 @@
+
+public func getVersion() -> Int {
+#if BEFORE
+ return 0
+#else
+ return 1
+#endif
+}
+
+
+public protocol BaseProtocol {
+#if AFTER
+ associatedtype Assoc = Self
+#endif
+}
+
+public protocol DerivedProtocol : BaseProtocol {}
+
+
+public struct FirstGeneric<T> : BaseProtocol {
+ public init() {}
+}
+
+public struct SecondGeneric<T> : DerivedProtocol {
+ public init() {}
+}
+
+extension BaseProtocol {
+ public func getMeAType() -> Any.Type {
+#if BEFORE
+ return Self.self
+#else
+ return Assoc.self
+#endif
+ }
+}
\ No newline at end of file
diff --git a/validation-test/Evolution/test_conformance_reference.swift b/validation-test/Evolution/test_conformance_reference.swift
new file mode 100644
index 0000000..1ab2a82
--- /dev/null
+++ b/validation-test/Evolution/test_conformance_reference.swift
@@ -0,0 +1,39 @@
+// RUN: %target-resilience-test --no-symbol-diff
+// REQUIRES: executable_test
+
+import StdlibUnittest
+import conformance_reference
+
+
+var ConformanceReferenceTest = TestSuite("ConformanceReference")
+
+
+func useBase<T : BaseProtocol>(_: T) {}
+
+ConformanceReferenceTest.test("BaseConformance") {
+ useBase(FirstGeneric<Int>())
+ useBase(SecondGeneric<Int>())
+}
+
+
+func useDerived<T : DerivedProtocol>(_: T) {}
+
+ConformanceReferenceTest.test("DerivedConformance") {
+ useDerived(SecondGeneric<Int>())
+}
+
+
+protocol EvenMoreDerivedProtocol : DerivedProtocol {}
+
+extension FirstGeneric : EvenMoreDerivedProtocol {}
+
+func useEvenMoreDerived<T : EvenMoreDerivedProtocol>(_ t: T) -> Any.Type {
+ return t.getMeAType()
+}
+
+ConformanceReferenceTest.test("EvenMoreDerivedConformance") {
+ expectTrue(FirstGeneric<Int>.self == useEvenMoreDerived(FirstGeneric<Int>()))
+ expectTrue(FirstGeneric<String>.self == useEvenMoreDerived(FirstGeneric<String>()))
+}
+
+runAllTests()
\ No newline at end of file