Merge pull request #21352 from compnerd/lit-erally-better-on-windows
lit improvements for Windows expansions
diff --git a/cmake/modules/AddSwift.cmake b/cmake/modules/AddSwift.cmake
index 5238446..f7704e4 100644
--- a/cmake/modules/AddSwift.cmake
+++ b/cmake/modules/AddSwift.cmake
@@ -434,12 +434,14 @@
# we need to add the math library, which is linked implicitly by libc++.
list(APPEND result "-nostdlib++" "-lm")
if("${LFLAGS_ARCH}" MATCHES armv7)
- list(APPEND result "${SWIFT_ANDROID_NDK_PATH}/sources/cxx-stl/llvm-libc++/libs/armeabi-v7a/libc++_shared.so")
+ set(android_libcxx_path "${SWIFT_ANDROID_NDK_PATH}/sources/cxx-stl/llvm-libc++/libs/armeabi-v7a")
elseif("${LFLAGS_ARCH}" MATCHES aarch64)
- list(APPEND result "${SWIFT_ANDROID_NDK_PATH}/sources/cxx-stl/llvm-libc++/libs/arm64-v8a/libc++_shared.so")
+ set(android_libcxx_path "${SWIFT_ANDROID_NDK_PATH}/sources/cxx-stl/llvm-libc++/libs/arm64-v8a")
else()
message(SEND_ERROR "unknown architecture (${LFLAGS_ARCH}) for android")
endif()
+ list(APPEND link_libraries "${android_libcxx_path}/libc++abi.a")
+ list(APPEND link_libraries "${android_libcxx_path}/libc++_shared.so")
swift_android_lib_for_arch(${LFLAGS_ARCH} ${LFLAGS_ARCH}_LIB)
foreach(path IN LISTS ${LFLAGS_ARCH}_LIB)
list(APPEND library_search_directories ${path})
diff --git a/cmake/modules/SwiftConfigureSDK.cmake b/cmake/modules/SwiftConfigureSDK.cmake
index 21e47a6..f8a59d6 100644
--- a/cmake/modules/SwiftConfigureSDK.cmake
+++ b/cmake/modules/SwiftConfigureSDK.cmake
@@ -12,9 +12,9 @@
function(_report_sdk prefix)
message(STATUS "${SWIFT_SDK_${prefix}_NAME} SDK:")
if("${prefix}" STREQUAL "WINDOWS")
- message(STATUS " UCRT Version: $ENV{UCRTVersion}")
- message(STATUS " UCRT SDK Dir: $ENV{UniversalCRTSdkDir}")
- message(STATUS " VC Dir: $ENV{VCToolsInstallDir}")
+ message(STATUS " UCRT Version: ${UCRTVersion}")
+ message(STATUS " UCRT SDK Dir: ${UniversalCRTSdkDir}")
+ message(STATUS " VC Dir: ${VCToolsInstallDir}")
if("${CMAKE_BUILD_TYPE}" STREQUAL "DEBUG")
message(STATUS " ${CMAKE_BUILD_TYPE} VC++ CRT: MDd")
else()
@@ -250,6 +250,8 @@
# Note: this has to be implemented as a macro because it sets global
# variables.
+ swift_windows_cache_VCVARS()
+
string(TOUPPER ${name} prefix)
string(TOLOWER ${name} platform)
@@ -274,10 +276,10 @@
# NOTE(compnerd) workaround incorrectly extensioned import libraries from
# the Windows SDK on case sensitive file systems.
swift_windows_arch_spelling(${arch} WinSDKArchitecture)
- set(WinSDK${arch}UMDir "$ENV{UniversalCRTSdkDir}/Lib/$ENV{UCRTVersion}/um/${WinSDKArchitecture}")
+ set(WinSDK${arch}UMDir "${UniversalCRTSdkDir}/Lib/${UCRTVersion}/um/${WinSDKArchitecture}")
set(OverlayDirectory "${CMAKE_BINARY_DIR}/winsdk_lib_${arch}_symlinks")
- if(NOT EXISTS "$ENV{UniversalCRTSdkDir}/Include/$ENV{UCRTVersion}/um/WINDOWS.H")
+ if(NOT EXISTS "${UniversalCRTSdkDir}/Include/${UCRTVersion}/um/WINDOWS.H")
file(MAKE_DIRECTORY ${OverlayDirectory})
file(GLOB libraries RELATIVE "${WinSDK${arch}UMDir}" "${WinSDK${arch}UMDir}/*")
diff --git a/cmake/modules/SwiftWindowsSupport.cmake b/cmake/modules/SwiftWindowsSupport.cmake
index c0dd779..fb4ebb7 100644
--- a/cmake/modules/SwiftWindowsSupport.cmake
+++ b/cmake/modules/SwiftWindowsSupport.cmake
@@ -15,35 +15,16 @@
endif()
endfunction()
-function(swift_verify_windows_environment_variables)
- set(VCToolsInstallDir $ENV{VCToolsInstallDir})
- set(UniversalCRTSdkDir $ENV{UniversalCRTSdkDir})
- set(UCRTVersion $ENV{UCRTVersion})
-
- precondition(VCToolsInstallDir
- MESSAGE
- "VCToolsInstallDir environment variable must be set")
- precondition(UniversalCRTSdkDir
- MESSAGE
- "UniversalCRTSdkDir environment variable must be set")
- precondition(UCRTVersion
- MESSAGE
- "UCRTVersion environment variable must be set")
-endfunction()
-
function(swift_windows_include_for_arch arch var)
- swift_verify_windows_environment_variables()
-
set(paths
- "$ENV{VCToolsInstallDir}/include"
- "$ENV{UniversalCRTSdkDir}/Include/$ENV{UCRTVersion}/ucrt"
- "$ENV{UniversalCRTSdkDir}/Include/$ENV{UCRTVersion}/shared"
- "$ENV{UniversalCRTSdkDir}/Include/$ENV{UCRTVersion}/um")
+ "${VCToolsInstallDir}/include"
+ "${UniversalCRTSdkDir}/Include/${UCRTVersion}/ucrt"
+ "${UniversalCRTSdkDir}/Include/${UCRTVersion}/shared"
+ "${UniversalCRTSdkDir}/Include/${UCRTVersion}/um")
set(${var} ${paths} PARENT_SCOPE)
endfunction()
function(swift_windows_lib_for_arch arch var)
- swift_verify_windows_environment_variables()
swift_windows_arch_spelling(${arch} ARCH)
set(paths)
@@ -51,29 +32,27 @@
# NOTE(compnerd) provide compatibility with VS2015 which had the libraries in
# a directory called "Lib" rather than VS2017 which normalizes the layout and
# places them in a directory named "lib".
- if(IS_DIRECTORY "$ENV{VCToolsInstallDir}/Lib")
+ if(IS_DIRECTORY "${VCToolsInstallDir}/Lib")
if(${ARCH} STREQUAL x86)
- list(APPEND paths "$ENV{VCToolsInstallDir}/Lib/")
+ list(APPEND paths "${VCToolsInstallDir}/Lib/")
else()
- list(APPEND paths "$ENV{VCToolsInstallDir}/Lib/${ARCH}")
+ list(APPEND paths "${VCToolsInstallDir}/Lib/${ARCH}")
endif()
else()
- list(APPEND paths "$ENV{VCToolsInstallDir}/lib/${ARCH}")
+ list(APPEND paths "${VCToolsInstallDir}/lib/${ARCH}")
endif()
list(APPEND paths
- "$ENV{UniversalCRTSdkDir}/Lib/$ENV{UCRTVersion}/ucrt/${ARCH}"
- "$ENV{UniversalCRTSdkDir}/Lib/$ENV{UCRTVersion}/um/${ARCH}")
+ "${UniversalCRTSdkDir}/Lib/${UCRTVersion}/ucrt/${ARCH}"
+ "${UniversalCRTSdkDir}/Lib/${UCRTVersion}/um/${ARCH}")
set(${var} ${paths} PARENT_SCOPE)
endfunction()
function(swift_windows_generate_sdk_vfs_overlay flags)
- swift_verify_windows_environment_variables()
-
- get_filename_component(VCToolsInstallDir $ENV{VCToolsInstallDir} ABSOLUTE)
- get_filename_component(UniversalCRTSdkDir $ENV{UniversalCRTSdkDir} ABSOLUTE)
- set(UCRTVersion $ENV{UCRTVersion})
+ get_filename_component(VCToolsInstallDir ${VCToolsInstallDir} ABSOLUTE)
+ get_filename_component(UniversalCRTSdkDir ${UniversalCRTSdkDir} ABSOLUTE)
+ set(UCRTVersion ${UCRTVersion})
# TODO(compnerd) use a target to avoid re-creating this file all the time
configure_file("${SWIFT_SOURCE_DIR}/utils/WindowsSDKVFSOverlay.yaml.in"
@@ -85,3 +64,19 @@
PARENT_SCOPE)
endfunction()
+function(swift_verify_windows_VCVAR var)
+ if (NOT DEFINED "${var}" AND NOT DEFINED "ENV{${var}}")
+ message(FATAL_ERROR "${var} environment variable must be set")
+ endif()
+endfunction()
+
+function(swift_windows_cache_VCVARS)
+ swift_verify_windows_VCVAR(VCToolsInstallDir)
+ swift_verify_windows_VCVAR(UniversalCRTSdkDir)
+ swift_verify_windows_VCVAR(UCRTVersion)
+
+ set(VCToolsInstallDir $ENV{VCToolsInstallDir} CACHE STRING "")
+ set(UniversalCRTSdkDir $ENV{UniversalCRTSdkDir} CACHE STRING "")
+ set(UCRTVersion $ENV{UCRTVersion} CACHE STRING "")
+endfunction()
+
diff --git a/include/swift/Driver/DependencyGraph.h b/include/swift/Driver/DependencyGraph.h
index 73da043..56c0e32 100644
--- a/include/swift/Driver/DependencyGraph.h
+++ b/include/swift/Driver/DependencyGraph.h
@@ -121,6 +121,7 @@
llvm::StringMap<std::pair<std::vector<DependencyEntryTy>, DependencyMaskTy>> Dependencies;
/// The set of marked nodes.
+
llvm::SmallPtrSet<const void *, 16> Marked;
/// A list of all external dependencies that cannot be resolved from just this
@@ -153,6 +154,8 @@
(void)newlyInserted;
}
+ /// See DependencyGraph::markTransitive.
+
void markTransitive(SmallVectorImpl<const void *> &visited,
const void *node, MarkTracerImpl *tracer = nullptr);
bool markIntransitive(const void *node) {
@@ -254,7 +257,7 @@
/// Marks \p node and all nodes that depend on \p node, and places any nodes
/// that get transitively marked into \p visited.
///
- /// Nodes that have been previously marked are not included in \p newlyMarked,
+ /// Nodes that have been previously marked are not included in \p visited,
/// nor are their successors traversed, <em>even if their "provides" set has
/// been updated since it was marked.</em> (However, nodes that depend on the
/// given \p node are always traversed.)
@@ -264,6 +267,14 @@
///
/// If you want to see how each node gets added to \p visited, pass a local
/// MarkTracer instance to \p tracer.
+ ///
+ /// Conservatively assumes that there exists a "cascading" edge into the
+ /// starting node. Therefore, mark the start. For each visited node, add it to
+ /// \p visited, and mark it if some incoming edge cascades. The start node is
+ /// NOT added to \p visited.
+ ///
+ /// The traversal routines use
+ /// \p visited to avoid endless recursion.
template <unsigned N>
void markTransitive(SmallVector<T, N> &visited, T node,
MarkTracer *tracer = nullptr) {
diff --git a/include/swift/Driver/Job.h b/include/swift/Driver/Job.h
index b4a8c4f..941d75a 100644
--- a/include/swift/Driver/Job.h
+++ b/include/swift/Driver/Job.h
@@ -230,9 +230,17 @@
class Job {
public:
enum class Condition {
+ // There was no information about the previous build (i.e., an input map),
+ // or the map marked this Job as dirty or needing a cascading build.
+ // Be maximally conservative with dependencies.
Always,
+ // The input changed, or this job was scheduled as non-cascading in the last
+ // build but didn't get to run.
RunWithoutCascading,
+ // The best case: input didn't change, output exists.
+ // Only run if it depends on some other thing that changed.
CheckDependencies,
+ // Run no matter what (but may or may not cascade).
NewlyAdded
};
diff --git a/include/swift/SIL/SILConstants.h b/include/swift/SIL/SILConstants.h
index 1a431ac..a6c9a29 100644
--- a/include/swift/SIL/SILConstants.h
+++ b/include/swift/SIL/SILConstants.h
@@ -30,7 +30,9 @@
struct APIntSymbolicValue;
struct ArraySymbolicValue;
+struct DerivedAddressValue;
struct EnumWithPayloadSymbolicValue;
+struct SymbolicValueMemoryObject;
struct UnknownSymbolicValue;
extern llvm::cl::opt<unsigned> ConstExprLimit;
@@ -69,6 +71,10 @@
class SymbolicValue {
private:
enum RepresentationKind {
+ /// This value is an alloc stack that has not (yet) been initialized
+ /// by flow-sensitive analysis.
+ RK_UninitMemory,
+
/// This symbolic value cannot be determined, carries multiple values
/// (i.e., varies dynamically at the top level), or is of some type that
/// we cannot analyze and propagate (e.g. NSObject).
@@ -92,6 +98,12 @@
/// This value is a struct or tuple of constants. This is tracked by the
/// "aggregate" member of the value union.
RK_Aggregate,
+
+ /// This represents the address of a memory object.
+ RK_DirectAddress,
+
+ /// This represents an index *into* a memory object.
+ RK_DerivedAddress,
};
union {
@@ -115,6 +127,14 @@
/// When this SymbolicValue is of "Aggregate" kind, this pointer stores
/// information about the array elements and count.
const SymbolicValue *aggregate;
+
+ /// When the representationKind is "DirectAddress", this pointer is the
+ /// memory object referenced.
+ SymbolicValueMemoryObject *directAddress;
+
+ /// When this SymbolicValue is of "DerivedAddress" kind, this pointer stores
+ /// information about the memory object and access path of the access.
+ DerivedAddressValue *derivedAddress;
} value;
RepresentationKind representationKind : 8;
@@ -150,6 +170,13 @@
/// This can be an array, struct, tuple, etc.
Aggregate,
+
+ /// This value represents the address of, or into, a memory object.
+ Address,
+
+ /// These values are generally only seen internally to the system, external
+ /// clients shouldn't have to deal with them.
+ UninitMemory
};
/// For constant values, return the type classification of this value.
@@ -158,7 +185,7 @@
/// Return true if this represents a constant value.
bool isConstant() const {
auto kind = getKind();
- return kind != Unknown;
+ return kind != Unknown && kind != UninitMemory;
}
static SymbolicValue getUnknown(SILNode *node, UnknownReason reason,
@@ -177,6 +204,12 @@
/// Return the reason an unknown result was generated.
UnknownReason getUnknownReason() const;
+ static SymbolicValue getUninitMemory() {
+ SymbolicValue result;
+ result.representationKind = RK_UninitMemory;
+ return result;
+ }
+
static SymbolicValue getMetatype(CanType type) {
SymbolicValue result;
result.representationKind = RK_Metatype;
@@ -216,6 +249,25 @@
ArrayRef<SymbolicValue> getAggregateValue() const;
+ /// Return a symbolic value that represents the address of a memory object.
+ static SymbolicValue getAddress(SymbolicValueMemoryObject *memoryObject) {
+ SymbolicValue result;
+ result.representationKind = RK_DirectAddress;
+ result.value.directAddress = memoryObject;
+ return result;
+ }
+
+ /// Return a symbolic value that represents the address of a memory object
+ /// indexed by a path.
+ static SymbolicValue getAddress(SymbolicValueMemoryObject *memoryObject,
+ ArrayRef<unsigned> indices,
+ ASTContext &astContext);
+
+ /// Return the memory object of this reference along with any access path
+ /// indices involved.
+ SymbolicValueMemoryObject *
+ getAddressValue(SmallVectorImpl<unsigned> &accessPath) const;
+
//===--------------------------------------------------------------------===//
// Helpers
@@ -247,6 +299,29 @@
return os;
}
+/// This is a representation of a memory object referred to by an address.
+/// Memory objects may be mutated over their lifetime, but their overall type
+/// remains the same.
+struct SymbolicValueMemoryObject {
+ Type getType() const { return type; }
+
+ SymbolicValue getValue() const { return value; }
+ void setValue(SymbolicValue newValue) { value = newValue; }
+
+ /// Create a new memory object whose overall type is as specified.
+ static SymbolicValueMemoryObject *create(Type type, SymbolicValue value,
+ ASTContext &astContext);
+
+private:
+ const Type type;
+ SymbolicValue value;
+
+ SymbolicValueMemoryObject(Type type, SymbolicValue value)
+ : type(type), value(value) {}
+ SymbolicValueMemoryObject(const SymbolicValueMemoryObject &) = delete;
+ void operator=(const SymbolicValueMemoryObject &) = delete;
+};
+
} // end namespace swift
#endif
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index de773fe..83d9c5c 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -206,9 +206,10 @@
/// Jobs that incremental-mode has decided it can skip.
CommandSet DeferredCommands;
- /// Jobs in the initial set with Condition::Always, or lacking existing
+ /// Jobs in the initial set with Condition::Always, and having an existing
/// .swiftdeps files.
- SmallVector<const Job *, 16> InitialOutOfDateCommands;
+ /// Set by scheduleInitialJobs and used only by scheduleAdditionalJobs.
+ SmallVector<const Job *, 16> InitialCascadingCommands;
/// Dependency graph for deciding which jobs are dirty (need running)
/// or clean (can be skipped).
@@ -382,7 +383,7 @@
DeferredCommands.clear();
}
- /// Helper that attmepts to reload a job's .swiftdeps file after the job
+ /// Helper that attempts to reload a job's .swiftdeps file after the job
/// exits, and re-run transitive marking to ensure everything is properly
/// invalidated by any new dependency edges introduced by it. If reloading
/// fails, this can cause deferred jobs to be immediately scheduled.
@@ -406,6 +407,13 @@
// If we have a dependency file /and/ the frontend task exited normally,
// we can be discerning about what downstream files to rebuild.
if (ReturnCode == EXIT_SUCCESS || ReturnCode == EXIT_FAILURE) {
+ // "Marked" means that everything provided by this node (i.e. Job) is
+ // dirty. Thus any file using any of these provides must be
+ // recompiled. (Only non-private entities are output as provides.) In
+ // other words, this Job "cascades"; the need to recompile it causes
+ // other recompilations. It is possible that the current code marks
things that do not need to be marked. Unnecessary compilation would
+ // result if that were the case.
bool wasCascading = DepGraph.isMarked(FinishedCmd);
switch (DepGraph.loadFromPath(FinishedCmd, DependenciesFile)) {
@@ -715,7 +723,15 @@
switch (Condition) {
case Job::Condition::Always:
if (Comp.getIncrementalBuildEnabled() && !DependenciesFile.empty()) {
- InitialOutOfDateCommands.push_back(Cmd);
+ // Ensure dependents will get recompiled.
+ InitialCascadingCommands.push_back(Cmd);
+ // Mark this job as cascading.
+ //
+ // It would probably be safe and simpler to markTransitive on the
+ // start nodes in the "Always" condition from the start instead of
+ // using markIntransitive and having later functions call
+ // markTransitive. That way markIntransitive would be an
+ // implementation detail of DependencyGraph.
DepGraph.markIntransitive(Cmd);
}
LLVM_FALLTHROUGH;
@@ -740,7 +756,7 @@
// We scheduled all of the files that have actually changed. Now add the
// files that haven't changed, so that they'll get built in parallel if
// possible and after the first set of files if it's not.
- for (auto *Cmd : InitialOutOfDateCommands) {
+ for (auto *Cmd : InitialCascadingCommands) {
DepGraph.markTransitive(AdditionalOutOfDateCommands, Cmd,
IncrementalTracer);
}
diff --git a/lib/Driver/CompilationRecord.h b/lib/Driver/CompilationRecord.h
index 82f3783..ce12150 100644
--- a/lib/Driver/CompilationRecord.h
+++ b/lib/Driver/CompilationRecord.h
@@ -19,8 +19,8 @@
namespace driver {
namespace compilation_record {
-/// Compilation record files (.swiftdeps files) are YAML files composed of these
-/// top-level keys.
+/// Compilation record files (-master.swiftdeps files) are YAML files composed
+/// of these top-level keys.
enum class TopLevelKey {
/// The key for the Swift compiler version used to produce the compilation
/// record.
diff --git a/lib/IRGen/GenDecl.cpp b/lib/IRGen/GenDecl.cpp
index a9c20e6..55f852e 100644
--- a/lib/IRGen/GenDecl.cpp
+++ b/lib/IRGen/GenDecl.cpp
@@ -1099,30 +1099,8 @@
while (!LazyTypeMetadata.empty() ||
!LazyTypeContextDescriptors.empty() ||
!LazyFunctionDefinitions.empty() ||
- !LazyFieldTypes.empty() ||
!LazyWitnessTables.empty()) {
- while (!LazyFieldTypes.empty()) {
- auto info = LazyFieldTypes.pop_back_val();
- auto &IGM = *info.IGM;
-
- for (auto fieldType : info.fieldTypes) {
- if (fieldType->hasArchetype())
- continue;
-
- // All of the required attributes are going to be preserved
- // by field reflection metadata in the mangled name, so
- // there is no need to worry about ownership semantics here.
- if (auto refStorTy = dyn_cast<ReferenceStorageType>(fieldType))
- fieldType = refStorTy.getReferentType();
-
- // Make sure that all of the field type metadata is forced,
- // otherwise there might be a problem when fields are accessed
- // through reflection.
- (void)irgen::getOrCreateTypeMetadataAccessFunction(IGM, fieldType);
- }
- }
-
// Emit any lazy type metadata we require.
while (!LazyTypeMetadata.empty()) {
NominalTypeDecl *type = LazyTypeMetadata.pop_back_val();
diff --git a/lib/IRGen/GenMeta.cpp b/lib/IRGen/GenMeta.cpp
index 08d9344..2b43e02 100644
--- a/lib/IRGen/GenMeta.cpp
+++ b/lib/IRGen/GenMeta.cpp
@@ -1198,37 +1198,6 @@
}
return numFields;
}
-
- /// Track the field types of a struct or class for reflection metadata
- /// emission.
- static void
- addFieldTypes(IRGenModule &IGM, NominalTypeDecl *type,
- NominalTypeDecl::StoredPropertyRange storedProperties) {
- SmallVector<CanType, 4> types;
- for (VarDecl *prop : storedProperties) {
- auto propertyType = type->mapTypeIntoContext(prop->getInterfaceType())
- ->getCanonicalType();
- types.push_back(propertyType);
- }
-
- IGM.addFieldTypes(types);
- }
-
- /// Track the payload types of an enum for reflection metadata
- /// emission.
- static void addFieldTypes(IRGenModule &IGM,
- ArrayRef<EnumImplStrategy::Element> enumElements) {
- SmallVector<CanType, 4> types;
-
- for (auto &elt : enumElements) {
- auto caseType = elt.decl->getParentEnum()->mapTypeIntoContext(
- elt.decl->getArgumentInterfaceType())
- ->getCanonicalType();
- types.push_back(caseType);
- }
-
- IGM.addFieldTypes(types);
- }
class StructContextDescriptorBuilder
: public TypeContextDescriptorBuilderBase<StructContextDescriptorBuilder,
@@ -1264,7 +1233,9 @@
// uint32_t FieldOffsetVectorOffset;
B.addInt32(FieldVectorOffset / IGM.getPointerSize());
- addFieldTypes(IGM, getType(), properties);
+ // For any nominal type metadata required for reflection.
+ for (auto *prop : properties)
+ IGM.IRGen.noteUseOfTypeMetadata(prop->getValueInterfaceType());
}
uint16_t getKindSpecificFlags() {
@@ -1336,7 +1307,9 @@
// uint32_t NumEmptyCases;
B.addInt32(Strategy.getElementsWithNoPayload().size());
- addFieldTypes(IGM, Strategy.getElementsWithPayload());
+ // For any nominal type metadata required for reflection.
+ for (auto elt : Strategy.getElementsWithPayload())
+ IGM.IRGen.noteUseOfTypeMetadata(elt.decl->getArgumentInterfaceType());
}
uint16_t getKindSpecificFlags() {
@@ -1646,7 +1619,9 @@
// uint32_t FieldOffsetVectorOffset;
B.addInt32(getFieldVectorOffset() / IGM.getPointerSize());
- addFieldTypes(IGM, getType(), properties);
+ // For any nominal type metadata required for reflection.
+ for (auto *prop : properties)
+ IGM.IRGen.noteUseOfTypeMetadata(prop->getValueInterfaceType());
}
};
} // end anonymous namespace
@@ -1772,10 +1747,6 @@
[&]{ AnonymousContextDescriptorBuilder(*this, DC).emit(); });
}
-void IRGenModule::addFieldTypes(ArrayRef<CanType> fieldTypes) {
- IRGen.addFieldTypes(fieldTypes, this);
-}
-
static void emitInitializeFieldOffsetVector(IRGenFunction &IGF,
SILType T,
llvm::Value *metadata,
diff --git a/lib/IRGen/IRGenModule.h b/lib/IRGen/IRGenModule.h
index f185002..f45794d 100644
--- a/lib/IRGen/IRGenModule.h
+++ b/lib/IRGen/IRGenModule.h
@@ -241,14 +241,6 @@
llvm::SetVector<SILFunction*> DynamicReplacements;
- struct FieldTypeMetadata {
- IRGenModule *IGM;
- std::vector<CanType> fieldTypes;
- };
-
- /// Field types we need to verify are present.
- llvm::SmallVector<FieldTypeMetadata, 4> LazyFieldTypes;
-
/// SIL functions that we need to emit lazily.
llvm::SmallVector<SILFunction*, 4> LazyFunctionDefinitions;
@@ -367,6 +359,13 @@
noteUseOfTypeGlobals(type, true, RequireMetadata);
}
+ void noteUseOfTypeMetadata(Type type) {
+ type.visit([&](Type t) {
+ if (auto *nominal = t->getAnyNominal())
+ noteUseOfTypeMetadata(nominal);
+ });
+ }
+
void noteUseOfTypeContextDescriptor(NominalTypeDecl *type,
RequireMetadata_t requireMetadata) {
noteUseOfTypeGlobals(type, false, requireMetadata);
@@ -386,9 +385,6 @@
/// Adds \p Conf to LazyWitnessTables if it has not been added yet.
void addLazyWitnessTable(const ProtocolConformance *Conf);
- void addFieldTypes(ArrayRef<CanType> fieldTypes, IRGenModule *IGM) {
- LazyFieldTypes.push_back({IGM, {fieldTypes.begin(), fieldTypes.end()}});
- }
void addClassForEagerInitialization(ClassDecl *ClassDecl);
@@ -846,7 +842,6 @@
void addUsedGlobal(llvm::GlobalValue *global);
void addCompilerUsedGlobal(llvm::GlobalValue *global);
void addObjCClass(llvm::Constant *addr, bool nonlazy);
- void addFieldTypes(ArrayRef<CanType> fieldTypes);
void addProtocolConformance(ConformanceDescription &&conformance);
llvm::Constant *emitSwiftProtocols();
diff --git a/lib/SIL/SILConstants.cpp b/lib/SIL/SILConstants.cpp
index 5ff9c9f..2666f45 100644
--- a/lib/SIL/SILConstants.cpp
+++ b/lib/SIL/SILConstants.cpp
@@ -37,6 +37,9 @@
void SymbolicValue::print(llvm::raw_ostream &os, unsigned indent) const {
os.indent(indent);
switch (representationKind) {
+ case RK_UninitMemory:
+ os << "uninit\n";
+ return;
case RK_Unknown: {
os << "unknown(" << (int)getUnknownReason() << "): ";
getUnknownNode()->dump();
@@ -76,6 +79,16 @@
return;
}
}
+ case RK_DirectAddress:
+ case RK_DerivedAddress: {
+ SmallVector<unsigned, 4> accessPath;
+ SymbolicValueMemoryObject *memObject = getAddressValue(accessPath);
+ os << "Address[" << memObject->getType() << "] ";
+ interleave(accessPath.begin(), accessPath.end(),
+ [&](unsigned idx) { os << idx; }, [&]() { os << ", "; });
+ os << "\n";
+ break;
+ }
}
}
@@ -85,6 +98,8 @@
/// multiple forms for efficiency, but provide a simpler interface to clients.
SymbolicValue::Kind SymbolicValue::getKind() const {
switch (representationKind) {
+ case RK_UninitMemory:
+ return UninitMemory;
case RK_Unknown:
return Unknown;
case RK_Metatype:
@@ -96,6 +111,9 @@
case RK_Integer:
case RK_IntegerInline:
return Integer;
+ case RK_DirectAddress:
+ case RK_DerivedAddress:
+ return Address;
}
}
@@ -105,6 +123,7 @@
SymbolicValue::cloneInto(ASTContext &astContext) const {
auto thisRK = representationKind;
switch (thisRK) {
+ case RK_UninitMemory:
case RK_Unknown:
case RK_Metatype:
case RK_Function:
@@ -120,7 +139,28 @@
results.push_back(elt.cloneInto(astContext));
return getAggregate(results, astContext);
}
+ case RK_DirectAddress:
+ case RK_DerivedAddress: {
+ SmallVector<unsigned, 4> accessPath;
+ auto *memObject = getAddressValue(accessPath);
+ auto *newMemObject = SymbolicValueMemoryObject::create(
+ memObject->getType(), memObject->getValue(), astContext);
+ return getAddress(newMemObject, accessPath, astContext);
}
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// SymbolicValueMemoryObject implementation
+//===----------------------------------------------------------------------===//
+
+SymbolicValueMemoryObject *
+SymbolicValueMemoryObject::create(Type type, SymbolicValue value,
+ ASTContext &astContext) {
+ auto *result = astContext.Allocate(sizeof(SymbolicValueMemoryObject),
+ alignof(SymbolicValueMemoryObject));
+ new (result) SymbolicValueMemoryObject(type, value);
+ return (SymbolicValueMemoryObject *)result;
}
//===----------------------------------------------------------------------===//
@@ -225,7 +265,7 @@
ASTContext &astContext) {
auto byteSize =
UnknownSymbolicValue::totalSizeToAlloc<SourceLoc>(elements.size());
- auto rawMem = astContext.Allocate(byteSize, alignof(UnknownSymbolicValue));
+ auto *rawMem = astContext.Allocate(byteSize, alignof(UnknownSymbolicValue));
// Placement-new the value inside the memory we just allocated.
auto value = ::new (rawMem) UnknownSymbolicValue(
@@ -280,6 +320,91 @@
}
//===----------------------------------------------------------------------===//
+// Addresses
+//===----------------------------------------------------------------------===//
+
+namespace swift {
+
+/// This is the representation of a derived address. A derived address refers
+/// to a memory object along with an access path that drills into it.
+struct DerivedAddressValue final
+ : private llvm::TrailingObjects<DerivedAddressValue, unsigned> {
+ friend class llvm::TrailingObjects<DerivedAddressValue, unsigned>;
+
+ SymbolicValueMemoryObject *memoryObject;
+
+ /// This is the number of indices in the derived address.
+ const unsigned numElements;
+
+ static DerivedAddressValue *create(SymbolicValueMemoryObject *memoryObject,
+ ArrayRef<unsigned> elements,
+ ASTContext &astContext) {
+ auto byteSize =
+ DerivedAddressValue::totalSizeToAlloc<unsigned>(elements.size());
+ auto *rawMem = astContext.Allocate(byteSize, alignof(DerivedAddressValue));
+
+ // Placement initialize the object.
+ auto dav =
+ ::new (rawMem) DerivedAddressValue(memoryObject, elements.size());
+ std::uninitialized_copy(elements.begin(), elements.end(),
+ dav->getTrailingObjects<unsigned>());
+ return dav;
+ }
+
+ /// Return the access path for this derived address, which is an array of
+ /// indices drilling into the memory object.
+ ArrayRef<unsigned> getElements() const {
+ return {getTrailingObjects<unsigned>(), numElements};
+ }
+
+ // This is used by the llvm::TrailingObjects base class.
+ size_t numTrailingObjects(OverloadToken<unsigned>) const {
+ return numElements;
+ }
+
+private:
+ DerivedAddressValue() = delete;
+ DerivedAddressValue(const DerivedAddressValue &) = delete;
+ DerivedAddressValue(SymbolicValueMemoryObject *memoryObject,
+ unsigned numElements)
+ : memoryObject(memoryObject), numElements(numElements) {}
+};
+} // end namespace swift
+
+/// Return a symbolic value that represents the address of a memory object
+/// indexed by a path.
+SymbolicValue SymbolicValue::getAddress(SymbolicValueMemoryObject *memoryObject,
+ ArrayRef<unsigned> indices,
+ ASTContext &astContext) {
+ if (indices.empty())
+ return getAddress(memoryObject);
+
+ auto dav = DerivedAddressValue::create(memoryObject, indices, astContext);
+ SymbolicValue result;
+ result.representationKind = RK_DerivedAddress;
+ result.value.derivedAddress = dav;
+ return result;
+}
+
+/// Return the memory object of this reference along with any access path
+/// indices involved.
+SymbolicValueMemoryObject *
+SymbolicValue::getAddressValue(SmallVectorImpl<unsigned> &accessPath) const {
+ assert(getKind() == Address);
+
+ accessPath.clear();
+ if (representationKind == RK_DirectAddress)
+ return value.directAddress;
+ assert(representationKind == RK_DerivedAddress);
+
+ auto *dav = value.derivedAddress;
+
+ // The first entry is the object ID, the rest are indices in the accessPath.
+ accessPath.assign(dav->getElements().begin(), dav->getElements().end());
+ return dav->memoryObject;
+}
+
+//===----------------------------------------------------------------------===//
// Higher level code
//===----------------------------------------------------------------------===//
diff --git a/lib/SIL/SILFunctionType.cpp b/lib/SIL/SILFunctionType.cpp
index 64254fb..5d14830 100644
--- a/lib/SIL/SILFunctionType.cpp
+++ b/lib/SIL/SILFunctionType.cpp
@@ -2713,7 +2713,7 @@
// Build the curried function type.
auto inner =
CanFunctionType::get(llvm::makeArrayRef(bridgedParams),
- bridgedResultType);
+ bridgedResultType, innerExtInfo);
auto curried =
CanAnyFunctionType::get(genericSig, {selfParam}, inner, extInfo);
diff --git a/lib/SILGen/SILGenExpr.cpp b/lib/SILGen/SILGenExpr.cpp
index 1700891..2cc12d0 100644
--- a/lib/SILGen/SILGenExpr.cpp
+++ b/lib/SILGen/SILGenExpr.cpp
@@ -4283,9 +4283,6 @@
// If the destination is a tuple, recursively destructure.
void visitTupleExpr(TupleExpr *E) {
- auto *TTy = E->getType()->castTo<TupleType>();
- assert(TTy->hasLValueType() || TTy->isVoid());
- (void)TTy;
for (auto &elt : E->getElements()) {
visit(elt);
}
@@ -4398,7 +4395,6 @@
// Handle tuple destinations by destructuring them if present.
CanType destType = dest->getType()->getCanonicalType();
- assert(!destType->isMaterializable() || destType->isVoid());
// But avoid this in the common case.
if (!isa<TupleType>(destType)) {
diff --git a/lib/SILOptimizer/Utils/ConstExpr.cpp b/lib/SILOptimizer/Utils/ConstExpr.cpp
index 0b4de9a..aa900c4 100644
--- a/lib/SILOptimizer/Utils/ConstExpr.cpp
+++ b/lib/SILOptimizer/Utils/ConstExpr.cpp
@@ -69,7 +69,8 @@
unsigned &numInstEvaluated;
/// This is a state of previously analyzed values, maintained and filled in
- /// by getConstantValue. This does not hold SIL address values.
+ /// by getConstantValue. This does not hold the memory referred to by SIL
+ /// addresses.
llvm::DenseMap<SILValue, SymbolicValue> calculatedValues;
public:
@@ -80,13 +81,21 @@
numInstEvaluated(numInstEvaluated) {}
void setValue(SILValue value, SymbolicValue symVal) {
- // TODO(constexpr patch): Uncomment this assertion once Address kinds have
- // been added.
- // assert(symVal.getKind() != SymbolicValue::Address &&
- // "calculatedValues does not hold addresses");
calculatedValues.insert({value, symVal});
}
+ /// Invariant: Before the call, `calculatedValues` must not contain `addr`
+ /// as a key.
+ SymbolicValue createMemoryObject(SILValue addr, SymbolicValue initialValue) {
+ assert(!calculatedValues.count(addr));
+ auto type = substituteGenericParamsAndSimpify(addr->getType().getASTType());
+ auto *memObject = SymbolicValueMemoryObject::create(
+ type, initialValue, evaluator.getASTContext());
+ auto result = SymbolicValue::getAddress(memObject);
+ setValue(addr, result);
+ return result;
+ }
+
/// Return the SymbolicValue for the specified SIL value, lazily computing
/// it if needed.
SymbolicValue getConstantValue(SILValue value);
@@ -96,9 +105,9 @@
/// statements.
llvm::Optional<SymbolicValue> evaluateFlowSensitive(SILInstruction *inst);
- Type simplifyType(Type ty);
- CanType simplifyType(CanType ty) {
- return simplifyType(Type(ty))->getCanonicalType();
+ Type substituteGenericParamsAndSimpify(Type ty);
+ CanType substituteGenericParamsAndSimpify(CanType ty) {
+ return substituteGenericParamsAndSimpify(Type(ty))->getCanonicalType();
}
SymbolicValue computeConstantValue(SILValue value);
SymbolicValue computeConstantValueBuiltin(BuiltinInst *inst);
@@ -108,12 +117,16 @@
llvm::Optional<SymbolicValue> computeOpaqueCallResult(ApplyInst *apply,
SILFunction *callee);
+ SymbolicValue getConstAddrAndLoadResult(SILValue addr);
+ SymbolicValue loadAddrValue(SILValue addr, SymbolicValue addrVal);
+ llvm::Optional<SymbolicValue> computeFSStore(SymbolicValue storedCst,
+ SILValue dest);
};
} // end anonymous namespace
/// Simplify the specified type based on knowledge of substitutions if we have
/// any.
-Type ConstExprFunctionState::simplifyType(Type ty) {
+Type ConstExprFunctionState::substituteGenericParamsAndSimpify(Type ty) {
return substitutionMap.empty() ? ty : ty.subst(substitutionMap);
}
@@ -133,7 +146,8 @@
// types.
if (auto *mti = dyn_cast<MetatypeInst>(value)) {
auto metatype = mti->getType().castTo<MetatypeType>();
- auto type = simplifyType(metatype->getInstanceType())->getCanonicalType();
+ auto type = substituteGenericParamsAndSimpify(metatype->getInstanceType())
+ ->getCanonicalType();
return SymbolicValue::getMetatype(type);
}
@@ -174,6 +188,39 @@
return SymbolicValue::getAggregate(elts, evaluator.getASTContext());
}
+ // If this is a struct or tuple element addressor, compute a more derived
+ // address.
+ if (isa<StructElementAddrInst>(value) || isa<TupleElementAddrInst>(value)) {
+ auto inst = cast<SingleValueInstruction>(value);
+ auto baseAddr = getConstantValue(inst->getOperand(0));
+ if (!baseAddr.isConstant())
+ return baseAddr;
+
+ SmallVector<unsigned, 4> accessPath;
+ auto *memObject = baseAddr.getAddressValue(accessPath);
+
+ // Add our index onto the end of the list.
+ unsigned index;
+ if (auto sea = dyn_cast<StructElementAddrInst>(inst))
+ index = sea->getFieldNo();
+ else
+ index = cast<TupleElementAddrInst>(inst)->getFieldNo();
+ accessPath.push_back(index);
+ return SymbolicValue::getAddress(memObject, accessPath,
+ evaluator.getASTContext());
+ }
+
+ // If this is a load, then we either have computed the value of the memory
+ // already (when analyzing the body of a function in a flow-sensitive
+ // fashion), or this is the indirect result of a call. Either way, we ask for
+ // the value of the pointer. In the former case, this will be the latest
+ // value of the memory. In the latter case, the call must be the only
+ // store to the address so that the memory object can be computed by
+ // recursively processing the allocation and call instructions in a
+ // demand-driven fashion.
+ if (auto li = dyn_cast<LoadInst>(value))
+ return getConstAddrAndLoadResult(li->getOperand());
+
if (auto *builtin = dyn_cast<BuiltinInst>(value))
return computeConstantValueBuiltin(builtin);
@@ -188,6 +235,10 @@
return calculatedValues[apply];
}
+ // This instruction is a marker that returns its first operand.
+ if (auto *bai = dyn_cast<BeginAccessInst>(value))
+ return getConstantValue(bai->getOperand());
+
LLVM_DEBUG(llvm::dbgs() << "ConstExpr Unknown simple: " << *value << "\n");
// Otherwise, we don't know how to handle this.
@@ -521,6 +572,143 @@
return result;
}
+/// Given an aggregate value like {{1, 2}, 3} and an access path like [0,1], and
+/// a new element like 4, return the aggregate value with the indexed element
+/// replaced with the new element, producing {{1, 4}, 3} in this case.
+/// If `writeOnlyOnce` is true, and the target aggregate element to update
+/// already has a constant value, fail on the update.
+///
+/// This returns true on failure and false on success.
+///
+static bool updateIndexedElement(SymbolicValue &aggregate,
+ ArrayRef<unsigned> indices,
+ SymbolicValue newElement, Type type,
+ bool writeOnlyOnce,
+ ASTContext &astContext) {
+ // We're done if we've run out of indices.
+ if (indices.empty()) {
+ aggregate = newElement;
+ return false;
+ }
+
+ // If we have an uninit memory, then scalarize it into an aggregate to
+ // continue. This happens when memory objects are initialized piecewise.
+ if (aggregate.getKind() == SymbolicValue::UninitMemory) {
+ unsigned numMembers;
+ // We need to have either a struct or a tuple type.
+ if (auto *decl = type->getStructOrBoundGenericStruct()) {
+ numMembers = std::distance(decl->getStoredProperties().begin(),
+ decl->getStoredProperties().end());
+ } else if (auto tuple = type->getAs<TupleType>()) {
+ numMembers = tuple->getNumElements();
+ } else {
+ return true;
+ }
+
+ SmallVector<SymbolicValue, 4> newElts(numMembers,
+ SymbolicValue::getUninitMemory());
+ aggregate = SymbolicValue::getAggregate(newElts, astContext);
+ }
+
+ unsigned elementNo = indices.front();
+
+ // If we have a non-aggregate then fail.
+ if (aggregate.getKind() != SymbolicValue::Aggregate)
+ return true;
+
+ ArrayRef<SymbolicValue> oldElts;
+ Type eltType;
+
+ // We need to have a struct or a tuple type.
+ oldElts = aggregate.getAggregateValue();
+
+ if (auto *decl = type->getStructOrBoundGenericStruct()) {
+ auto it = decl->getStoredProperties().begin();
+ std::advance(it, elementNo);
+ eltType = (*it)->getType();
+ } else if (auto tuple = type->getAs<TupleType>()) {
+ assert(elementNo < tuple->getNumElements() && "invalid index");
+ eltType = tuple->getElement(elementNo).getType();
+ } else {
+ return true;
+ }
+
+ if (writeOnlyOnce &&
+ oldElts[elementNo].getKind() != SymbolicValue::UninitMemory) {
+ // Cannot overwrite an existing constant.
+ return true;
+ }
+
+ // Update the indexed element of the aggregate.
+ SmallVector<SymbolicValue, 4> newElts(oldElts.begin(), oldElts.end());
+ if (updateIndexedElement(newElts[elementNo], indices.drop_front(), newElement,
+ eltType, writeOnlyOnce, astContext))
+ return true;
+
+ aggregate = SymbolicValue::getAggregate(newElts, astContext);
+ return false;
+}
+
+/// Given the operand to a load, resolve it to a constant if possible.
+SymbolicValue ConstExprFunctionState::getConstAddrAndLoadResult(SILValue addr) {
+ auto addrVal = getConstantValue(addr);
+ if (!addrVal.isConstant())
+ return addrVal;
+
+ return loadAddrValue(addr, addrVal);
+}
+
+/// Load and return the underlying (const) object whose address is given by
+/// `addrVal`. On error, return a message based on `addr`.
+SymbolicValue ConstExprFunctionState::loadAddrValue(SILValue addr,
+ SymbolicValue addrVal) {
+ SmallVector<unsigned, 4> accessPath;
+ auto *memoryObject = addrVal.getAddressValue(accessPath);
+
+ // If this is a derived address, then we are digging into an aggregate
+ // value.
+ auto objectVal = memoryObject->getValue();
+
+ // Try digging through the aggregate to get to our value.
+ unsigned idx = 0, end = accessPath.size();
+ while (idx != end && objectVal.getKind() == SymbolicValue::Aggregate) {
+ objectVal = objectVal.getAggregateValue()[accessPath[idx]];
+ ++idx;
+ }
+
+ // If we successfully indexed down to our value, then we're done.
+ if (idx == end)
+ return objectVal;
+
+ // If the memory object had a reason, return it.
+ if (objectVal.isUnknown())
+ return objectVal;
+
+ // Otherwise, return a generic failure.
+ return evaluator.getUnknown(addr, UnknownReason::Default);
+}
+
+/// Evaluate a flow sensitive store to the specified pointer address.
+llvm::Optional<SymbolicValue>
+ConstExprFunctionState::computeFSStore(SymbolicValue storedCst, SILValue dest) {
+ // Only update existing memory locations that we're tracking.
+ auto it = calculatedValues.find(dest);
+ if (it == calculatedValues.end() || !it->second.isConstant())
+ return evaluator.getUnknown(dest, UnknownReason::Default);
+
+ SmallVector<unsigned, 4> accessPath;
+ auto *memoryObject = it->second.getAddressValue(accessPath);
+ auto objectVal = memoryObject->getValue();
+ auto objectType = memoryObject->getType();
+
+ if (updateIndexedElement(objectVal, accessPath, storedCst, objectType,
+ /*writeOnlyOnce*/ false, evaluator.getASTContext()))
+ return evaluator.getUnknown(dest, UnknownReason::Default);
+
+ memoryObject->setValue(objectVal);
+ return None;
+}
+
/// Evaluate the specified instruction in a flow sensitive way, for use by
/// the constexpr function evaluator. This does not handle control flow
/// statements. This returns None on success, and an Unknown SymbolicValue with
@@ -537,6 +725,19 @@
isa<StrongReleaseInst>(inst))
return None;
+ // If this is a special flow-sensitive instruction like a stack allocation,
+ // store, copy_addr, etc, we handle it specially here.
+ if (auto asi = dyn_cast<AllocStackInst>(inst)) {
+ createMemoryObject(asi, SymbolicValue::getUninitMemory());
+ return None;
+ }
+
+ // If this is a deallocation of a memory object that we are tracking, then
+ // don't do anything. The memory is allocated in a BumpPtrAllocator so there
+ // is no useful way to free it.
+ if (isa<DeallocStackInst>(inst))
+ return None;
+
if (isa<CondFailInst>(inst)) {
auto failed = getConstantValue(inst->getOperand(0));
if (failed.getKind() == SymbolicValue::Integer) {
@@ -551,6 +752,23 @@
if (auto apply = dyn_cast<ApplyInst>(inst))
return computeCallResult(apply);
+ if (isa<StoreInst>(inst)) {
+ auto stored = getConstantValue(inst->getOperand(0));
+ if (!stored.isConstant())
+ return stored;
+
+ return computeFSStore(stored, inst->getOperand(1));
+ }
+
+ // Copy addr is a load + store combination.
+ if (auto *copy = dyn_cast<CopyAddrInst>(inst)) {
+ auto value = getConstAddrAndLoadResult(copy->getOperand(0));
+ if (!value.isConstant())
+ return value;
+
+ return computeFSStore(value, copy->getOperand(1));
+ }
+
// If the instruction produces normal results, try constant folding it.
// If this fails, then we fail.
if (inst->getNumResults() != 0) {
diff --git a/lib/Sema/CSApply.cpp b/lib/Sema/CSApply.cpp
index b1f54ff..d3ec843 100644
--- a/lib/Sema/CSApply.cpp
+++ b/lib/Sema/CSApply.cpp
@@ -381,14 +381,15 @@
// Build and type check the string literal index value to the specific
// string type expected by the subscript.
Expr *nameExpr = new (ctx) StringLiteralExpr(name, loc, /*implicit*/true);
+ (void)cs.TC.typeCheckExpression(nameExpr, dc);
+ cs.cacheExprTypes(nameExpr);
// Build a tuple so that the argument has a label.
Expr *tuple = TupleExpr::create(ctx, loc, nameExpr, ctx.Id_dynamicMember,
loc, loc, /*hasTrailingClosure*/false,
/*implicit*/true);
- (void)cs.TC.typeCheckExpression(tuple, dc, TypeLoc::withoutLoc(ty),
- CTP_CallArgument);
- cs.cacheExprTypes(tuple);
+ cs.setType(tuple, ty);
+ tuple->setType(ty);
return tuple;
}
@@ -2802,23 +2803,14 @@
return simplifyExprType(expr);
}
- Type subExprType = cs.getType(expr->getSubExpr());
- Type targetType = simplifyType(subExprType);
-
- // If the subexpression is not optional, wrap it in
- // an InjectIntoOptionalExpr. Then use the type of the
- // subexpression as the type of the 'try?' expr
- bool subExprIsOptional = (bool) subExprType->getOptionalObjectType();
-
- if (!subExprIsOptional) {
- targetType = OptionalType::get(targetType);
- auto subExpr = coerceToType(expr->getSubExpr(), targetType,
- cs.getConstraintLocator(expr));
- if (!subExpr) return nullptr;
- expr->setSubExpr(subExpr);
- }
-
- cs.setType(expr, targetType);
+ Type exprType = simplifyType(cs.getType(expr));
+
+ auto subExpr = coerceToType(expr->getSubExpr(), exprType,
+ cs.getConstraintLocator(expr));
+ if (!subExpr) return nullptr;
+ expr->setSubExpr(subExpr);
+
+ cs.setType(expr, exprType);
return expr;
}
@@ -4419,13 +4411,17 @@
auto loc = origComponent.getLoc();
auto fieldName =
foundDecl->choice.getName().getBaseIdentifier().str();
- auto index = buildDynamicMemberLookupIndexExpr(fieldName, indexType,
- loc, dc, cs);
-
+
+ Expr *nameExpr = new (ctx) StringLiteralExpr(fieldName, loc,
+ /*implicit*/true);
+ (void)cs.TC.typeCheckExpression(nameExpr, dc);
+ cs.cacheExprTypes(nameExpr);
+
origComponent = KeyPathExpr::Component::
- forUnresolvedSubscript(ctx, loc, index, {}, loc, loc,
- /*trailingClosure*/nullptr);
- cs.setType(origComponent.getIndexExpr(), index->getType());
+ forUnresolvedSubscript(ctx, loc,
+ {nameExpr}, {ctx.Id_dynamicMember}, {loc},
+ loc, /*trailingClosure*/nullptr);
+ cs.setType(origComponent.getIndexExpr(), indexType);
}
auto subscriptType =
diff --git a/lib/Sema/CSRanking.cpp b/lib/Sema/CSRanking.cpp
index 66bdc3d..1bf2045 100644
--- a/lib/Sema/CSRanking.cpp
+++ b/lib/Sema/CSRanking.cpp
@@ -1133,17 +1133,6 @@
// The systems are not considered equivalent.
identical = false;
- // If one type is convertible to of the other, but not vice-versa.
- type1Better = tc.isConvertibleTo(type1, type2, cs.DC);
- type2Better = tc.isConvertibleTo(type2, type1, cs.DC);
- if (type1Better || type2Better) {
- if (type1Better)
- ++score1;
- if (type2Better)
- ++score2;
- continue;
- }
-
// A concrete type is better than an archetype.
// FIXME: Total hack.
if (type1->is<ArchetypeType>() != type2->is<ArchetypeType>()) {
diff --git a/lib/Sema/MiscDiagnostics.cpp b/lib/Sema/MiscDiagnostics.cpp
index 2f45401..225761e 100644
--- a/lib/Sema/MiscDiagnostics.cpp
+++ b/lib/Sema/MiscDiagnostics.cpp
@@ -203,10 +203,6 @@
while (auto Conv = dyn_cast<ImplicitConversionExpr>(Base))
Base = Conv->getSubExpr();
- // Record call arguments.
- if (auto Call = dyn_cast<CallExpr>(Base))
- CallArgs.insert(Call->getArg());
-
if (auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
// Verify metatype uses.
if (isa<TypeDecl>(DRE->getDecl())) {
@@ -235,7 +231,14 @@
if (isa<TypeExpr>(Base))
checkUseOfMetaTypeName(Base);
+ if (auto *TSE = dyn_cast<TupleShuffleExpr>(E)) {
+ if (CallArgs.count(TSE))
+ CallArgs.insert(TSE->getSubExpr());
+ }
+
if (auto *SE = dyn_cast<SubscriptExpr>(E)) {
+ CallArgs.insert(SE->getIndex());
+
// Implicit InOutExpr's are allowed in the base of a subscript expr.
if (auto *IOE = dyn_cast<InOutExpr>(SE->getBase()))
if (IOE->isImplicit())
@@ -248,6 +251,13 @@
});
}
+ if (auto *KPE = dyn_cast<KeyPathExpr>(E)) {
+ for (auto Comp : KPE->getComponents()) {
+ if (auto *Arg = Comp.getIndexExpr())
+ CallArgs.insert(Arg);
+ }
+ }
+
if (auto *AE = dyn_cast<CollectionExpr>(E)) {
visitCollectionElements(AE, [&](unsigned argIndex, Expr *arg) {
arg = lookThroughArgument(arg);
@@ -266,6 +276,9 @@
// Check function calls, looking through implicit conversions on the
// function and inspecting the arguments directly.
if (auto *Call = dyn_cast<ApplyExpr>(E)) {
+ // Record call arguments.
+ CallArgs.insert(Call->getArg());
+
// Warn about surprising implicit optional promotions.
checkOptionalPromotions(Call);
@@ -381,6 +394,18 @@
}
}
+ // Diagnose single-element tuple expressions.
+ if (auto *tupleExpr = dyn_cast<TupleExpr>(E)) {
+ if (!CallArgs.count(tupleExpr)) {
+ if (tupleExpr->getNumElements() == 1) {
+ TC.diagnose(tupleExpr->getElementNameLoc(0),
+ diag::tuple_single_element)
+ .fixItRemoveChars(tupleExpr->getElementNameLoc(0),
+ tupleExpr->getElement(0)->getStartLoc());
+ }
+ }
+ }
+
return { true, E };
}
diff --git a/lib/Sema/TypeCheckGeneric.cpp b/lib/Sema/TypeCheckGeneric.cpp
index 5c731e2..f059850 100644
--- a/lib/Sema/TypeCheckGeneric.cpp
+++ b/lib/Sema/TypeCheckGeneric.cpp
@@ -710,11 +710,11 @@
if (recursivelyVisitGenericParams) {
visitOuterToInner(genericParams,
[&](GenericParamList *gpList) {
- auto genericParamsDC = gpList->begin()[0]->getDeclContext();
- TypeResolution structuralResolution =
- TypeResolution::forStructural(genericParamsDC);
- checkGenericParamList(*this, &builder, gpList, nullptr,
- structuralResolution);
+ auto genericParamsDC = gpList->begin()[0]->getDeclContext();
+ TypeResolution structuralResolution =
+ TypeResolution::forStructural(genericParamsDC);
+ checkGenericParamList(*this, &builder, gpList, nullptr,
+ structuralResolution);
});
} else {
auto genericParamsDC = genericParams->begin()[0]->getDeclContext();
diff --git a/stdlib/public/Platform/visualc.apinotes b/stdlib/public/Platform/visualc.apinotes
new file mode 100644
index 0000000..88632f3
--- /dev/null
+++ b/stdlib/public/Platform/visualc.apinotes
@@ -0,0 +1,7 @@
+---
+Name: VisualC
+Functions:
+- Name: _setjmp
+ Availability: nonswift
+ AvailabilityMsg: 'Functions that return more than once are unavailable in swift'
+
diff --git a/stdlib/public/core/SIMDVectorTypes.swift.gyb b/stdlib/public/core/SIMDVectorTypes.swift.gyb
index b5d91ce..d079d77 100644
--- a/stdlib/public/core/SIMDVectorTypes.swift.gyb
+++ b/stdlib/public/core/SIMDVectorTypes.swift.gyb
@@ -109,6 +109,14 @@
}
}
+extension SIMD${n} : CustomDebugStringConvertible {
+ /// Debug string representation
+ public var debugDescription: String {
+ return "SIMD${n}<\(Scalar.self)>(${', '.join(map(lambda c:
+ '\\(self['+ str(c) + '])',
+ xrange(n)))})"
+ }
+}
public extension SIMD${n} where Scalar : BinaryFloatingPoint {
@inlinable
diff --git a/test/ClangImporter/availability_returns_twice-msvc.swift b/test/ClangImporter/availability_returns_twice-msvc.swift
new file mode 100644
index 0000000..36bee94
--- /dev/null
+++ b/test/ClangImporter/availability_returns_twice-msvc.swift
@@ -0,0 +1,11 @@
+// RUN: %target-typecheck-verify-swift
+// REQUIRES: OS=windows-msvc
+
+import MSVCRT
+typealias JumpBuffer = _JBTYPE
+
+func test_unavailable_returns_twice_function() {
+ var x: JumpBuffer
+ _ = _setjmp(&x) // expected-error {{'_setjmp' is unavailable in Swift: Functions that return more than once are unavailable in swift}}
+}
+
diff --git a/test/ClangImporter/availability_returns_twice.swift b/test/ClangImporter/availability_returns_twice.swift
index 05de4ea..4c4d39a 100644
--- a/test/ClangImporter/availability_returns_twice.swift
+++ b/test/ClangImporter/availability_returns_twice.swift
@@ -1,4 +1,5 @@
// RUN: %target-typecheck-verify-swift
+// UNSUPPORTED: OS=windows-msvc
#if os(macOS) || os(iOS) || os(watchOS) || os(tvOS)
import Darwin
@@ -6,9 +7,6 @@
#elseif os(Android) || os(Cygwin) || os(FreeBSD) || os(Linux)
import Glibc
typealias JumpBuffer = jmp_buf
-#elseif os(Windows)
- import MSVCRT
- typealias JumpBuffer = jmp_buf
#endif
func test_unavailable_returns_twice_function() {
diff --git a/test/Constraints/tuple.swift b/test/Constraints/tuple.swift
index 171c771..d3d9c14 100644
--- a/test/Constraints/tuple.swift
+++ b/test/Constraints/tuple.swift
@@ -56,7 +56,7 @@
// Tuples with existentials
var any : Any = ()
any = (1, 2)
-any = (label: 4)
+any = (label: 4) // expected-error {{cannot create a single-element tuple with an element label}}
// Scalars don't have .0/.1/etc
i = j.0 // expected-error{{value of type 'Int' has no member '0'}}
@@ -252,3 +252,11 @@
let y = ""
return b ? (x, y) : nil
}
+
+// Single element tuple expressions
+func singleElementTuple() {
+ let _ = (label: 123) // expected-error {{cannot create a single-element tuple with an element label}} {{12-19=}}
+ let _ = (label: 123).label // expected-error {{cannot create a single-element tuple with an element label}} {{12-19=}}
+ let _ = ((label: 123)) // expected-error {{cannot create a single-element tuple with an element label}} {{13-20=}}
+ let _ = ((label: 123)).label // expected-error {{cannot create a single-element tuple with an element label}} {{13-20=}}
+}
\ No newline at end of file
diff --git a/test/IRGen/lazy_field_metadata.swift b/test/IRGen/lazy_field_metadata.swift
new file mode 100644
index 0000000..702957e
--- /dev/null
+++ b/test/IRGen/lazy_field_metadata.swift
@@ -0,0 +1,18 @@
+// RUN: %target-swift-frontend -emit-ir -wmo -O %s | %FileCheck %s
+
+// Both should be emitted:
+
+// CHECK: @"$s19lazy_field_metadata011GenericWithD5FieldVMn" = hidden constant
+// CHECK: @"$s19lazy_field_metadata24GenericWithConcreteFieldVMn" = hidden constant
+
+struct GenericWithConcreteField<T> {
+ let z = 123
+}
+
+struct GenericWithGenericField<T> {
+ var field = GenericWithConcreteField<T>()
+}
+
+public func forceMetadata() -> Any.Type {
+ return GenericWithGenericField<Int>.self
+}
\ No newline at end of file
diff --git a/test/Interpreter/SDK/multi-file-imported-enum.swift b/test/Interpreter/SDK/multi-file-imported-enum.swift
index 0a72ae6..3123c11 100644
--- a/test/Interpreter/SDK/multi-file-imported-enum.swift
+++ b/test/Interpreter/SDK/multi-file-imported-enum.swift
@@ -2,9 +2,9 @@
// RUN: %target-build-swift -module-name test -whole-module-optimization %s %S/Inputs/multi-file-imported-enum/main.swift -o %t/a.out
// RUN: %target-codesign %t/a.out
// RUN: %target-run %t/a.out | %FileCheck %s
-// REQUIRES: executable_test
-// XFAIL: linux
+// REQUIRES: executable_test
+// REQUIRES: objc_interop
import Foundation
diff --git a/test/Interpreter/builtin_bridge_object.swift b/test/Interpreter/builtin_bridge_object.swift
index f92b6bf..a49ed02 100644
--- a/test/Interpreter/builtin_bridge_object.swift
+++ b/test/Interpreter/builtin_bridge_object.swift
@@ -2,10 +2,11 @@
// RUN: %target-build-swift -parse-stdlib %s -o %t/a.out
// RUN: %target-codesign %t/a.out
// RUN: %target-run %t/a.out | %FileCheck %s
+
// REQUIRES: executable_test
+// REQUIRES: objc_interop
// FIXME: rdar://problem/19648117 Needs splitting objc parts out
-// XFAIL: linux
import Swift
import SwiftShims
diff --git a/test/Interpreter/varargs.swift b/test/Interpreter/varargs.swift
index faf2746..1fe3689 100644
--- a/test/Interpreter/varargs.swift
+++ b/test/Interpreter/varargs.swift
@@ -1,7 +1,7 @@
// RUN: %target-run-simple-swift | %FileCheck %s
-// REQUIRES: executable_test
-// XFAIL: linux
+// REQUIRES: executable_test
+// REQUIRES: objc_interop
import Foundation
diff --git a/test/Parse/try.swift b/test/Parse/try.swift
index 54a2aa2..f58f4a0 100644
--- a/test/Parse/try.swift
+++ b/test/Parse/try.swift
@@ -258,3 +258,14 @@
let _: Int?? = try? producer.produceDoubleOptionalInt() // expected-error {{value of optional type 'Int???' not unwrapped; did you mean to use 'try!' or chain with '?'?}}
let _: Int??? = try? producer.produceDoubleOptionalInt() // good
let _: String = try? producer.produceDoubleOptionalInt() // expected-error {{cannot convert value of type 'Int???' to specified type 'String'}}
+
+// rdar://problem/46742002
+protocol Dummy : class {}
+
+class F<T> {
+ func wait() throws -> T { fatalError() }
+}
+
+func bar(_ a: F<Dummy>, _ b: F<Dummy>) {
+ _ = (try? a.wait()) === (try? b.wait())
+}
diff --git a/test/Parse/try_swift5.swift b/test/Parse/try_swift5.swift
index 6d463a0..ec05752 100644
--- a/test/Parse/try_swift5.swift
+++ b/test/Parse/try_swift5.swift
@@ -261,3 +261,14 @@
let _: Int?? = try? producer.produceDoubleOptionalInt() // good
let _: Int??? = try? producer.produceDoubleOptionalInt() // good
let _: String = try? producer.produceDoubleOptionalInt() // expected-error {{cannot convert value of type 'Int??' to specified type 'String'}}
+
+// rdar://problem/46742002
+protocol Dummy : class {}
+
+class F<T> {
+ func wait() throws -> T { fatalError() }
+}
+
+func bar(_ a: F<Dummy>, _ b: F<Dummy>) {
+ _ = (try? a.wait()) === (try? b.wait())
+}
diff --git a/test/SILGen/assignment.swift b/test/SILGen/assignment.swift
index efd0029..8765156 100644
--- a/test/SILGen/assignment.swift
+++ b/test/SILGen/assignment.swift
@@ -49,3 +49,12 @@
// CHECK: end_access [[WRITE]] : $*P
p.left = p.right
}
+
+// SR-5919
+func stupidGames() -> ((), ()) {
+ return ((), ())
+}
+
+func assignToNestedVoid() {
+ let _: ((), ()) = stupidGames()
+}
\ No newline at end of file
diff --git a/test/SILGen/default_arguments.swift b/test/SILGen/default_arguments.swift
index a795fa0..48bfaab 100644
--- a/test/SILGen/default_arguments.swift
+++ b/test/SILGen/default_arguments.swift
@@ -367,3 +367,19 @@
defaultEscaping()
autoclosureDefaultEscaping()
}
+
+func tupleDefaultArg(x: (Int, Int) = (1, 2)) {}
+
+// CHECK-LABEL: sil hidden @$s17default_arguments19callTupleDefaultArgyyF : $@convention(thin) () -> ()
+// CHECK: function_ref @$s17default_arguments15tupleDefaultArg1xySi_Sit_tFfA_ : $@convention(thin) () -> (Int, Int)
+// CHECK: function_ref @$s17default_arguments15tupleDefaultArg1xySi_Sit_tF : $@convention(thin) (Int, Int) -> ()
+// CHECK: return
+func callTupleDefaultArg() {
+ tupleDefaultArg()
+}
+
+// FIXME: Should this be banned?
+func stupidGames(x: Int = 3) -> Int {
+ return x
+}
+stupidGames(x:)()
\ No newline at end of file
diff --git a/test/SILOptimizer/pound_assert.sil b/test/SILOptimizer/pound_assert.sil
index a91cea7..38742ae 100644
--- a/test/SILOptimizer/pound_assert.sil
+++ b/test/SILOptimizer/pound_assert.sil
@@ -29,3 +29,77 @@
%5 = builtin "poundAssert"(%3 : $Builtin.Int1, %4 : $Builtin.RawPointer) : $()
return undef : $()
}
+
+// Tests that piecewise initialization of memory works, by piecewise
+// initializing a tuple.
+sil @piecewiseInit : $@convention(thin) () -> Bool {
+bb0:
+ // Allocate and initialize the tuple to (1, 2).
+ %0 = alloc_stack $(Int64, Int64), var, name "tup"
+ %1 = tuple_element_addr %0 : $*(Int64, Int64), 0
+ %2 = tuple_element_addr %0 : $*(Int64, Int64), 1
+ %3 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%3 : $Builtin.Int64)
+ store %4 to %1 : $*Int64
+ %6 = integer_literal $Builtin.Int64, 2
+ %7 = struct $Int64 (%6 : $Builtin.Int64)
+ store %7 to %2 : $*Int64
+
+ // Read the first element from the tuple.
+ %9 = begin_access [read] [static] %0 : $*(Int64, Int64)
+ %10 = tuple_element_addr %9 : $*(Int64, Int64), 0
+ %11 = load %10 : $*Int64
+ end_access %9 : $*(Int64, Int64)
+
+ // Check that the first element is what we put in.
+ %13 = struct_extract %11 : $Int64, #Int64._value
+ %14 = builtin "cmp_eq_Int64"(%3 : $Builtin.Int64, %13 : $Builtin.Int64) : $Builtin.Int1
+ %15 = struct $Bool (%14 : $Builtin.Int1)
+
+ // Deallocate and return.
+ dealloc_stack %0 : $*(Int64, Int64)
+ return %15 : $Bool
+}
+
+// Tests copy_addr interpretation.
+sil @copyAddr : $@convention(thin) () -> Bool {
+ // Allocate and initialize an Int64 to 1.
+ %0 = alloc_stack $Int64
+ %1 = integer_literal $Builtin.Int64, 1
+ %2 = struct $Int64 (%1 : $Builtin.Int64)
+ store %2 to %0 : $*Int64
+
+ // Allocate another Int64 and copy to it.
+ %4 = alloc_stack $Int64
+ copy_addr %0 to %4 : $*Int64
+
+ // Check that the value is what we put in the original Int64.
+ %5 = begin_access [read] [static] %4 : $*Int64
+ %6 = load %5 : $*Int64
+ end_access %5 : $*Int64
+ %8 = struct_extract %6 : $Int64, #Int64._value
+ %9 = builtin "cmp_eq_Int64"(%1 : $Builtin.Int64, %8 : $Builtin.Int64) : $Builtin.Int1
+ %10 = struct $Bool (%9 : $Builtin.Int1)
+
+ // Deallocate and return.
+ dealloc_stack %4 : $*Int64
+ dealloc_stack %0 : $*Int64
+ return %10 : $Bool
+}
+
+sil @invokeTests : $@convention(thin) () -> () {
+ %0 = function_ref @piecewiseInit : $@convention(thin) () -> Bool
+ %1 = apply %0() : $@convention(thin) () -> Bool
+ %2 = struct_extract %1 : $Bool, #Bool._value
+ %3 = string_literal utf8 ""
+ %4 = builtin "poundAssert"(%2 : $Builtin.Int1, %3 : $Builtin.RawPointer) : $()
+
+ %5 = function_ref @copyAddr : $@convention(thin) () -> Bool
+ %6 = apply %5() : $@convention(thin) () -> Bool
+ %7 = struct_extract %6 : $Bool, #Bool._value
+ %8 = string_literal utf8 ""
+ %9 = builtin "poundAssert"(%7 : $Builtin.Int1, %8 : $Builtin.RawPointer) : $()
+
+ %ret = tuple ()
+ return %ret : $()
+}
diff --git a/test/SILOptimizer/pound_assert.swift b/test/SILOptimizer/pound_assert.swift
index d959385..297aa1f 100644
--- a/test/SILOptimizer/pound_assert.swift
+++ b/test/SILOptimizer/pound_assert.swift
@@ -1,4 +1,4 @@
-// RUN: %target-swift-frontend -enable-experimental-static-assert -emit-sil %s -verify -Xllvm -debug -Xllvm -debug-only -Xllvm ConstExpr
+// RUN: %target-swift-frontend -enable-experimental-static-assert -emit-sil %s -verify
// REQUIRES: asserts
@@ -25,8 +25,23 @@
#assert(isOne(Int(readLine()!)!), "input is not 1") // expected-error{{#assert condition not constant}}
}
-// We don't support mutation, so the only loop we can make is infinite.
-// TODO: As soon as we support mutation, add tests with finite loops.
+func loops1(a: Int) -> Int {
+ var x = 42
+ while x <= 42 {
+ x += a
+ } // expected-note {{control flow loop found}}
+ return x
+}
+
+func loops2(a: Int) -> Int {
+ var x = 42
+ // expected-note @+1 {{could not fold operation}}
+ for i in 0 ... a {
+ x += i
+ }
+ return x
+}
+
func infiniteLoop() -> Int {
// expected-note @+2 {{condition always evaluates to true}}
// expected-note @+1 {{control flow loop found}}
@@ -35,7 +50,15 @@
return 1
}
-func test_infiniteLoop() {
+func test_loops() {
+ // expected-error @+2 {{#assert condition not constant}}
+ // expected-note @+1 {{when called from here}}
+ #assert(loops1(a: 20000) > 42)
+
+ // expected-error @+2 {{#assert condition not constant}}
+ // expected-note @+1 {{when called from here}}
+ #assert(loops2(a: 20000) > 42)
+
// expected-error @+2 {{#assert condition not constant}}
// expected-note @+1 {{when called from here}}
#assert(infiniteLoop() == 1)
@@ -82,12 +105,12 @@
var topLevelVar = 1 // expected-warning {{never mutated}}
#assert(topLevelVar == 1)
+ // expected-note @+1 {{could not fold operation}}
var topLevelVarConditionallyMutated = 1
if topLevelVarConditionallyMutated < 0 {
topLevelVarConditionallyMutated += 1
}
- // expected-error @+2 {{#assert condition not constant}}
- // expected-note @+1 {{could not fold operation}}
+ // expected-error @+1 {{#assert condition not constant}}
#assert(topLevelVarConditionallyMutated == 1)
// expected-error @+1 {{#assert condition not constant}}
@@ -156,3 +179,47 @@
#assert(cs.x.1 == 2)
#assert(cs.y == 3)
}
+
+//===----------------------------------------------------------------------===//
+// Mutation
+//===----------------------------------------------------------------------===//
+
+struct InnerStruct {
+ var a, b: Int
+}
+
+struct MutableStruct {
+ var x: InnerStruct
+ var y: Int
+}
+
+func addOne(to target: inout Int) {
+ target += 1
+}
+
+func callInout() -> Bool {
+ var myMs = MutableStruct(x: InnerStruct(a: 1, b: 2), y: 3)
+ addOne(to: &myMs.x.a)
+ addOne(to: &myMs.y)
+ return (myMs.x.a + myMs.x.b + myMs.y) == 8
+}
+
+func replaceAggregate() -> Bool {
+ var myMs = MutableStruct(x: InnerStruct(a: 1, b: 2), y: 3)
+ myMs.x = InnerStruct(a: 10, b: 20)
+ return myMs.x.a == 10 && myMs.x.b == 20 && myMs.y == 3
+}
+
+func shouldNotAlias() -> Bool {
+ var x = 1
+ var y = x
+ x += 1
+ y += 2
+ return x == 2 && y == 3
+}
+
+func invokeMutationTests() {
+ #assert(callInout())
+ #assert(replaceAggregate())
+ #assert(shouldNotAlias())
+}
diff --git a/test/decl/enum/enumtest.swift b/test/decl/enum/enumtest.swift
index c04ae3f..ae86aed 100644
--- a/test/decl/enum/enumtest.swift
+++ b/test/decl/enum/enumtest.swift
@@ -164,9 +164,9 @@
// Dot syntax.
_ = x2.origin.x
_ = x1.size.area()
- _ = (r : x1.size).r.area()
+ _ = (r : x1.size).r.area() // expected-error {{cannot create a single-element tuple with an element label}}
_ = x1.size.area()
- _ = (r : x1.size).r.area()
+ _ = (r : x1.size).r.area() // expected-error {{cannot create a single-element tuple with an element label}}
_ = x1.area
diff --git a/test/stdlib/simd.swift.gyb b/test/stdlib/simd.swift.gyb
index afadc16..84bedf3 100644
--- a/test/stdlib/simd.swift.gyb
+++ b/test/stdlib/simd.swift.gyb
@@ -313,5 +313,10 @@
% end # for type
}
+simdTestSuite.test("debug description") {
+ expectEqual("SIMD2<Float>(1.0, 2.5)",
+ SIMD2<Float>(1.0, 2.5).debugDescription)
+}
+
runAllTests()
diff --git a/validation-test/Sema/type_checker_perf/fast/rdar21720888.swift.gyb b/validation-test/Sema/type_checker_perf/fast/rdar21720888.swift.gyb
index 080fa45..e92c370 100644
--- a/validation-test/Sema/type_checker_perf/fast/rdar21720888.swift.gyb
+++ b/validation-test/Sema/type_checker_perf/fast/rdar21720888.swift.gyb
@@ -4,6 +4,6 @@
_ = [
%for i in range(0, N):
- (label: "string"),
+ (label: "string", another: 123),
%end
]