Merge pull request #14034 from graydon/batch-mode-driver-work
NFC: Batch mode driver work
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0a94fd6..1b1a8e6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,7 @@
| Contents |
| :--------------------- |
+| [Swift 5.0](#swift-50) |
| [Swift 4.1](#swift-41) |
| [Swift 4.0](#swift-40) |
| [Swift 3.1](#swift-31) |
@@ -19,6 +20,16 @@
</details>
+Swift 5.0
+---------
+
+* C macros containing casts are no longer imported to Swift if the type in the
+ cast is unavailable or deprecated, or produces some other diagnostic when
+ referenced. (These macros were already only imported under very limited
+ circumstances with very simple values, so this is unlikely to affect
+ real-world code.)
+
+
Swift 4.1
---------
diff --git a/docs/ABI/TypeMetadata.rst b/docs/ABI/TypeMetadata.rst
index 51724fa..727d03f 100644
--- a/docs/ABI/TypeMetadata.rst
+++ b/docs/ABI/TypeMetadata.rst
@@ -431,10 +431,12 @@
the flags. It specifies the number of requirements that do not have default
implementations.
- **Number of requirements** is stored as a 16-bit integer after the flags. It
- specifies the total number of requirements for the protocl.
+ specifies the total number of requirements for the protocol.
- **Requirements pointer** stored as a 32-bit relative pointer to an array
of protocol requirements. The number of elements in the array is specified
by the preceding 16-bit integer.
+- **Superclass pointer** stored as a 32-bit relative pointer to class metadata,
+ describing the superclass bound of the protocol.
- **Associated type names** stored as a 32-bit relative pointer to a
null-terminated string. The string contains the names of the associated
types, in the order they apparent in the requirements list, separated by
diff --git a/include/swift/ABI/MetadataValues.h b/include/swift/ABI/MetadataValues.h
index 4fda3ea..2f22a45 100644
--- a/include/swift/ABI/MetadataValues.h
+++ b/include/swift/ABI/MetadataValues.h
@@ -916,6 +916,9 @@
/// mutate the array to fill in the direct arguments.
constexpr unsigned NumDirectGenericTypeMetadataAccessFunctionArgs = 3;
+/// The offset (in pointers) to the first requirement in a witness table.
+constexpr unsigned WitnessTableFirstRequirementOffset = 1;
+
} // end namespace swift
#endif /* SWIFT_ABI_METADATAVALUES_H */
diff --git a/include/swift/AST/Types.h b/include/swift/AST/Types.h
index 6fa679e..5f9e55c 100644
--- a/include/swift/AST/Types.h
+++ b/include/swift/AST/Types.h
@@ -1480,6 +1480,8 @@
SugarType(TypeKind K, const ASTContext *ctx,
RecursiveTypeProperties properties)
: TypeBase(K, nullptr, properties), Context(ctx) {
+ if (K != TypeKind::NameAlias)
+ assert(ctx != nullptr && "Context for SugarType should not be null");
Bits.SugarType.HasCachedType = false;
}
diff --git a/include/swift/Frontend/ArgsToFrontendInputsConverter.h b/include/swift/Frontend/ArgsToFrontendInputsConverter.h
new file mode 100644
index 0000000..1dfd5d4
--- /dev/null
+++ b/include/swift/Frontend/ArgsToFrontendInputsConverter.h
@@ -0,0 +1,76 @@
+//===--- ArgsToFrontendInputsConverter.h ------------------------*- C++ -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SWIFT_FRONTEND_ARGSTOFRONTENDINPUTSCONVERTER_H
+#define SWIFT_FRONTEND_ARGSTOFRONTENDINPUTSCONVERTER_H
+
+#include "swift/AST/DiagnosticConsumer.h"
+#include "swift/AST/DiagnosticEngine.h"
+#include "swift/Frontend/FrontendOptions.h"
+#include "llvm/Option/ArgList.h"
+
+namespace swift {
+
+/// Implement argument semantics in a way that will make it easier to have
+/// >1 primary file (or even a primary file list) in the future without
+/// breaking anything today.
+///
+/// Semantics today:
+/// If input files are on command line, primary files on command line are also
+/// input files; they are not repeated without -primary-file. If input files are
+/// in a file list, the primary files on the command line are repeated in the
+/// file list. Thus, if there are any primary files, it is illegal to have both
+/// (non-primary) input files and a file list. Finally, the order of input files
+/// must match the order given on the command line or the file list.
+///
+/// Side note:
+/// since each input file will cause a lot of work for the compiler, this code
+/// is biased towards clarity and not optimized.
+/// In the near future, it will be possible to put primary files in the
+/// filelist, or to have a separate filelist for primaries. The organization
+/// here anticipates that evolution.
+
+class ArgsToFrontendInputsConverter {
+ DiagnosticEngine &Diags;
+ const llvm::opt::ArgList &Args;
+ FrontendInputs &Inputs;
+
+ llvm::opt::Arg const *const FilelistPathArg;
+ llvm::opt::Arg const *const PrimaryFilelistPathArg;
+
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> BuffersToKeepAlive;
+
+ llvm::SetVector<StringRef> Files;
+
+public:
+ ArgsToFrontendInputsConverter(DiagnosticEngine &diags,
+ const llvm::opt::ArgList &args,
+ FrontendInputs &inputs);
+
+ bool convert();
+
+private:
+ bool enforceFilelistExclusion();
+ bool readInputFilesFromCommandLine();
+ bool readInputFilesFromFilelist();
+ bool forAllFilesInFilelist(llvm::opt::Arg const *const pathArg,
+ llvm::function_ref<void(StringRef)> fn);
+ bool addFile(StringRef file);
+ Optional<std::set<StringRef>> readPrimaryFiles();
+ std::set<StringRef>
+ createInputFilesConsumingPrimaries(std::set<StringRef> primaryFiles);
+ bool checkForMissingPrimaryFiles(std::set<StringRef> primaryFiles);
+};
+
+} // namespace swift
+
+#endif /* SWIFT_FRONTEND_ARGSTOFRONTENDINPUTSCONVERTER_H */
diff --git a/include/swift/Frontend/ArgsToFrontendOptionsConverter.h b/include/swift/Frontend/ArgsToFrontendOptionsConverter.h
new file mode 100644
index 0000000..a3ce348
--- /dev/null
+++ b/include/swift/Frontend/ArgsToFrontendOptionsConverter.h
@@ -0,0 +1,98 @@
+//===--- ArgsToFrontendOptionsConverter.h -----------------------*- C++ -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SWIFT_FRONTEND_ARGSTOFRONTENDOPTIONSCONVERTER_H
+#define SWIFT_FRONTEND_ARGSTOFRONTENDOPTIONSCONVERTER_H
+
+#include "swift/AST/DiagnosticConsumer.h"
+#include "swift/AST/DiagnosticEngine.h"
+#include "swift/Frontend/FrontendOptions.h"
+#include "swift/Option/Options.h"
+#include "llvm/Option/ArgList.h"
+
+#include <vector>
+
+namespace swift {
+
+class ArgsToFrontendOptionsConverter {
+private:
+ DiagnosticEngine &Diags;
+ const llvm::opt::ArgList &Args;
+ FrontendOptions &Opts;
+
+ Optional<const std::vector<std::string>>
+ cachedOutputFilenamesFromCommandLineOrFilelist;
+
+ void handleDebugCrashGroupArguments();
+
+ void computeDebugTimeOptions();
+ bool computeFallbackModuleName();
+ bool computeModuleName();
+ bool computeOutputFilenames();
+ void computeDumpScopeMapLocations();
+ void computeHelpOptions();
+ void computeImplicitImportModuleNames();
+ void computeImportObjCHeaderOptions();
+ void computeLLVMArgs();
+ void computePlaygroundOptions();
+ void computePrintStatsOptions();
+ void computeTBDOptions();
+
+ void setUnsignedIntegerArgument(options::ID optionID, unsigned max,
+ unsigned &valueToSet);
+
+ FrontendOptions::ActionType determineRequestedAction() const;
+
+ bool setUpForSILOrLLVM();
+
+ /// Determine the correct output filename when none was specified.
+ ///
+ /// Such an absence should only occur when invoking the frontend
+ /// without the driver,
+ /// because the driver will always pass -o with an appropriate filename
+ /// if output is required for the requested action.
+ bool deriveOutputFilenameFromInputFile();
+
+ /// Determine the correct output filename when a directory was specified.
+ ///
+ /// Such a specification should only occur when invoking the frontend
+ /// directly, because the driver will always pass -o with an appropriate
+ /// filename if output is required for the requested action.
+ bool deriveOutputFilenameForDirectory(StringRef outputDir);
+
+ std::string determineBaseNameOfOutput() const;
+
+ void deriveOutputFilenameFromParts(StringRef dir, StringRef base);
+
+ void determineSupplementaryOutputFilenames();
+
+ /// Returns the output filenames on the command line or in the output
+ /// filelist. If there were neither -o's nor an output filelist, returns
+ /// an empty vector.
+ ArrayRef<std::string> getOutputFilenamesFromCommandLineOrFilelist();
+
+ bool checkForUnusedOutputPaths() const;
+
+ std::vector<std::string> readOutputFileList(StringRef filelistPath) const;
+
+public:
+ ArgsToFrontendOptionsConverter(DiagnosticEngine &Diags,
+ const llvm::opt::ArgList &Args,
+ FrontendOptions &Opts)
+ : Diags(Diags), Args(Args), Opts(Opts) {}
+
+ bool convert();
+};
+
+} // namespace swift
+
+#endif /* SWIFT_FRONTEND_ARGSTOFRONTENDOPTIONSCONVERTER_H */
diff --git a/include/swift/Frontend/FrontendInputs.h b/include/swift/Frontend/FrontendInputs.h
new file mode 100644
index 0000000..8f7b920
--- /dev/null
+++ b/include/swift/Frontend/FrontendInputs.h
@@ -0,0 +1,127 @@
+//===--- FrontendInputs.h ---------------------------------------*- C++ -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SWIFT_FRONTEND_FRONTENDINPUTS_H
+#define SWIFT_FRONTEND_FRONTENDINPUTS_H
+
+#include "swift/AST/Module.h"
+#include "llvm/ADT/StringMap.h"
+#include "swift/Frontend/InputFile.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/MapVector.h"
+
+#include <string>
+#include <vector>
+
+namespace llvm {
+class MemoryBuffer;
+}
+
+namespace swift {
+
+/// Information about all the inputs to the frontend.
+class FrontendInputs {
+ friend class ArgsToFrontendInputsConverter;
+
+ std::vector<InputFile> AllFiles;
+ typedef llvm::StringMap<unsigned> InputFileMap;
+ InputFileMap PrimaryInputs;
+
+public:
+ FrontendInputs() = default;
+
+ FrontendInputs(const FrontendInputs &other);
+
+ FrontendInputs &operator=(const FrontendInputs &other);
+
+ // Readers:
+
+ ArrayRef<InputFile> getAllFiles() const { return AllFiles; }
+
+ std::vector<std::string> getInputFilenames() const;
+
+ unsigned inputCount() const { return getAllFiles().size(); }
+
+ bool hasInputs() const { return !AllFiles.empty(); }
+
+ bool hasSingleInput() const { return inputCount() == 1; }
+
+ StringRef getFilenameOfFirstInput() const;
+
+ bool isReadingFromStdin() const;
+
+ // If we have exactly one input filename, and its extension is "bc" or "ll",
+ // treat the input as LLVM_IR.
+ bool shouldTreatAsLLVM() const;
+
+ // Primary input readers
+
+private:
+ void assertMustNotBeMoreThanOnePrimaryInput() const;
+
+ bool areAllNonPrimariesSIB() const;
+
+public:
+ unsigned primaryInputCount() const { return PrimaryInputs.size(); }
+
+ // Primary count readers:
+
+ bool hasUniquePrimaryInput() const { return primaryInputCount() == 1; }
+
+ bool hasPrimaryInputs() const { return primaryInputCount() > 0; }
+
+ bool isWholeModule() const { return !hasPrimaryInputs(); }
+
+ // Count-dependent readers:
+
+ /// \return the unique primary input, if one exists.
+ const InputFile *getUniquePrimaryInput() const;
+
+ const InputFile &getRequiredUniquePrimaryInput() const;
+
+ /// \return the name of the unique primary input, or an empty StringRef if
+ /// there isn't one.
+ StringRef getNameOfUniquePrimaryInputFile() const;
+
+ bool isFilePrimary(StringRef file) const;
+
+ unsigned numberOfPrimaryInputsEndingWith(const char *extension) const;
+
+ // Multi-facet readers
+
+ bool shouldTreatAsSIL() const;
+
+ /// \return true for error
+ bool verifyInputs(DiagnosticEngine &diags, bool treatAsSIL,
+ bool isREPLRequested, bool isNoneRequested) const;
+
+ // Writers
+
+ void addInputFile(StringRef file, llvm::MemoryBuffer *buffer = nullptr) {
+ addInput(InputFile(file, false, buffer));
+ }
+ void addPrimaryInputFile(StringRef file,
+ llvm::MemoryBuffer *buffer = nullptr) {
+ addInput(InputFile(file, true, buffer));
+ }
+
+ void addInput(const InputFile &input);
+
+ void clearInputs() {
+ AllFiles.clear();
+ PrimaryInputs.clear();
+ }
+};
+
+} // namespace swift
+
+#endif /* SWIFT_FRONTEND_FRONTENDINPUTS_H */
diff --git a/include/swift/Frontend/FrontendOptions.h b/include/swift/Frontend/FrontendOptions.h
index 273e620..2bf7510 100644
--- a/include/swift/Frontend/FrontendOptions.h
+++ b/include/swift/Frontend/FrontendOptions.h
@@ -14,6 +14,8 @@
#define SWIFT_FRONTEND_FRONTENDOPTIONS_H
#include "swift/AST/Module.h"
+#include "swift/Frontend/FrontendInputs.h"
+#include "swift/Frontend/InputFile.h"
#include "llvm/ADT/Hashing.h"
#include <string>
@@ -25,187 +27,10 @@
namespace swift {
-enum class InputFileKind {
- IFK_None,
- IFK_Swift,
- IFK_Swift_Library,
- IFK_Swift_REPL,
- IFK_SIL,
- IFK_LLVM_IR
-};
-
-// Inputs may include buffers that override contents, and eventually should
-// always include a buffer.
-class InputFile {
- std::string Filename;
- bool IsPrimary;
- /// Null if the contents are not overridden.
- llvm::MemoryBuffer *Buffer;
-
-public:
- /// Does not take ownership of \p buffer. Does take ownership of (copy) a
- /// string.
- InputFile(StringRef name, bool isPrimary,
- llvm::MemoryBuffer *buffer = nullptr)
- : Filename(name), IsPrimary(isPrimary), Buffer(buffer) {
- assert(!name.empty());
- }
-
- bool isPrimary() const { return IsPrimary; }
- llvm::MemoryBuffer *buffer() const { return Buffer; }
- StringRef file() const {
- assert(!Filename.empty());
- return Filename;
- }
-
- /// Return Swift-standard file name from a buffer name set by
- /// llvm::MemoryBuffer::getFileOrSTDIN, which uses "<stdin>" instead of "-".
- static StringRef convertBufferNameFromLLVM_getFileOrSTDIN_toSwiftConventions(
- StringRef filename) {
- return filename.equals("<stdin>") ? "-" : filename;
- }
-};
-
-/// Information about all the inputs to the frontend.
-class FrontendInputs {
- friend class ArgsToFrontendInputsConverter;
-
- std::vector<InputFile> AllFiles;
- typedef llvm::StringMap<unsigned> InputFileMap;
- InputFileMap PrimaryInputs;
-
-public:
- FrontendInputs() = default;
-
- FrontendInputs(const FrontendInputs &other) {
- for (InputFile input : other.getAllFiles())
- addInput(input);
- }
-
- FrontendInputs &operator=(const FrontendInputs &other) {
- clearInputs();
- for (InputFile input : other.getAllFiles())
- addInput(input);
- return *this;
- }
-
- // Readers:
-
- ArrayRef<InputFile> getAllFiles() const { return AllFiles; }
-
- std::vector<std::string> getInputFilenames() const {
- std::vector<std::string> filenames;
- for (auto &input : getAllFiles()) {
- filenames.push_back(input.file());
- }
- return filenames;
- }
-
- unsigned inputCount() const { return getAllFiles().size(); }
-
- bool hasInputs() const { return !AllFiles.empty(); }
-
- bool hasSingleInput() const { return inputCount() == 1; }
-
- StringRef getFilenameOfFirstInput() const {
- assert(hasInputs());
- const InputFile &inp = getAllFiles()[0];
- StringRef f = inp.file();
- assert(!f.empty());
- return f;
- }
-
- bool isReadingFromStdin() const {
- return hasSingleInput() && getFilenameOfFirstInput() == "-";
- }
-
- // If we have exactly one input filename, and its extension is "bc" or "ll",
- // treat the input as LLVM_IR.
- bool shouldTreatAsLLVM() const;
-
- // Primary input readers
-
-private:
- void assertMustNotBeMoreThanOnePrimaryInput() const {
- assert(primaryInputCount() < 2 &&
- "have not implemented >1 primary input yet");
- }
- bool areAllNonPrimariesSIB() const;
-
-public:
- unsigned primaryInputCount() const { return PrimaryInputs.size(); }
-
- // Primary count readers:
-
- bool hasUniquePrimaryInput() const { return primaryInputCount() == 1; }
-
- bool hasPrimaryInputs() const { return primaryInputCount() > 0; }
-
- bool isWholeModule() const { return !hasPrimaryInputs(); }
-
- // Count-dependend readers:
-
- /// Return the unique primary input, if one exists.
- const InputFile *getUniquePrimaryInput() const {
- assertMustNotBeMoreThanOnePrimaryInput();
- const auto b = PrimaryInputs.begin();
- return b == PrimaryInputs.end() ? nullptr : &AllFiles[b->second];
- }
-
- const InputFile &getRequiredUniquePrimaryInput() const {
- if (const auto *input = getUniquePrimaryInput())
- return *input;
- llvm_unreachable("No primary when one is required");
- }
-
- /// Return the name of the unique primary input, or an empty StrinRef if there
- /// isn't one.
- StringRef getNameOfUniquePrimaryInputFile() const {
- const auto *input = getUniquePrimaryInput();
- return input == nullptr ? StringRef() : input->file();
- }
-
- bool isFilePrimary(StringRef file) {
- auto iterator = PrimaryInputs.find(file);
- return iterator != PrimaryInputs.end() &&
- AllFiles[iterator->second].isPrimary();
- }
-
- unsigned numberOfPrimaryInputsEndingWith(const char *extension) const;
-
- // Multi-facet readers
-
- bool shouldTreatAsSIL() const;
-
- /// Return true for error
- bool verifyInputs(DiagnosticEngine &diags, bool treatAsSIL,
- bool isREPLRequested, bool isNoneRequested) const;
-
- // Writers
-
- void addInputFile(StringRef file, llvm::MemoryBuffer *buffer = nullptr) {
- addInput(InputFile(file, false, buffer));
- }
- void addPrimaryInputFile(StringRef file,
- llvm::MemoryBuffer *buffer = nullptr) {
- addInput(InputFile(file, true, buffer));
- }
-
- void addInput(const InputFile &input) {
- if (!input.file().empty() && input.isPrimary())
- PrimaryInputs.insert(std::make_pair(input.file(), AllFiles.size()));
- AllFiles.push_back(input);
- }
-
- void clearInputs() {
- AllFiles.clear();
- PrimaryInputs.clear();
- }
-};
/// Options for controlling the behavior of the frontend.
class FrontendOptions {
- friend class FrontendArgsToOptionsConverter;
+ friend class ArgsToFrontendOptionsConverter;
public:
FrontendInputs Inputs;
@@ -494,8 +319,6 @@
}
private:
- static const char *suffixForPrincipalOutputFileForAction(ActionType);
-
bool hasUnusedDependenciesFilePath() const;
static bool canActionEmitDependencies(ActionType);
bool hasUnusedObjCHeaderOutputPath() const;
@@ -507,9 +330,11 @@
bool hasUnusedModuleDocOutputPath() const;
static bool canActionEmitModuleDoc(ActionType);
+public:
static bool doesActionProduceOutput(ActionType);
static bool doesActionProduceTextualOutput(ActionType);
static bool needsProperModuleName(ActionType);
+ static const char *suffixForPrincipalOutputFileForAction(ActionType);
};
}
diff --git a/include/swift/Frontend/InputFile.h b/include/swift/Frontend/InputFile.h
new file mode 100644
index 0000000..abc3fb6
--- /dev/null
+++ b/include/swift/Frontend/InputFile.h
@@ -0,0 +1,65 @@
+//===--- InputFile.h --------------------------------------------*- C++ -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SWIFT_FRONTEND_INPUTFILE_H
+#define SWIFT_FRONTEND_INPUTFILE_H
+
+#include "llvm/Support/MemoryBuffer.h"
+#include <string>
+#include <vector>
+
+namespace swift {
+
+enum class InputFileKind {
+ IFK_None,
+ IFK_Swift,
+ IFK_Swift_Library,
+ IFK_Swift_REPL,
+ IFK_SIL,
+ IFK_LLVM_IR
+};
+
+// Inputs may include buffers that override contents, and eventually should
+// always include a buffer.
+class InputFile {
+ std::string Filename;
+ bool IsPrimary;
+ /// Null if the contents are not overridden.
+ llvm::MemoryBuffer *Buffer;
+
+public:
+ /// Does not take ownership of \p buffer. Does take ownership of (copy) a
+ /// string.
+ InputFile(StringRef name, bool isPrimary,
+ llvm::MemoryBuffer *buffer = nullptr)
+ : Filename(name), IsPrimary(isPrimary), Buffer(buffer) {
+ assert(!name.empty());
+ }
+
+ bool isPrimary() const { return IsPrimary; }
+ llvm::MemoryBuffer *buffer() const { return Buffer; }
+ StringRef file() const {
+ assert(!Filename.empty());
+ return Filename;
+ }
+
+ /// Return Swift-standard file name from a buffer name set by
+ /// llvm::MemoryBuffer::getFileOrSTDIN, which uses "<stdin>" instead of "-".
+ static StringRef convertBufferNameFromLLVM_getFileOrSTDIN_toSwiftConventions(
+ StringRef filename) {
+ return filename.equals("<stdin>") ? "-" : filename;
+ }
+};
+
+} // namespace swift
+
+#endif /* SWIFT_FRONTEND_INPUTFILE_H */
diff --git a/include/swift/Runtime/Debug.h b/include/swift/Runtime/Debug.h
index 38934d8..e8f4412 100644
--- a/include/swift/Runtime/Debug.h
+++ b/include/swift/Runtime/Debug.h
@@ -128,6 +128,10 @@
LLVM_ATTRIBUTE_NORETURN LLVM_ATTRIBUTE_NOINLINE
void swift_abortUnownedRetainOverflow();
+// Halt due to an overflow in incrementWeak().
+LLVM_ATTRIBUTE_NORETURN LLVM_ATTRIBUTE_NOINLINE
+void swift_abortWeakRetainOverflow();
+
/// This function dumps one line of a stack trace. It is assumed that \p framePC
/// is the address of the stack frame at index \p index. If \p shortOutput is
/// true, this functions prints only the name of the symbol and offset, ignores
diff --git a/include/swift/Runtime/Metadata.h b/include/swift/Runtime/Metadata.h
index 46488bf..c37f9e0 100644
--- a/include/swift/Runtime/Metadata.h
+++ b/include/swift/Runtime/Metadata.h
@@ -177,6 +177,8 @@
template <typename Runtime> struct TargetMetadata;
using Metadata = TargetMetadata<InProcess>;
+template <typename Runtime> struct TargetProtocolConformanceDescriptor;
+
/// Storage for an arbitrary value. In C/C++ terms, this is an
/// 'object', because it is rooted in memory.
///
@@ -2177,6 +2179,10 @@
/// Requirement descriptions.
RelativeDirectPointer<TargetProtocolRequirement<Runtime>> Requirements;
+ /// The superclass of which all conforming types must be a subclass.
+ RelativeDirectPointer<const TargetClassMetadata<Runtime>, /*Nullable=*/true>
+ Superclass;
+
/// Associated type names, as a space-separated list in the same order
/// as the requirements.
RelativeDirectPointer<const char, /*Nullable=*/true> AssociatedTypeNames;
@@ -2199,15 +2205,25 @@
NumMandatoryRequirements(0),
NumRequirements(0),
Requirements(nullptr),
+ Superclass(nullptr),
AssociatedTypeNames(nullptr)
{}
};
using ProtocolDescriptor = TargetProtocolDescriptor<InProcess>;
-/// A witness table for a protocol. This type is intentionally opaque because
+/// A witness table for a protocol.
+///
+/// With the exception of the initial protocol conformance descriptor,
/// the layout of a witness table is dependent on the protocol being
/// represented.
-struct WitnessTable;
+template <typename Runtime>
+struct TargetWitnessTable {
+ /// The protocol conformance descriptor from which this witness table
+ /// was generated.
+ const TargetProtocolConformanceDescriptor<Runtime> *Description;
+};
+
+using WitnessTable = TargetWitnessTable<InProcess>;
/// The basic layout of an opaque (non-class-bounded) existential type.
template <typename Runtime>
@@ -2216,12 +2232,13 @@
const TargetMetadata<Runtime> *Type;
// const void *WitnessTables[];
- const WitnessTable **getWitnessTables() {
- return reinterpret_cast<const WitnessTable **>(this + 1);
+ const TargetWitnessTable<Runtime> **getWitnessTables() {
+ return reinterpret_cast<const TargetWitnessTable<Runtime> **>(this + 1);
}
- const WitnessTable * const *getWitnessTables() const {
- return reinterpret_cast<const WitnessTable * const *>(this + 1);
+ const TargetWitnessTable<Runtime> * const *getWitnessTables() const {
+ return reinterpret_cast<const TargetWitnessTable<Runtime> * const *>(
+ this + 1);
}
void copyTypeInto(swift::TargetOpaqueExistentialContainer<Runtime> *dest,
@@ -2305,8 +2322,9 @@
/// Get a witness table from an existential container of the type described
/// by this metadata.
- const WitnessTable * getWitnessTable(const OpaqueValue *container,
- unsigned i) const;
+ const TargetWitnessTable<Runtime> * getWitnessTable(
+ const OpaqueValue *container,
+ unsigned i) const;
/// Return true iff all the protocol constraints are @objc.
bool isObjC() const {
@@ -2345,11 +2363,11 @@
struct TargetExistentialMetatypeContainer {
const TargetMetadata<Runtime> *Value;
- const WitnessTable **getWitnessTables() {
- return reinterpret_cast<const WitnessTable**>(this + 1);
+ const TargetWitnessTable<Runtime> **getWitnessTables() {
+ return reinterpret_cast<const TargetWitnessTable<Runtime>**>(this + 1);
}
- const WitnessTable * const *getWitnessTables() const {
- return reinterpret_cast<const WitnessTable* const *>(this + 1);
+ const TargetWitnessTable<Runtime> * const *getWitnessTables() const {
+ return reinterpret_cast<const TargetWitnessTable<Runtime>* const *>(this + 1);
}
void copyTypeInto(TargetExistentialMetatypeContainer *dest,
@@ -2532,10 +2550,10 @@
/*nullable*/ true> Protocol;
/// The pattern.
- RelativeDirectPointer<const WitnessTable> Pattern;
+ RelativeDirectPointer<const TargetWitnessTable<Runtime>> Pattern;
/// The instantiation function, which is called after the template is copied.
- RelativeDirectPointer<void(WitnessTable *instantiatedTable,
+ RelativeDirectPointer<void(TargetWitnessTable<Runtime> *instantiatedTable,
const TargetMetadata<Runtime> *type,
void * const *instantiationArgs),
/*nullable*/ true> Instantiator;
@@ -2609,9 +2627,9 @@
struct TargetProtocolConformanceDescriptor {
public:
using WitnessTableAccessorFn
- = const WitnessTable *(const TargetMetadata<Runtime>*,
- const WitnessTable **,
- size_t);
+ = const TargetWitnessTable<Runtime> *(const TargetMetadata<Runtime>*,
+ const TargetWitnessTable<Runtime> **,
+ size_t);
private:
/// The protocol being conformed to.
@@ -2637,7 +2655,7 @@
// The conformance, or a generator function for the conformance.
union {
/// A direct reference to the witness table for the conformance.
- RelativeDirectPointer<const WitnessTable> WitnessTable;
+ RelativeDirectPointer<const TargetWitnessTable<Runtime>> WitnessTable;
/// A function that produces the witness table given an instance of the
/// type.
@@ -2696,7 +2714,7 @@
}
/// Get the directly-referenced static witness table.
- const swift::WitnessTable *getStaticWitnessTable() const {
+ const swift::TargetWitnessTable<Runtime> *getStaticWitnessTable() const {
switch (getConformanceKind()) {
case ConformanceFlags::ConformanceKind::WitnessTable:
break;
@@ -2727,7 +2745,7 @@
/// Get the witness table for the specified type, realizing it if
/// necessary, or return null if the conformance does not apply to the
/// type.
- const swift::WitnessTable *
+ const swift::TargetWitnessTable<Runtime> *
getWitnessTable(const TargetMetadata<Runtime> *type) const;
#if !defined(NDEBUG) && SWIFT_OBJC_INTEROP
diff --git a/include/swift/SIL/SILWitnessVisitor.h b/include/swift/SIL/SILWitnessVisitor.h
index 6fc81ab..64797d1 100644
--- a/include/swift/SIL/SILWitnessVisitor.h
+++ b/include/swift/SIL/SILWitnessVisitor.h
@@ -48,6 +48,9 @@
public:
void visitProtocolDecl(ProtocolDecl *protocol) {
+ // The protocol conformance descriptor gets added first.
+ asDerived().addProtocolConformanceDescriptor();
+
// Associated types get added after the inherited conformances, but
// before all the function requirements.
bool haveAddedAssociatedTypes = false;
diff --git a/include/swift/SILOptimizer/Analysis/SideEffectAnalysis.h b/include/swift/SILOptimizer/Analysis/SideEffectAnalysis.h
index 2490645..a950db4 100644
--- a/include/swift/SILOptimizer/Analysis/SideEffectAnalysis.h
+++ b/include/swift/SILOptimizer/Analysis/SideEffectAnalysis.h
@@ -247,6 +247,9 @@
/// Get the array of parameter effects. If a side-effect can be associated
/// to a specific parameter, it is contained here instead of the global
/// effects.
+ /// Note that if a parameter effect is mayRelease(), it means that the
+ /// global function effects can be anything, because the destructor of an
+ /// object can have arbitrary side effects.
ArrayRef<Effects> getParameterEffects() const { return ParamEffects; }
/// Merge effects from \p RHS.
diff --git a/include/swift/SILOptimizer/PassManager/Passes.def b/include/swift/SILOptimizer/PassManager/Passes.def
index c8f6fe9..f0e424e 100644
--- a/include/swift/SILOptimizer/PassManager/Passes.def
+++ b/include/swift/SILOptimizer/PassManager/Passes.def
@@ -200,6 +200,8 @@
"Move SIL cond_fail by Hoisting Checks")
PASS(NoReturnFolding, "noreturn-folding",
"Prune Control Flow at No-Return Calls Using SIL unreachable")
+PASS(ObjectOutliner, "object-outliner",
+ "Outlining of Global Objects")
PASS(Outliner, "outliner",
"Function Outlining Optimization")
PASS(OwnershipModelEliminator, "ownership-model-eliminator",
diff --git a/include/swift/SILOptimizer/Utils/ConstantFolding.h b/include/swift/SILOptimizer/Utils/ConstantFolding.h
index 0121737..b47fa10 100644
--- a/include/swift/SILOptimizer/Utils/ConstantFolding.h
+++ b/include/swift/SILOptimizer/Utils/ConstantFolding.h
@@ -18,6 +18,9 @@
#define SWIFT_SIL_CONSTANTFOLDING_H
#include "swift/SIL/SILInstruction.h"
+#include "swift/SILOptimizer/Analysis/Analysis.h"
+#include "llvm/ADT/SetVector.h"
+#include <functional>
namespace swift {
@@ -47,6 +50,52 @@
/// The \p ID must be the ID of a trunc/sext/zext builtin.
APInt constantFoldCast(APInt val, const BuiltinInfo &BI);
+
+/// A utility class to do constant folding.
+class ConstantFolder {
+private:
+ /// The worklist of the constants that could be folded into their users.
+ llvm::SetVector<SILInstruction *> WorkList;
+
+ /// The assert configuration of SILOptions.
+ unsigned AssertConfiguration;
+
+ /// Print diagnostics as part of mandatory constant propagation.
+ bool EnableDiagnostics;
+
+ /// Called for each constant folded instruction.
+ std::function<void (SILInstruction *)> Callback;
+
+ bool constantFoldStringConcatenation(ApplyInst *AI);
+
+public:
+ /// The constructor.
+ ///
+ /// \param AssertConfiguration The assert configuration of SILOptions.
+ /// \param EnableDiagnostics Print diagnostics as part of mandatory constant
+ /// propagation.
+ /// \param Callback Called for each constant folded instruction.
+ ConstantFolder(unsigned AssertConfiguration,
+ bool EnableDiagnostics = false,
+ std::function<void (SILInstruction *)> Callback =
+ [](SILInstruction *){}) :
+ AssertConfiguration(AssertConfiguration),
+ EnableDiagnostics(EnableDiagnostics),
+ Callback(Callback) { }
+
+ /// Initialize the worklist with all instructions of the function \p F.
+ void initializeWorklist(SILFunction &F);
+
+ /// Initialize the worklist with a single instruction \p I.
+ void addToWorklist(SILInstruction *I) {
+ WorkList.insert(I);
+ }
+
+ /// Constant fold everything in the worklist and transitively all uses of
+ /// folded instructions.
+ SILAnalysis::InvalidationKind processWorkList();
+};
+
} // end namespace swift
#endif
diff --git a/include/swift/SILOptimizer/Utils/Local.h b/include/swift/SILOptimizer/Utils/Local.h
index 12b3cb5..6d1a5a6 100644
--- a/include/swift/SILOptimizer/Utils/Local.h
+++ b/include/swift/SILOptimizer/Utils/Local.h
@@ -616,6 +616,48 @@
void hoistAddressProjections(Operand &Op, SILInstruction *InsertBefore,
DominanceInfo *DomTree);
+/// Utility class for cloning init values into the static initializer of a
+/// SILGlobalVariable.
+class StaticInitCloner : public SILCloner<StaticInitCloner> {
+ friend class SILInstructionVisitor<StaticInitCloner>;
+ friend class SILCloner<StaticInitCloner>;
+
+ /// The number of not yet cloned operands for each instruction.
+ llvm::DenseMap<SILInstruction *, int> NumOpsToClone;
+
+ /// List of instructions for which all operands are already cloned (or which
+ /// don't have any operands).
+ llvm::SmallVector<SILInstruction *, 8> ReadyToClone;
+
+public:
+ StaticInitCloner(SILGlobalVariable *GVar)
+ : SILCloner<StaticInitCloner>(GVar) { }
+
+ /// Add \p InitVal and all its operands (transitively) for cloning.
+ ///
+  /// Note: all init values must be added before calling clone().
+ void add(SILInstruction *InitVal);
+
+ /// Clone \p InitVal and all its operands into the initializer of the
+ /// SILGlobalVariable.
+ ///
+ /// \return Returns the cloned instruction in the SILGlobalVariable.
+ SingleValueInstruction *clone(SingleValueInstruction *InitVal);
+
+ /// Convenience function to clone a single \p InitVal.
+ static void appendToInitializer(SILGlobalVariable *GVar,
+ SingleValueInstruction *InitVal) {
+ StaticInitCloner Cloner(GVar);
+ Cloner.add(InitVal);
+ Cloner.clone(InitVal);
+ }
+
+protected:
+ SILLocation remapLocation(SILLocation Loc) {
+ return ArtificialUnreachableLocation();
+ }
+};
+
} // end namespace swift
#endif
diff --git a/lib/AST/LookupVisibleDecls.cpp b/lib/AST/LookupVisibleDecls.cpp
index c34f9fb..384a01a 100644
--- a/lib/AST/LookupVisibleDecls.cpp
+++ b/lib/AST/LookupVisibleDecls.cpp
@@ -291,6 +291,10 @@
if (D->getOverriddenDecl())
return;
+ // If the declaration is not @objc, it cannot be called dynamically.
+ if (!D->isObjC())
+ return;
+
// Ensure that the declaration has a type.
if (!D->hasInterfaceType()) {
if (!TypeResolver) return;
diff --git a/lib/AST/NameLookup.cpp b/lib/AST/NameLookup.cpp
index 93da761..6c46754 100644
--- a/lib/AST/NameLookup.cpp
+++ b/lib/AST/NameLookup.cpp
@@ -1835,6 +1835,10 @@
if (decl->getOverriddenDecl())
continue;
+ // If the declaration is not @objc, it cannot be called dynamically.
+ if (!decl->isObjC())
+ continue;
+
auto dc = decl->getDeclContext();
auto nominal = dyn_cast<NominalTypeDecl>(dc);
if (!nominal) {
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index e37e260..e2e88d9 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -1269,7 +1269,6 @@
Type SugarType::getSinglyDesugaredTypeSlow() {
// Find the generic type that implements this syntactic sugar type.
- auto &ctx = *Context;
NominalTypeDecl *implDecl;
// XXX -- If the Decl and Type class hierarchies agreed on spelling, then
@@ -1291,16 +1290,16 @@
return UTy;
}
case TypeKind::ArraySlice:
- implDecl = ctx.getArrayDecl();
+ implDecl = Context->getArrayDecl();
break;
case TypeKind::Optional:
- implDecl = ctx.getOptionalDecl();
+ implDecl = Context->getOptionalDecl();
break;
case TypeKind::ImplicitlyUnwrappedOptional:
- implDecl = ctx.getImplicitlyUnwrappedOptionalDecl();
+ implDecl = Context->getImplicitlyUnwrappedOptionalDecl();
break;
case TypeKind::Dictionary:
- implDecl = ctx.getDictionaryDecl();
+ implDecl = Context->getDictionaryDecl();
break;
}
assert(implDecl && "Type has not been set yet");
diff --git a/lib/ClangImporter/ImportMacro.cpp b/lib/ClangImporter/ImportMacro.cpp
index 0049a23..b169288 100644
--- a/lib/ClangImporter/ImportMacro.cpp
+++ b/lib/ClangImporter/ImportMacro.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Sema.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "swift/AST/ASTContext.h"
@@ -361,10 +362,16 @@
}
auto identifierName = identifierInfo->getName();
auto &identifier = impl.getClangASTContext().Idents.get(identifierName);
+
+ clang::sema::DelayedDiagnosticPool diagPool{
+ impl.getClangSema().DelayedDiagnostics.getCurrentPool()};
+ auto diagState = impl.getClangSema().DelayedDiagnostics.push(diagPool);
auto parsedType = impl.getClangSema().getTypeName(identifier,
clang::SourceLocation(),
/*scope*/nullptr);
- if (parsedType) {
+ impl.getClangSema().DelayedDiagnostics.popWithoutEmitting(diagState);
+
+ if (parsedType && diagPool.empty()) {
castType = parsedType.get();
} else {
return nullptr;
diff --git a/lib/Frontend/ArgsToFrontendInputsConverter.cpp b/lib/Frontend/ArgsToFrontendInputsConverter.cpp
new file mode 100644
index 0000000..5d8d758
--- /dev/null
+++ b/lib/Frontend/ArgsToFrontendInputsConverter.cpp
@@ -0,0 +1,151 @@
+//===--- ArgsToFrontendInputsConverter.cpp ----------------------*- C++ -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#include "swift/Frontend/ArgsToFrontendInputsConverter.h"
+
+#include "swift/AST/DiagnosticsFrontend.h"
+#include "swift/Frontend/FrontendOptions.h"
+#include "swift/Option/Options.h"
+#include "swift/Parse/Lexer.h"
+#include "swift/Strings.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/Path.h"
+
+using namespace swift;
+using namespace llvm::opt;
+
+ArgsToFrontendInputsConverter::ArgsToFrontendInputsConverter(
+ DiagnosticEngine &diags, const ArgList &args, FrontendInputs &inputs)
+ : Diags(diags), Args(args), Inputs(inputs),
+ FilelistPathArg(args.getLastArg(options::OPT_filelist)),
+ PrimaryFilelistPathArg(args.getLastArg(options::OPT_primary_filelist)) {}
+
+bool ArgsToFrontendInputsConverter::convert() {
+ if (enforceFilelistExclusion())
+ return true;
+ if (FilelistPathArg ? readInputFilesFromFilelist()
+ : readInputFilesFromCommandLine())
+ return true;
+ Optional<std::set<StringRef>> primaryFiles = readPrimaryFiles();
+ if (!primaryFiles)
+ return true;
+ std::set<StringRef> unusedPrimaryFiles =
+ createInputFilesConsumingPrimaries(*primaryFiles);
+ return checkForMissingPrimaryFiles(unusedPrimaryFiles);
+}
+
+bool ArgsToFrontendInputsConverter::enforceFilelistExclusion() {
+ if (Args.hasArg(options::OPT_INPUT) && FilelistPathArg) {
+ Diags.diagnose(SourceLoc(),
+ diag::error_cannot_have_input_files_with_file_list);
+ return true;
+ }
+ // The following is not strictly necessary, but the restriction makes
+ // it easier to understand a given command line:
+ if (Args.hasArg(options::OPT_primary_file) && PrimaryFilelistPathArg) {
+ Diags.diagnose(
+ SourceLoc(),
+ diag::error_cannot_have_primary_files_with_primary_file_list);
+ return true;
+ }
+ return false;
+}
+
+bool ArgsToFrontendInputsConverter::readInputFilesFromCommandLine() {
+ bool hadDuplicates = false;
+ for (const Arg *A :
+ Args.filtered(options::OPT_INPUT, options::OPT_primary_file)) {
+ hadDuplicates = addFile(A->getValue()) || hadDuplicates;
+ }
+ return false; // FIXME: Don't bail out for duplicates, too many tests depend
+ // on it.
+}
+
+bool ArgsToFrontendInputsConverter::readInputFilesFromFilelist() {
+ bool hadDuplicates = false;
+ bool hadError =
+ forAllFilesInFilelist(FilelistPathArg, [&](StringRef file) -> void {
+ hadDuplicates = addFile(file) || hadDuplicates;
+ });
+ if (hadError)
+ return true;
+ return false; // FIXME: Don't bail out for duplicates, too many tests depend
+ // on it.
+}
+
+bool ArgsToFrontendInputsConverter::forAllFilesInFilelist(
+ Arg const *const pathArg, llvm::function_ref<void(StringRef)> fn) {
+ if (!pathArg)
+ return false;
+ StringRef path = pathArg->getValue();
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> filelistBufferOrError =
+ llvm::MemoryBuffer::getFile(path);
+ if (!filelistBufferOrError) {
+ Diags.diagnose(SourceLoc(), diag::cannot_open_file, path,
+ filelistBufferOrError.getError().message());
+ return true;
+ }
+ for (auto file :
+ llvm::make_range(llvm::line_iterator(*filelistBufferOrError->get()),
+ llvm::line_iterator()))
+ fn(file);
+ BuffersToKeepAlive.push_back(std::move(*filelistBufferOrError));
+ return false;
+}
+
+bool ArgsToFrontendInputsConverter::addFile(StringRef file) {
+ if (Files.insert(file))
+ return false;
+ Diags.diagnose(SourceLoc(), diag::error_duplicate_input_file, file);
+ return true;
+}
+
+Optional<std::set<StringRef>>
+ArgsToFrontendInputsConverter::readPrimaryFiles() {
+ std::set<StringRef> primaryFiles;
+ for (const Arg *A : Args.filtered(options::OPT_primary_file))
+ primaryFiles.insert(A->getValue());
+ if (forAllFilesInFilelist(
+ PrimaryFilelistPathArg,
+ [&](StringRef file) -> void { primaryFiles.insert(file); }))
+ return None;
+ return primaryFiles;
+}
+
+std::set<StringRef>
+ArgsToFrontendInputsConverter::createInputFilesConsumingPrimaries(
+ std::set<StringRef> primaryFiles) {
+ for (auto &file : Files) {
+ bool isPrimary = primaryFiles.count(file) > 0;
+ Inputs.addInput(InputFile(file, isPrimary));
+ if (isPrimary)
+ primaryFiles.erase(file);
+ }
+ return primaryFiles;
+}
+
+bool ArgsToFrontendInputsConverter::checkForMissingPrimaryFiles(
+ std::set<StringRef> primaryFiles) {
+ for (auto &file : primaryFiles) {
+ // Catch "swiftc -frontend -c -filelist foo -primary-file
+ // some-file-not-in-foo".
+ assert(FilelistPathArg && "Missing primary with no filelist");
+ Diags.diagnose(SourceLoc(), diag::error_primary_file_not_found, file,
+ FilelistPathArg->getValue());
+ }
+ return !primaryFiles.empty();
+}
diff --git a/lib/Frontend/ArgsToFrontendOptionsConverter.cpp b/lib/Frontend/ArgsToFrontendOptionsConverter.cpp
new file mode 100644
index 0000000..ba6fb62
--- /dev/null
+++ b/lib/Frontend/ArgsToFrontendOptionsConverter.cpp
@@ -0,0 +1,664 @@
+//===--- ArgsToFrontendOptionsConverter -------------------------*- C++ -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#include "swift/Frontend/ArgsToFrontendOptionsConverter.h"
+
+#include "swift/AST/DiagnosticsFrontend.h"
+#include "swift/Basic/Platform.h"
+#include "swift/Frontend/ArgsToFrontendInputsConverter.h"
+#include "swift/Frontend/Frontend.h"
+#include "swift/Option/Options.h"
+#include "swift/Option/SanitizerOptions.h"
+#include "swift/Parse/Lexer.h"
+#include "swift/Strings.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/Path.h"
+
+using namespace swift;
+using namespace llvm::opt;
+
+// This is a separate function so that it shows up in stack traces.
+LLVM_ATTRIBUTE_NOINLINE
+static void debugFailWithAssertion() {
+ // This assertion should always fail, per the user's request, and should
+ // not be converted to llvm_unreachable.
+ assert(0 && "This is an assertion!");
+}
+
+// This is a separate function so that it shows up in stack traces.
+LLVM_ATTRIBUTE_NOINLINE
+static void debugFailWithCrash() { LLVM_BUILTIN_TRAP; }
+
+bool ArgsToFrontendOptionsConverter::convert() {
+ using namespace options;
+
+ handleDebugCrashGroupArguments();
+
+ if (const Arg *A = Args.getLastArg(OPT_dump_api_path)) {
+ Opts.DumpAPIPath = A->getValue();
+ }
+ if (const Arg *A = Args.getLastArg(OPT_group_info_path)) {
+ Opts.GroupInfoPath = A->getValue();
+ }
+ if (const Arg *A = Args.getLastArg(OPT_index_store_path)) {
+ Opts.IndexStorePath = A->getValue();
+ }
+ Opts.IndexSystemModules |= Args.hasArg(OPT_index_system_modules);
+
+ Opts.EmitVerboseSIL |= Args.hasArg(OPT_emit_verbose_sil);
+ Opts.EmitSortedSIL |= Args.hasArg(OPT_emit_sorted_sil);
+
+ Opts.EnableTesting |= Args.hasArg(OPT_enable_testing);
+ Opts.EnableResilience |= Args.hasArg(OPT_enable_resilience);
+
+ computePrintStatsOptions();
+ computeDebugTimeOptions();
+ computeTBDOptions();
+
+ setUnsignedIntegerArgument(OPT_warn_long_function_bodies, 10,
+ Opts.WarnLongFunctionBodies);
+ setUnsignedIntegerArgument(OPT_warn_long_expression_type_checking, 10,
+ Opts.WarnLongExpressionTypeChecking);
+ setUnsignedIntegerArgument(OPT_solver_expression_time_threshold_EQ, 10,
+ Opts.SolverExpressionTimeThreshold);
+
+ computePlaygroundOptions();
+
+ // This can be enabled independently of the playground transform.
+ Opts.PCMacro |= Args.hasArg(OPT_pc_macro);
+
+ computeHelpOptions();
+ if (ArgsToFrontendInputsConverter(Diags, Args, Opts.Inputs).convert())
+ return true;
+
+ Opts.ParseStdlib |= Args.hasArg(OPT_parse_stdlib);
+
+ if (const Arg *A = Args.getLastArg(OPT_verify_generic_signatures)) {
+ Opts.VerifyGenericSignaturesInModule = A->getValue();
+ }
+
+ computeDumpScopeMapLocations();
+ Opts.RequestedAction = determineRequestedAction();
+
+ if (Opts.RequestedAction == FrontendOptions::ActionType::Immediate &&
+ Opts.Inputs.hasPrimaryInputs()) {
+ Diags.diagnose(SourceLoc(), diag::error_immediate_mode_primary_file);
+ return true;
+ }
+
+ if (setUpForSILOrLLVM())
+ return true;
+
+ if (computeModuleName())
+ return true;
+
+ if (computeOutputFilenames())
+ return true;
+ determineSupplementaryOutputFilenames();
+
+ if (checkForUnusedOutputPaths())
+ return true;
+
+ if (const Arg *A = Args.getLastArg(OPT_module_link_name)) {
+ Opts.ModuleLinkName = A->getValue();
+ }
+
+ Opts.AlwaysSerializeDebuggingOptions |=
+ Args.hasArg(OPT_serialize_debugging_options);
+ Opts.EnableSourceImport |= Args.hasArg(OPT_enable_source_import);
+ Opts.ImportUnderlyingModule |= Args.hasArg(OPT_import_underlying_module);
+ Opts.EnableSerializationNestedTypeLookupTable &=
+ !Args.hasArg(OPT_disable_serialization_nested_type_lookup_table);
+
+ computeImportObjCHeaderOptions();
+ computeImplicitImportModuleNames();
+ computeLLVMArgs();
+
+ return false;
+}
+
+void ArgsToFrontendOptionsConverter::handleDebugCrashGroupArguments() {
+ using namespace options;
+
+ if (const Arg *A = Args.getLastArg(OPT_debug_crash_Group)) {
+ Option Opt = A->getOption();
+ if (Opt.matches(OPT_debug_assert_immediately)) {
+ debugFailWithAssertion();
+ } else if (Opt.matches(OPT_debug_crash_immediately)) {
+ debugFailWithCrash();
+ } else if (Opt.matches(OPT_debug_assert_after_parse)) {
+ // Set in FrontendOptions
+ Opts.CrashMode = FrontendOptions::DebugCrashMode::AssertAfterParse;
+ } else if (Opt.matches(OPT_debug_crash_after_parse)) {
+ // Set in FrontendOptions
+ Opts.CrashMode = FrontendOptions::DebugCrashMode::CrashAfterParse;
+ } else {
+ llvm_unreachable("Unknown debug_crash_Group option!");
+ }
+ }
+}
+
+void ArgsToFrontendOptionsConverter::computePrintStatsOptions() {
+ using namespace options;
+ Opts.PrintStats |= Args.hasArg(OPT_print_stats);
+ Opts.PrintClangStats |= Args.hasArg(OPT_print_clang_stats);
+#if defined(NDEBUG) && !defined(LLVM_ENABLE_STATS)
+ if (Opts.PrintStats || Opts.PrintClangStats)
+ Diags.diagnose(SourceLoc(), diag::stats_disabled);
+#endif
+}
+
+void ArgsToFrontendOptionsConverter::computeDebugTimeOptions() {
+ using namespace options;
+ Opts.DebugTimeFunctionBodies |= Args.hasArg(OPT_debug_time_function_bodies);
+ Opts.DebugTimeExpressionTypeChecking |=
+ Args.hasArg(OPT_debug_time_expression_type_checking);
+ Opts.DebugTimeCompilation |= Args.hasArg(OPT_debug_time_compilation);
+ if (const Arg *A = Args.getLastArg(OPT_stats_output_dir)) {
+ Opts.StatsOutputDir = A->getValue();
+ if (Args.getLastArg(OPT_trace_stats_events)) {
+ Opts.TraceStats = true;
+ }
+ }
+}
+
+void ArgsToFrontendOptionsConverter::computeTBDOptions() {
+ using namespace options;
+ if (const Arg *A = Args.getLastArg(OPT_validate_tbd_against_ir_EQ)) {
+ using Mode = FrontendOptions::TBDValidationMode;
+ StringRef value = A->getValue();
+ if (value == "none") {
+ Opts.ValidateTBDAgainstIR = Mode::None;
+ } else if (value == "missing") {
+ Opts.ValidateTBDAgainstIR = Mode::MissingFromTBD;
+ } else if (value == "all") {
+ Opts.ValidateTBDAgainstIR = Mode::All;
+ } else {
+ Diags.diagnose(SourceLoc(), diag::error_unsupported_option_argument,
+ A->getOption().getPrefixedName(), value);
+ }
+ }
+ if (const Arg *A = Args.getLastArg(OPT_tbd_install_name)) {
+ Opts.TBDInstallName = A->getValue();
+ }
+}
+
+void ArgsToFrontendOptionsConverter::setUnsignedIntegerArgument(
+ options::ID optionID, unsigned max, unsigned &valueToSet) {
+ if (const Arg *A = Args.getLastArg(optionID)) {
+ unsigned attempt;
+ if (StringRef(A->getValue()).getAsInteger(max, attempt)) {
+ Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
+ A->getAsString(Args), A->getValue());
+ } else {
+ valueToSet = attempt;
+ }
+ }
+}
+
+void ArgsToFrontendOptionsConverter::computePlaygroundOptions() {
+ using namespace options;
+ Opts.PlaygroundTransform |= Args.hasArg(OPT_playground);
+ if (Args.hasArg(OPT_disable_playground_transform))
+ Opts.PlaygroundTransform = false;
+ Opts.PlaygroundHighPerformance |=
+ Args.hasArg(OPT_playground_high_performance);
+}
+
+void ArgsToFrontendOptionsConverter::computeHelpOptions() {
+ using namespace options;
+ if (const Arg *A = Args.getLastArg(OPT_help, OPT_help_hidden)) {
+ if (A->getOption().matches(OPT_help)) {
+ Opts.PrintHelp = true;
+ } else if (A->getOption().matches(OPT_help_hidden)) {
+ Opts.PrintHelpHidden = true;
+ } else {
+ llvm_unreachable("Unknown help option parsed");
+ }
+ }
+}
+
+void ArgsToFrontendOptionsConverter::computeDumpScopeMapLocations() {
+ using namespace options;
+ const Arg *A = Args.getLastArg(OPT_modes_Group);
+ if (!A || !A->getOption().matches(OPT_dump_scope_maps))
+ return;
+ StringRef value = A->getValue();
+ if (value == "expanded") {
+    // Note: dump the fully expanded scope map; no per-location lookups.
+ return;
+ }
+ // Parse a comma-separated list of line:column for lookups to
+ // perform (and dump the result of).
+ SmallVector<StringRef, 4> locations;
+ value.split(locations, ',');
+
+ bool invalid = false;
+ for (auto location : locations) {
+ auto lineColumnStr = location.split(':');
+ unsigned line, column;
+ if (lineColumnStr.first.getAsInteger(10, line) ||
+ lineColumnStr.second.getAsInteger(10, column)) {
+ Diags.diagnose(SourceLoc(), diag::error_invalid_source_location_str,
+ location);
+ invalid = true;
+ continue;
+ }
+ Opts.DumpScopeMapLocations.push_back({line, column});
+ }
+
+ if (!invalid && Opts.DumpScopeMapLocations.empty())
+ Diags.diagnose(SourceLoc(), diag::error_no_source_location_scope_map);
+}
+
+FrontendOptions::ActionType
+ArgsToFrontendOptionsConverter::determineRequestedAction() const {
+ using namespace options;
+ const Arg *A = Args.getLastArg(OPT_modes_Group);
+ if (!A) {
+ // We don't have a mode, so determine a default.
+ if (Args.hasArg(OPT_emit_module, OPT_emit_module_path)) {
+ // We've been told to emit a module, but have no other mode indicators.
+ // As a result, put the frontend into EmitModuleOnly mode.
+ // (Setting up module output will be handled below.)
+ return FrontendOptions::ActionType::EmitModuleOnly;
+ }
+ return FrontendOptions::ActionType::NoneAction;
+ }
+ Option Opt = A->getOption();
+ if (Opt.matches(OPT_emit_object))
+ return FrontendOptions::ActionType::EmitObject;
+ if (Opt.matches(OPT_emit_assembly))
+ return FrontendOptions::ActionType::EmitAssembly;
+ if (Opt.matches(OPT_emit_ir))
+ return FrontendOptions::ActionType::EmitIR;
+ if (Opt.matches(OPT_emit_bc))
+ return FrontendOptions::ActionType::EmitBC;
+ if (Opt.matches(OPT_emit_sil))
+ return FrontendOptions::ActionType::EmitSIL;
+ if (Opt.matches(OPT_emit_silgen))
+ return FrontendOptions::ActionType::EmitSILGen;
+ if (Opt.matches(OPT_emit_sib))
+ return FrontendOptions::ActionType::EmitSIB;
+ if (Opt.matches(OPT_emit_sibgen))
+ return FrontendOptions::ActionType::EmitSIBGen;
+ if (Opt.matches(OPT_emit_pch))
+ return FrontendOptions::ActionType::EmitPCH;
+ if (Opt.matches(OPT_emit_imported_modules))
+ return FrontendOptions::ActionType::EmitImportedModules;
+ if (Opt.matches(OPT_parse))
+ return FrontendOptions::ActionType::Parse;
+ if (Opt.matches(OPT_typecheck))
+ return FrontendOptions::ActionType::Typecheck;
+ if (Opt.matches(OPT_dump_parse))
+ return FrontendOptions::ActionType::DumpParse;
+ if (Opt.matches(OPT_dump_ast))
+ return FrontendOptions::ActionType::DumpAST;
+ if (Opt.matches(OPT_emit_syntax))
+ return FrontendOptions::ActionType::EmitSyntax;
+ if (Opt.matches(OPT_merge_modules))
+ return FrontendOptions::ActionType::MergeModules;
+ if (Opt.matches(OPT_dump_scope_maps))
+ return FrontendOptions::ActionType::DumpScopeMaps;
+ if (Opt.matches(OPT_dump_type_refinement_contexts))
+ return FrontendOptions::ActionType::DumpTypeRefinementContexts;
+ if (Opt.matches(OPT_dump_interface_hash))
+ return FrontendOptions::ActionType::DumpInterfaceHash;
+ if (Opt.matches(OPT_print_ast))
+ return FrontendOptions::ActionType::PrintAST;
+
+ if (Opt.matches(OPT_repl) || Opt.matches(OPT_deprecated_integrated_repl))
+ return FrontendOptions::ActionType::REPL;
+ if (Opt.matches(OPT_interpret))
+ return FrontendOptions::ActionType::Immediate;
+
+ llvm_unreachable("Unhandled mode option");
+}
+
+bool ArgsToFrontendOptionsConverter::setUpForSILOrLLVM() {
+ using namespace options;
+ bool treatAsSIL =
+ Args.hasArg(OPT_parse_sil) || Opts.Inputs.shouldTreatAsSIL();
+ bool treatAsLLVM = Opts.Inputs.shouldTreatAsLLVM();
+
+ if (Opts.Inputs.verifyInputs(
+ Diags, treatAsSIL,
+ Opts.RequestedAction == FrontendOptions::ActionType::REPL,
+ Opts.RequestedAction == FrontendOptions::ActionType::NoneAction)) {
+ return true;
+ }
+ if (Opts.RequestedAction == FrontendOptions::ActionType::Immediate) {
+ Opts.ImmediateArgv.push_back(
+ Opts.Inputs.getFilenameOfFirstInput()); // argv[0]
+ if (const Arg *A = Args.getLastArg(OPT__DASH_DASH)) {
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
+ Opts.ImmediateArgv.push_back(A->getValue(i));
+ }
+ }
+ }
+
+ if (treatAsSIL)
+ Opts.InputKind = InputFileKind::IFK_SIL;
+ else if (treatAsLLVM)
+ Opts.InputKind = InputFileKind::IFK_LLVM_IR;
+ else if (Args.hasArg(OPT_parse_as_library))
+ Opts.InputKind = InputFileKind::IFK_Swift_Library;
+ else if (Opts.RequestedAction == FrontendOptions::ActionType::REPL)
+ Opts.InputKind = InputFileKind::IFK_Swift_REPL;
+ else
+ Opts.InputKind = InputFileKind::IFK_Swift;
+
+ return false;
+}
+
+bool ArgsToFrontendOptionsConverter::computeModuleName() {
+ const Arg *A = Args.getLastArg(options::OPT_module_name);
+ if (A) {
+ Opts.ModuleName = A->getValue();
+ } else if (Opts.ModuleName.empty()) {
+ // The user did not specify a module name, so determine a default fallback
+ // based on other options.
+
+ // Note: this code path will only be taken when running the frontend
+ // directly; the driver should always pass -module-name when invoking the
+ // frontend.
+ if (computeFallbackModuleName())
+ return true;
+ }
+
+ if (Lexer::isIdentifier(Opts.ModuleName) &&
+ (Opts.ModuleName != STDLIB_NAME || Opts.ParseStdlib)) {
+ return false;
+ }
+ if (!FrontendOptions::needsProperModuleName(Opts.RequestedAction) ||
+ Opts.isCompilingExactlyOneSwiftFile()) {
+ Opts.ModuleName = "main";
+ return false;
+ }
+ auto DID = (Opts.ModuleName == STDLIB_NAME) ? diag::error_stdlib_module_name
+ : diag::error_bad_module_name;
+ Diags.diagnose(SourceLoc(), DID, Opts.ModuleName, A == nullptr);
+ Opts.ModuleName = "__bad__";
+ return false; // FIXME: Must continue to run to pass the tests, but should not
+ // have to.
+}
+
+bool ArgsToFrontendOptionsConverter::computeFallbackModuleName() {
+ if (Opts.RequestedAction == FrontendOptions::ActionType::REPL) {
+ // Default to a module named "REPL" if we're in REPL mode.
+ Opts.ModuleName = "REPL";
+ return false;
+ }
+ // In order to pass some tests, must leave ModuleName empty.
+ if (!Opts.Inputs.hasInputs()) {
+ Opts.ModuleName = StringRef();
+ // FIXME: This is a bug that should not happen, but does in tests.
+ // The compiler should bail out earlier, where "no frontend action was
+ // selected".
+ return false;
+ }
+ ArrayRef<std::string> outputFilenames =
+ getOutputFilenamesFromCommandLineOrFilelist();
+
+ bool isOutputAUniqueOrdinaryFile =
+ outputFilenames.size() == 1 && outputFilenames[0] != "-" &&
+ !llvm::sys::fs::is_directory(outputFilenames[0]);
+ std::string nameToStem = isOutputAUniqueOrdinaryFile
+ ? outputFilenames[0]
+ : Opts.Inputs.getFilenameOfFirstInput().str();
+ Opts.ModuleName = llvm::sys::path::stem(nameToStem);
+ return false;
+}
+
+bool ArgsToFrontendOptionsConverter::computeOutputFilenames() {
+ assert(Opts.OutputFilenames.empty() &&
+ "Output filename should not be set at this point");
+ if (!FrontendOptions::doesActionProduceOutput(Opts.RequestedAction)) {
+ return false;
+ }
+ ArrayRef<std::string> outputFilenamesFromCommandLineOrFilelist =
+ getOutputFilenamesFromCommandLineOrFilelist();
+
+ if (outputFilenamesFromCommandLineOrFilelist.size() > 1) {
+ // WMO, threaded with N files (also someday batch mode).
+ Opts.OutputFilenames = outputFilenamesFromCommandLineOrFilelist;
+ return false;
+ }
+
+ if (outputFilenamesFromCommandLineOrFilelist.empty()) {
+ // When the Frontend is invoked without going through the driver
+ // (e.g. for testing), it is convenient to derive output filenames from
+ // input.
+ return deriveOutputFilenameFromInputFile();
+ }
+
+ StringRef outputFilename = outputFilenamesFromCommandLineOrFilelist[0];
+ if (!llvm::sys::fs::is_directory(outputFilename)) {
+ // Could be -primary-file (1), or -wmo (non-threaded w/ N (input) files)
+ Opts.OutputFilenames = outputFilenamesFromCommandLineOrFilelist;
+ return false;
+ }
+ // Only used for testing & when invoking frontend directly.
+ return deriveOutputFilenameForDirectory(outputFilename);
+}
+
+bool ArgsToFrontendOptionsConverter::deriveOutputFilenameFromInputFile() {
+ if (Opts.Inputs.isReadingFromStdin() ||
+ FrontendOptions::doesActionProduceTextualOutput(Opts.RequestedAction)) {
+ Opts.setOutputFilenameToStdout();
+ return false;
+ }
+ std::string baseName = determineBaseNameOfOutput();
+ if (baseName.empty()) {
+ if (Opts.RequestedAction != FrontendOptions::ActionType::REPL &&
+ Opts.RequestedAction != FrontendOptions::ActionType::Immediate &&
+ Opts.RequestedAction != FrontendOptions::ActionType::NoneAction) {
+ Diags.diagnose(SourceLoc(), diag::error_no_output_filename_specified);
+ return true;
+ }
+ return false;
+ }
+ deriveOutputFilenameFromParts("", baseName);
+ return false;
+}
+
+bool ArgsToFrontendOptionsConverter::deriveOutputFilenameForDirectory(
+ StringRef outputDir) {
+
+ std::string baseName = determineBaseNameOfOutput();
+ if (baseName.empty()) {
+ Diags.diagnose(SourceLoc(), diag::error_implicit_output_file_is_directory,
+ outputDir);
+ return true;
+ }
+ deriveOutputFilenameFromParts(outputDir, baseName);
+ return false;
+}
+
+void ArgsToFrontendOptionsConverter::deriveOutputFilenameFromParts(
+ StringRef dir, StringRef base) {
+ assert(!base.empty());
+ llvm::SmallString<128> path(dir);
+ llvm::sys::path::append(path, base);
+ StringRef suffix = FrontendOptions::suffixForPrincipalOutputFileForAction(
+ Opts.RequestedAction);
+ llvm::sys::path::replace_extension(path, suffix);
+ Opts.OutputFilenames.push_back(path.str());
+}
+
+std::string ArgsToFrontendOptionsConverter::determineBaseNameOfOutput() const {
+ std::string nameToStem;
+ if (Opts.Inputs.hasPrimaryInputs()) {
+ nameToStem = Opts.Inputs.getRequiredUniquePrimaryInput().file();
+ } else if (auto UserSpecifiedModuleName =
+ Args.getLastArg(options::OPT_module_name)) {
+ nameToStem = UserSpecifiedModuleName->getValue();
+ } else if (Opts.Inputs.hasSingleInput()) {
+ nameToStem = Opts.Inputs.getFilenameOfFirstInput();
+ } else
+ nameToStem = "";
+
+ return llvm::sys::path::stem(nameToStem).str();
+}
+
+ArrayRef<std::string>
+ArgsToFrontendOptionsConverter::getOutputFilenamesFromCommandLineOrFilelist() {
+ if (cachedOutputFilenamesFromCommandLineOrFilelist) {
+ return *cachedOutputFilenamesFromCommandLineOrFilelist;
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_output_filelist)) {
+ assert(!Args.hasArg(options::OPT_o) &&
+ "don't use -o with -output-filelist");
+ cachedOutputFilenamesFromCommandLineOrFilelist.emplace(
+ readOutputFileList(A->getValue()));
+ } else {
+ cachedOutputFilenamesFromCommandLineOrFilelist.emplace(
+ Args.getAllArgValues(options::OPT_o));
+ }
+ return *cachedOutputFilenamesFromCommandLineOrFilelist;
+}
+
+/// Try to read an output file list file.
+std::vector<std::string> ArgsToFrontendOptionsConverter::readOutputFileList(
+ const StringRef filelistPath) const {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> buffer =
+ llvm::MemoryBuffer::getFile(filelistPath);
+ if (!buffer) {
+ Diags.diagnose(SourceLoc(), diag::cannot_open_file, filelistPath,
+ buffer.getError().message());
+ }
+ std::vector<std::string> outputFiles;
+ for (StringRef line : make_range(llvm::line_iterator(*buffer.get()), {})) {
+ outputFiles.push_back(line.str());
+ }
+ return outputFiles;
+}
+
+void ArgsToFrontendOptionsConverter::determineSupplementaryOutputFilenames() {
+ using namespace options;
+ auto determineOutputFilename =
+ [&](std::string &output, OptSpecifier optWithoutPath,
+ OptSpecifier optWithPath, const char *extension, bool useMainOutput) {
+ if (const Arg *A = Args.getLastArg(optWithPath)) {
+ Args.ClaimAllArgs(optWithoutPath);
+ output = A->getValue();
+ return;
+ }
+
+ if (!Args.hasArg(optWithoutPath))
+ return;
+
+ if (useMainOutput && !Opts.OutputFilenames.empty()) {
+ output = Opts.getSingleOutputFilename();
+ return;
+ }
+
+ if (!output.empty())
+ return;
+
+ llvm::SmallString<128> path(Opts.originalPath());
+ llvm::sys::path::replace_extension(path, extension);
+ output = path.str();
+ };
+
+ determineOutputFilename(Opts.DependenciesFilePath, OPT_emit_dependencies,
+ OPT_emit_dependencies_path, "d", false);
+ determineOutputFilename(
+ Opts.ReferenceDependenciesFilePath, OPT_emit_reference_dependencies,
+ OPT_emit_reference_dependencies_path, "swiftdeps", false);
+ determineOutputFilename(Opts.SerializedDiagnosticsPath,
+ OPT_serialize_diagnostics,
+ OPT_serialize_diagnostics_path, "dia", false);
+ determineOutputFilename(Opts.ObjCHeaderOutputPath, OPT_emit_objc_header,
+ OPT_emit_objc_header_path, "h", false);
+ determineOutputFilename(
+ Opts.LoadedModuleTracePath, OPT_emit_loaded_module_trace,
+ OPT_emit_loaded_module_trace_path, "trace.json", false);
+
+ determineOutputFilename(Opts.TBDPath, OPT_emit_tbd, OPT_emit_tbd_path, "tbd",
+ false);
+
+ if (const Arg *A = Args.getLastArg(OPT_emit_fixits_path)) {
+ Opts.FixitsOutputPath = A->getValue();
+ }
+
+ bool isSIB = Opts.RequestedAction == FrontendOptions::ActionType::EmitSIB ||
+ Opts.RequestedAction == FrontendOptions::ActionType::EmitSIBGen;
+ bool canUseMainOutputForModule =
+ Opts.RequestedAction == FrontendOptions::ActionType::MergeModules ||
+ Opts.RequestedAction == FrontendOptions::ActionType::EmitModuleOnly ||
+ isSIB;
+ auto ext = isSIB ? SIB_EXTENSION : SERIALIZED_MODULE_EXTENSION;
+ auto sibOpt = Opts.RequestedAction == FrontendOptions::ActionType::EmitSIB
+ ? OPT_emit_sib
+ : OPT_emit_sibgen;
+ determineOutputFilename(Opts.ModuleOutputPath,
+ isSIB ? sibOpt : OPT_emit_module,
+ OPT_emit_module_path, ext, canUseMainOutputForModule);
+
+ determineOutputFilename(Opts.ModuleDocOutputPath, OPT_emit_module_doc,
+ OPT_emit_module_doc_path,
+ SERIALIZED_MODULE_DOC_EXTENSION, false);
+}
+
+bool ArgsToFrontendOptionsConverter::checkForUnusedOutputPaths() const {
+ if (Opts.hasUnusedDependenciesFilePath()) {
+ Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_dependencies);
+ return true;
+ }
+ if (Opts.hasUnusedObjCHeaderOutputPath()) {
+ Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_header);
+ return true;
+ }
+ if (Opts.hasUnusedLoadedModuleTracePath()) {
+ Diags.diagnose(SourceLoc(),
+ diag::error_mode_cannot_emit_loaded_module_trace);
+ return true;
+ }
+ if (Opts.hasUnusedModuleOutputPath()) {
+ Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_module);
+ return true;
+ }
+ if (Opts.hasUnusedModuleDocOutputPath()) {
+ Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_module_doc);
+ return true;
+ }
+ return false;
+}
+
+void ArgsToFrontendOptionsConverter::computeImportObjCHeaderOptions() {
+ using namespace options;
+ if (const Arg *A = Args.getLastArgNoClaim(OPT_import_objc_header)) {
+ Opts.ImplicitObjCHeaderPath = A->getValue();
+ Opts.SerializeBridgingHeader |=
+ !Opts.Inputs.hasPrimaryInputs() && !Opts.ModuleOutputPath.empty();
+ }
+}
+void ArgsToFrontendOptionsConverter::computeImplicitImportModuleNames() {
+ using namespace options;
+ for (const Arg *A : Args.filtered(OPT_import_module)) {
+ Opts.ImplicitImportModuleNames.push_back(A->getValue());
+ }
+}
+void ArgsToFrontendOptionsConverter::computeLLVMArgs() {
+ using namespace options;
+ for (const Arg *A : Args.filtered(OPT_Xllvm)) {
+ Opts.LLVMArgs.push_back(A->getValue());
+ }
+}
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 9664563..b47b60a 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -1,7 +1,10 @@
add_swift_library(swiftFrontend STATIC
+ ArgsToFrontendInputsConverter.cpp
+ ArgsToFrontendOptionsConverter.cpp
CompilerInvocation.cpp
DiagnosticVerifier.cpp
Frontend.cpp
+ FrontendInputs.cpp
FrontendOptions.cpp
PrintingDiagnosticConsumer.cpp
SerializedDiagnosticConsumer.cpp
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index da40d80..fe3237d 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -11,8 +11,10 @@
//===----------------------------------------------------------------------===//
#include "swift/Frontend/Frontend.h"
+
#include "swift/AST/DiagnosticsFrontend.h"
#include "swift/Basic/Platform.h"
+#include "swift/Frontend/ArgsToFrontendOptionsConverter.h"
#include "swift/Option/Options.h"
#include "swift/Option/SanitizerOptions.h"
#include "swift/Strings.h"
@@ -79,865 +81,9 @@
llvm_unreachable("Unhandled InputFileKind in switch.");
}
-// This is a separate function so that it shows up in stack traces.
-LLVM_ATTRIBUTE_NOINLINE
-static void debugFailWithAssertion() {
- // This assertion should always fail, per the user's request, and should
- // not be converted to llvm_unreachable.
- assert(0 && "This is an assertion!");
-}
-
-// This is a separate function so that it shows up in stack traces.
-LLVM_ATTRIBUTE_NOINLINE
-static void debugFailWithCrash() { LLVM_BUILTIN_TRAP; }
-
-namespace swift {
-
-/// Implement argument semantics in a way that will make it easier to have
-/// >1 primary file (or even a primary file list) in the future without
-/// breaking anything today.
-///
-/// Semantics today:
-/// If input files are on command line, primary files on command line are also
-/// input files; they are not repeated without -primary-file. If input files are
-/// in a file list, the primary files on the command line are repeated in the
-/// file list. Thus, if there are any primary files, it is illegal to have both
-/// (non-primary) input files and a file list. Finally, the order of input files
-/// must match the order given on the command line or the file list.
-///
-/// Side note:
-/// since each input file will cause a lot of work for the compiler, this code
-/// is biased towards clarity and not optimized.
-/// In the near future, it will be possible to put primary files in the
-/// filelist, or to have a separate filelist for primaries. The organization
-/// here anticipates that evolution.
-
-class ArgsToFrontendInputsConverter {
- DiagnosticEngine &Diags;
- const ArgList &Args;
- FrontendInputs &Inputs;
-
- Arg const *const FilelistPathArg;
- Arg const *const PrimaryFilelistPathArg;
-
- SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 4> BuffersToKeepAlive;
-
- llvm::SetVector<StringRef> Files;
-
-public:
- ArgsToFrontendInputsConverter(DiagnosticEngine &Diags, const ArgList &Args,
- FrontendInputs &Inputs)
- : Diags(Diags), Args(Args), Inputs(Inputs),
- FilelistPathArg(Args.getLastArg(options::OPT_filelist)),
- PrimaryFilelistPathArg(Args.getLastArg(options::OPT_primary_filelist)) {
- }
-
- bool convert() {
- if (enforceFilelistExclusion())
- return true;
- if (FilelistPathArg ? readInputFilesFromFilelist()
- : readInputFilesFromCommandLine())
- return true;
- Optional<std::set<StringRef>> primaryFiles = readPrimaryFiles();
- if (!primaryFiles)
- return true;
- std::set<StringRef> unusedPrimaryFiles =
- createInputFilesConsumingPrimaries(*primaryFiles);
- return checkForMissingPrimaryFiles(unusedPrimaryFiles);
- }
-
-private:
- bool enforceFilelistExclusion() {
- if (Args.hasArg(options::OPT_INPUT) && FilelistPathArg) {
- Diags.diagnose(SourceLoc(),
- diag::error_cannot_have_input_files_with_file_list);
- return true;
- }
- // The following is not strictly necessary, but the restriction makes
- // it easier to understand a given command line:
- if (Args.hasArg(options::OPT_primary_file) && PrimaryFilelistPathArg) {
- Diags.diagnose(
- SourceLoc(),
- diag::error_cannot_have_primary_files_with_primary_file_list);
- return true;
- }
- return false;
- }
-
- bool readInputFilesFromCommandLine() {
- bool hadDuplicates = false;
- for (const Arg *A :
- Args.filtered(options::OPT_INPUT, options::OPT_primary_file)) {
- hadDuplicates = addFile(A->getValue()) || hadDuplicates;
- }
- return false; // FIXME: Don't bail out for duplicates, too many tests depend
- // on it.
- }
-
- bool readInputFilesFromFilelist() {
- bool hadDuplicates = false;
- bool hadError =
- forAllFilesInFilelist(FilelistPathArg, [&](StringRef file) -> void {
- hadDuplicates = addFile(file) || hadDuplicates;
- });
- if (hadError)
- return true;
- return false; // FIXME: Don't bail out for duplicates, too many tests depend on it.
- }
-
- bool forAllFilesInFilelist(Arg const *const pathArg,
- llvm::function_ref<void(StringRef)> fn) {
- if (!pathArg)
- return false;
- StringRef path = pathArg->getValue();
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> filelistBufferOrError =
- llvm::MemoryBuffer::getFile(path);
- if (!filelistBufferOrError) {
- Diags.diagnose(SourceLoc(), diag::cannot_open_file, path,
- filelistBufferOrError.getError().message());
- return true;
- }
- for (auto file :
- llvm::make_range(llvm::line_iterator(*filelistBufferOrError->get()),
- llvm::line_iterator()))
- fn(file);
- BuffersToKeepAlive.push_back(std::move(*filelistBufferOrError));
- return false;
- }
-
- bool addFile(StringRef file) {
- if (Files.insert(file))
- return false;
- Diags.diagnose(SourceLoc(), diag::error_duplicate_input_file, file);
- return true;
- }
-
- Optional<std::set<StringRef>> readPrimaryFiles() {
- std::set<StringRef> primaryFiles;
- for (const Arg *A : Args.filtered(options::OPT_primary_file))
- primaryFiles.insert(A->getValue());
- if (forAllFilesInFilelist(
- PrimaryFilelistPathArg,
- [&](StringRef file) -> void { primaryFiles.insert(file); }))
- return None;
- return primaryFiles;
- }
-
- std::set<StringRef>
- createInputFilesConsumingPrimaries(std::set<StringRef> primaryFiles) {
- for (auto &file : Files) {
- bool isPrimary = primaryFiles.count(file) > 0;
- Inputs.addInput(InputFile(file, isPrimary));
- if (isPrimary)
- primaryFiles.erase(file);
- }
- return primaryFiles;
- }
-
- bool checkForMissingPrimaryFiles(std::set<StringRef> primaryFiles) {
- for (auto &file : primaryFiles) {
- // Catch "swiftc -frontend -c -filelist foo -primary-file
- // some-file-not-in-foo".
- assert(FilelistPathArg && "Missing primary with no filelist");
- Diags.diagnose(SourceLoc(), diag::error_primary_file_not_found, file,
- FilelistPathArg->getValue());
- }
- return !primaryFiles.empty();
- }
-};
-class FrontendArgsToOptionsConverter {
-private:
- DiagnosticEngine &Diags;
- const llvm::opt::ArgList &Args;
- FrontendOptions &Opts;
-
- Optional<const std::vector<std::string>>
- cachedOutputFilenamesFromCommandLineOrFilelist;
-
- void handleDebugCrashGroupArguments();
-
- void computeDebugTimeOptions();
- bool computeFallbackModuleName();
- bool computeModuleName();
- bool computeOutputFilenames();
- void computeDumpScopeMapLocations();
- void computeHelpOptions();
- void computeImplicitImportModuleNames();
- void computeImportObjCHeaderOptions();
- void computeLLVMArgs();
- void computePlaygroundOptions();
- void computePrintStatsOptions();
- void computeTBDOptions();
-
- void setUnsignedIntegerArgument(options::ID optionID, unsigned max,
- unsigned &valueToSet);
-
- FrontendOptions::ActionType determineRequestedAction() const;
-
- bool setUpForSILOrLLVM();
-
- /// Determine the correct output filename when none was specified.
- ///
- /// Such an absence should only occur when invoking the frontend
- /// without the driver,
- /// because the driver will always pass -o with an appropriate filename
- /// if output is required for the requested action.
- bool deriveOutputFilenameFromInputFile();
-
- /// Determine the correct output filename when a directory was specified.
- ///
- /// Such a specification should only occur when invoking the frontend
- /// directly, because the driver will always pass -o with an appropriate
- /// filename if output is required for the requested action.
- bool deriveOutputFilenameForDirectory(StringRef outputDir);
-
- std::string determineBaseNameOfOutput() const;
-
- void deriveOutputFilenameFromParts(StringRef dir, StringRef base);
-
- void determineSupplementaryOutputFilenames();
-
- /// Returns the output filenames on the command line or in the output
- /// filelist. If there
- /// were neither -o's nor an output filelist, returns an empty vector.
- ArrayRef<std::string> getOutputFilenamesFromCommandLineOrFilelist();
-
- bool checkForUnusedOutputPaths() const;
-
- std::vector<std::string> readOutputFileList(StringRef filelistPath) const;
-
-public:
- FrontendArgsToOptionsConverter(DiagnosticEngine &Diags,
- const llvm::opt::ArgList &Args,
- FrontendOptions &Opts)
- : Diags(Diags), Args(Args), Opts(Opts) {}
-
- bool convert();
-};
-} // namespace swift
-
-bool FrontendArgsToOptionsConverter::convert() {
- using namespace options;
-
- handleDebugCrashGroupArguments();
-
- if (const Arg *A = Args.getLastArg(OPT_dump_api_path)) {
- Opts.DumpAPIPath = A->getValue();
- }
- if (const Arg *A = Args.getLastArg(OPT_group_info_path)) {
- Opts.GroupInfoPath = A->getValue();
- }
- if (const Arg *A = Args.getLastArg(OPT_index_store_path)) {
- Opts.IndexStorePath = A->getValue();
- }
- Opts.IndexSystemModules |= Args.hasArg(OPT_index_system_modules);
-
- Opts.EmitVerboseSIL |= Args.hasArg(OPT_emit_verbose_sil);
- Opts.EmitSortedSIL |= Args.hasArg(OPT_emit_sorted_sil);
-
- Opts.EnableTesting |= Args.hasArg(OPT_enable_testing);
- Opts.EnableResilience |= Args.hasArg(OPT_enable_resilience);
-
- computePrintStatsOptions();
- computeDebugTimeOptions();
- computeTBDOptions();
-
- setUnsignedIntegerArgument(OPT_warn_long_function_bodies, 10,
- Opts.WarnLongFunctionBodies);
- setUnsignedIntegerArgument(OPT_warn_long_expression_type_checking, 10,
- Opts.WarnLongExpressionTypeChecking);
- setUnsignedIntegerArgument(OPT_solver_expression_time_threshold_EQ, 10,
- Opts.SolverExpressionTimeThreshold);
-
- computePlaygroundOptions();
-
- // This can be enabled independently of the playground transform.
- Opts.PCMacro |= Args.hasArg(OPT_pc_macro);
-
- computeHelpOptions();
- if (ArgsToFrontendInputsConverter(Diags, Args, Opts.Inputs).convert())
- return true;
-
- Opts.ParseStdlib |= Args.hasArg(OPT_parse_stdlib);
-
- if (const Arg *A = Args.getLastArg(OPT_verify_generic_signatures)) {
- Opts.VerifyGenericSignaturesInModule = A->getValue();
- }
-
- computeDumpScopeMapLocations();
- Opts.RequestedAction = determineRequestedAction();
-
- if (Opts.RequestedAction == FrontendOptions::ActionType::Immediate &&
- Opts.Inputs.hasPrimaryInputs()) {
- Diags.diagnose(SourceLoc(), diag::error_immediate_mode_primary_file);
- return true;
- }
-
- if (setUpForSILOrLLVM())
- return true;
-
- if (computeModuleName())
- return true;
-
- if (computeOutputFilenames())
- return true;
- determineSupplementaryOutputFilenames();
-
- if (checkForUnusedOutputPaths())
- return true;
-
- if (const Arg *A = Args.getLastArg(OPT_module_link_name)) {
- Opts.ModuleLinkName = A->getValue();
- }
-
- Opts.AlwaysSerializeDebuggingOptions |=
- Args.hasArg(OPT_serialize_debugging_options);
- Opts.EnableSourceImport |= Args.hasArg(OPT_enable_source_import);
- Opts.ImportUnderlyingModule |= Args.hasArg(OPT_import_underlying_module);
- Opts.EnableSerializationNestedTypeLookupTable &=
- !Args.hasArg(OPT_disable_serialization_nested_type_lookup_table);
-
- computeImportObjCHeaderOptions();
- computeImplicitImportModuleNames();
- computeLLVMArgs();
-
- return false;
-}
-
-void FrontendArgsToOptionsConverter::handleDebugCrashGroupArguments() {
- using namespace options;
-
- if (const Arg *A = Args.getLastArg(OPT_debug_crash_Group)) {
- Option Opt = A->getOption();
- if (Opt.matches(OPT_debug_assert_immediately)) {
- debugFailWithAssertion();
- } else if (Opt.matches(OPT_debug_crash_immediately)) {
- debugFailWithCrash();
- } else if (Opt.matches(OPT_debug_assert_after_parse)) {
- // Set in FrontendOptions
- Opts.CrashMode = FrontendOptions::DebugCrashMode::AssertAfterParse;
- } else if (Opt.matches(OPT_debug_crash_after_parse)) {
- // Set in FrontendOptions
- Opts.CrashMode = FrontendOptions::DebugCrashMode::CrashAfterParse;
- } else {
- llvm_unreachable("Unknown debug_crash_Group option!");
- }
- }
-}
-
-void FrontendArgsToOptionsConverter::computePrintStatsOptions() {
- using namespace options;
- Opts.PrintStats |= Args.hasArg(OPT_print_stats);
- Opts.PrintClangStats |= Args.hasArg(OPT_print_clang_stats);
-#if defined(NDEBUG) && !defined(LLVM_ENABLE_STATS)
- if (Opts.PrintStats || Opts.PrintClangStats)
- Diags.diagnose(SourceLoc(), diag::stats_disabled);
-#endif
-}
-
-void FrontendArgsToOptionsConverter::computeDebugTimeOptions() {
- using namespace options;
- Opts.DebugTimeFunctionBodies |= Args.hasArg(OPT_debug_time_function_bodies);
- Opts.DebugTimeExpressionTypeChecking |=
- Args.hasArg(OPT_debug_time_expression_type_checking);
- Opts.DebugTimeCompilation |= Args.hasArg(OPT_debug_time_compilation);
- if (const Arg *A = Args.getLastArg(OPT_stats_output_dir)) {
- Opts.StatsOutputDir = A->getValue();
- if (Args.getLastArg(OPT_trace_stats_events)) {
- Opts.TraceStats = true;
- }
- }
-}
-
-void FrontendArgsToOptionsConverter::computeTBDOptions() {
- using namespace options;
- if (const Arg *A = Args.getLastArg(OPT_validate_tbd_against_ir_EQ)) {
- using Mode = FrontendOptions::TBDValidationMode;
- StringRef value = A->getValue();
- if (value == "none") {
- Opts.ValidateTBDAgainstIR = Mode::None;
- } else if (value == "missing") {
- Opts.ValidateTBDAgainstIR = Mode::MissingFromTBD;
- } else if (value == "all") {
- Opts.ValidateTBDAgainstIR = Mode::All;
- } else {
- Diags.diagnose(SourceLoc(), diag::error_unsupported_option_argument,
- A->getOption().getPrefixedName(), value);
- }
- }
- if (const Arg *A = Args.getLastArg(OPT_tbd_install_name)) {
- Opts.TBDInstallName = A->getValue();
- }
-}
-
-void FrontendArgsToOptionsConverter::setUnsignedIntegerArgument(
- options::ID optionID, unsigned max, unsigned &valueToSet) {
- if (const Arg *A = Args.getLastArg(optionID)) {
- unsigned attempt;
- if (StringRef(A->getValue()).getAsInteger(max, attempt)) {
- Diags.diagnose(SourceLoc(), diag::error_invalid_arg_value,
- A->getAsString(Args), A->getValue());
- } else {
- valueToSet = attempt;
- }
- }
-}
-
-void FrontendArgsToOptionsConverter::computePlaygroundOptions() {
- using namespace options;
- Opts.PlaygroundTransform |= Args.hasArg(OPT_playground);
- if (Args.hasArg(OPT_disable_playground_transform))
- Opts.PlaygroundTransform = false;
- Opts.PlaygroundHighPerformance |=
- Args.hasArg(OPT_playground_high_performance);
-}
-
-void FrontendArgsToOptionsConverter::computeHelpOptions() {
- using namespace options;
- if (const Arg *A = Args.getLastArg(OPT_help, OPT_help_hidden)) {
- if (A->getOption().matches(OPT_help)) {
- Opts.PrintHelp = true;
- } else if (A->getOption().matches(OPT_help_hidden)) {
- Opts.PrintHelpHidden = true;
- } else {
- llvm_unreachable("Unknown help option parsed");
- }
- }
-}
-
-void FrontendArgsToOptionsConverter::computeDumpScopeMapLocations() {
- using namespace options;
- const Arg *A = Args.getLastArg(OPT_modes_Group);
- if (!A || !A->getOption().matches(OPT_dump_scope_maps))
- return;
- StringRef value = A->getValue();
- if (value == "expanded") {
- // Note: fully expanded the scope map.
- return;
- }
- // Parse a comma-separated list of line:column for lookups to
- // perform (and dump the result of).
- SmallVector<StringRef, 4> locations;
- value.split(locations, ',');
-
- bool invalid = false;
- for (auto location : locations) {
- auto lineColumnStr = location.split(':');
- unsigned line, column;
- if (lineColumnStr.first.getAsInteger(10, line) ||
- lineColumnStr.second.getAsInteger(10, column)) {
- Diags.diagnose(SourceLoc(), diag::error_invalid_source_location_str,
- location);
- invalid = true;
- continue;
- }
- Opts.DumpScopeMapLocations.push_back({line, column});
- }
-
- if (!invalid && Opts.DumpScopeMapLocations.empty())
- Diags.diagnose(SourceLoc(), diag::error_no_source_location_scope_map);
-}
-
-FrontendOptions::ActionType
-FrontendArgsToOptionsConverter::determineRequestedAction() const {
- using namespace options;
- const Arg *A = Args.getLastArg(OPT_modes_Group);
- if (!A) {
- // We don't have a mode, so determine a default.
- if (Args.hasArg(OPT_emit_module, OPT_emit_module_path)) {
- // We've been told to emit a module, but have no other mode indicators.
- // As a result, put the frontend into EmitModuleOnly mode.
- // (Setting up module output will be handled below.)
- return FrontendOptions::ActionType::EmitModuleOnly;
- }
- return FrontendOptions::ActionType::NoneAction;
- }
- Option Opt = A->getOption();
- if (Opt.matches(OPT_emit_object))
- return FrontendOptions::ActionType::EmitObject;
- if (Opt.matches(OPT_emit_assembly))
- return FrontendOptions::ActionType::EmitAssembly;
- if (Opt.matches(OPT_emit_ir))
- return FrontendOptions::ActionType::EmitIR;
- if (Opt.matches(OPT_emit_bc))
- return FrontendOptions::ActionType::EmitBC;
- if (Opt.matches(OPT_emit_sil))
- return FrontendOptions::ActionType::EmitSIL;
- if (Opt.matches(OPT_emit_silgen))
- return FrontendOptions::ActionType::EmitSILGen;
- if (Opt.matches(OPT_emit_sib))
- return FrontendOptions::ActionType::EmitSIB;
- if (Opt.matches(OPT_emit_sibgen))
- return FrontendOptions::ActionType::EmitSIBGen;
- if (Opt.matches(OPT_emit_pch))
- return FrontendOptions::ActionType::EmitPCH;
- if (Opt.matches(OPT_emit_imported_modules))
- return FrontendOptions::ActionType::EmitImportedModules;
- if (Opt.matches(OPT_parse))
- return FrontendOptions::ActionType::Parse;
- if (Opt.matches(OPT_typecheck))
- return FrontendOptions::ActionType::Typecheck;
- if (Opt.matches(OPT_dump_parse))
- return FrontendOptions::ActionType::DumpParse;
- if (Opt.matches(OPT_dump_ast))
- return FrontendOptions::ActionType::DumpAST;
- if (Opt.matches(OPT_emit_syntax))
- return FrontendOptions::ActionType::EmitSyntax;
- if (Opt.matches(OPT_merge_modules))
- return FrontendOptions::ActionType::MergeModules;
- if (Opt.matches(OPT_dump_scope_maps))
- return FrontendOptions::ActionType::DumpScopeMaps;
- if (Opt.matches(OPT_dump_type_refinement_contexts))
- return FrontendOptions::ActionType::DumpTypeRefinementContexts;
- if (Opt.matches(OPT_dump_interface_hash))
- return FrontendOptions::ActionType::DumpInterfaceHash;
- if (Opt.matches(OPT_print_ast))
- return FrontendOptions::ActionType::PrintAST;
-
- if (Opt.matches(OPT_repl) || Opt.matches(OPT_deprecated_integrated_repl))
- return FrontendOptions::ActionType::REPL;
- if (Opt.matches(OPT_interpret))
- return FrontendOptions::ActionType::Immediate;
-
- llvm_unreachable("Unhandled mode option");
-}
-
-bool FrontendArgsToOptionsConverter::setUpForSILOrLLVM() {
- using namespace options;
- bool treatAsSIL =
- Args.hasArg(OPT_parse_sil) || Opts.Inputs.shouldTreatAsSIL();
- bool treatAsLLVM = Opts.Inputs.shouldTreatAsLLVM();
-
- if (Opts.Inputs.verifyInputs(
- Diags, treatAsSIL,
- Opts.RequestedAction == FrontendOptions::ActionType::REPL,
- Opts.RequestedAction == FrontendOptions::ActionType::NoneAction)) {
- return true;
- }
- if (Opts.RequestedAction == FrontendOptions::ActionType::Immediate) {
- Opts.ImmediateArgv.push_back(
- Opts.Inputs.getFilenameOfFirstInput()); // argv[0]
- if (const Arg *A = Args.getLastArg(OPT__DASH_DASH)) {
- for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
- Opts.ImmediateArgv.push_back(A->getValue(i));
- }
- }
- }
-
- if (treatAsSIL)
- Opts.InputKind = InputFileKind::IFK_SIL;
- else if (treatAsLLVM)
- Opts.InputKind = InputFileKind::IFK_LLVM_IR;
- else if (Args.hasArg(OPT_parse_as_library))
- Opts.InputKind = InputFileKind::IFK_Swift_Library;
- else if (Opts.RequestedAction == FrontendOptions::ActionType::REPL)
- Opts.InputKind = InputFileKind::IFK_Swift_REPL;
- else
- Opts.InputKind = InputFileKind::IFK_Swift;
-
- return false;
-}
-
-bool FrontendArgsToOptionsConverter::computeModuleName() {
- const Arg *A = Args.getLastArg(options::OPT_module_name);
- if (A) {
- Opts.ModuleName = A->getValue();
- } else if (Opts.ModuleName.empty()) {
- // The user did not specify a module name, so determine a default fallback
- // based on other options.
-
- // Note: this code path will only be taken when running the frontend
- // directly; the driver should always pass -module-name when invoking the
- // frontend.
- if (computeFallbackModuleName())
- return true;
- }
-
- if (Lexer::isIdentifier(Opts.ModuleName) &&
- (Opts.ModuleName != STDLIB_NAME || Opts.ParseStdlib)) {
- return false;
- }
- if (!FrontendOptions::needsProperModuleName(Opts.RequestedAction) ||
- Opts.isCompilingExactlyOneSwiftFile()) {
- Opts.ModuleName = "main";
- return false;
- }
- auto DID = (Opts.ModuleName == STDLIB_NAME) ? diag::error_stdlib_module_name
- : diag::error_bad_module_name;
- Diags.diagnose(SourceLoc(), DID, Opts.ModuleName, A == nullptr);
- Opts.ModuleName = "__bad__";
- return false; // FIXME: Must continue to run to pass the tests, but should not
- // have to.
-}
-
-bool FrontendArgsToOptionsConverter::computeFallbackModuleName() {
- if (Opts.RequestedAction == FrontendOptions::ActionType::REPL) {
- // Default to a module named "REPL" if we're in REPL mode.
- Opts.ModuleName = "REPL";
- return false;
- }
- // In order to pass some tests, must leave ModuleName empty.
- if (!Opts.Inputs.hasInputs()) {
- Opts.ModuleName = StringRef();
- // FIXME: This is a bug that should not happen, but does in tests.
- // The compiler should bail out earlier, where "no frontend action was
- // selected".
- return false;
- }
- ArrayRef<std::string> outputFilenames =
- getOutputFilenamesFromCommandLineOrFilelist();
-
- bool isOutputAUniqueOrdinaryFile =
- outputFilenames.size() == 1 && outputFilenames[0] != "-" &&
- !llvm::sys::fs::is_directory(outputFilenames[0]);
- std::string nameToStem = isOutputAUniqueOrdinaryFile
- ? outputFilenames[0]
- : Opts.Inputs.getFilenameOfFirstInput().str();
- Opts.ModuleName = llvm::sys::path::stem(nameToStem);
- return false;
-}
-
-bool FrontendArgsToOptionsConverter::computeOutputFilenames() {
- assert(Opts.OutputFilenames.empty() &&
- "Output filename should not be set at this point");
- if (!FrontendOptions::doesActionProduceOutput(Opts.RequestedAction)) {
- return false;
- }
- ArrayRef<std::string> outputFilenamesFromCommandLineOrFilelist =
- getOutputFilenamesFromCommandLineOrFilelist();
-
- if (outputFilenamesFromCommandLineOrFilelist.size() > 1) {
- // WMO, threaded with N files (also someday batch mode).
- Opts.OutputFilenames = outputFilenamesFromCommandLineOrFilelist;
- return false;
- }
-
- if (outputFilenamesFromCommandLineOrFilelist.empty()) {
- // When the Frontend is invoked without going through the driver
- // (e.g. for testing), it is convenient to derive output filenames from
- // input.
- return deriveOutputFilenameFromInputFile();
- }
-
- StringRef outputFilename = outputFilenamesFromCommandLineOrFilelist[0];
- if (!llvm::sys::fs::is_directory(outputFilename)) {
- // Could be -primary-file (1), or -wmo (non-threaded w/ N (input) files)
- Opts.OutputFilenames = outputFilenamesFromCommandLineOrFilelist;
- return false;
- }
- // Only used for testing & when invoking frontend directly.
- return deriveOutputFilenameForDirectory(outputFilename);
-}
-
-bool FrontendArgsToOptionsConverter::deriveOutputFilenameFromInputFile() {
- if (Opts.Inputs.isReadingFromStdin() ||
- FrontendOptions::doesActionProduceTextualOutput(Opts.RequestedAction)) {
- Opts.setOutputFilenameToStdout();
- return false;
- }
- std::string baseName = determineBaseNameOfOutput();
- if (baseName.empty()) {
- if (Opts.RequestedAction != FrontendOptions::ActionType::REPL &&
- Opts.RequestedAction != FrontendOptions::ActionType::Immediate &&
- Opts.RequestedAction != FrontendOptions::ActionType::NoneAction) {
- Diags.diagnose(SourceLoc(), diag::error_no_output_filename_specified);
- return true;
- }
- return false;
- }
- deriveOutputFilenameFromParts("", baseName);
- return false;
-}
-
-bool FrontendArgsToOptionsConverter::deriveOutputFilenameForDirectory(
- StringRef outputDir) {
-
- std::string baseName = determineBaseNameOfOutput();
- if (baseName.empty()) {
- Diags.diagnose(SourceLoc(), diag::error_implicit_output_file_is_directory,
- outputDir);
- return true;
- }
- deriveOutputFilenameFromParts(outputDir, baseName);
- return false;
-}
-
-void FrontendArgsToOptionsConverter::deriveOutputFilenameFromParts(
- StringRef dir, StringRef base) {
- assert(!base.empty());
- llvm::SmallString<128> path(dir);
- llvm::sys::path::append(path, base);
- StringRef suffix = FrontendOptions::suffixForPrincipalOutputFileForAction(
- Opts.RequestedAction);
- llvm::sys::path::replace_extension(path, suffix);
- Opts.OutputFilenames.push_back(path.str());
-}
-
-std::string FrontendArgsToOptionsConverter::determineBaseNameOfOutput() const {
- std::string nameToStem;
- if (Opts.Inputs.hasPrimaryInputs()) {
- nameToStem = Opts.Inputs.getRequiredUniquePrimaryInput().file();
- } else if (auto UserSpecifiedModuleName =
- Args.getLastArg(options::OPT_module_name)) {
- nameToStem = UserSpecifiedModuleName->getValue();
- } else if (Opts.Inputs.hasSingleInput()) {
- nameToStem = Opts.Inputs.getFilenameOfFirstInput();
- } else
- nameToStem = "";
-
- return llvm::sys::path::stem(nameToStem).str();
-}
-
-ArrayRef<std::string>
-FrontendArgsToOptionsConverter::getOutputFilenamesFromCommandLineOrFilelist() {
- if (cachedOutputFilenamesFromCommandLineOrFilelist) {
- return *cachedOutputFilenamesFromCommandLineOrFilelist;
- }
-
- if (const Arg *A = Args.getLastArg(options::OPT_output_filelist)) {
- assert(!Args.hasArg(options::OPT_o) &&
- "don't use -o with -output-filelist");
- cachedOutputFilenamesFromCommandLineOrFilelist.emplace(
- readOutputFileList(A->getValue()));
- } else {
- cachedOutputFilenamesFromCommandLineOrFilelist.emplace(
- Args.getAllArgValues(options::OPT_o));
- }
- return *cachedOutputFilenamesFromCommandLineOrFilelist;
-}
-
-/// Try to read an output file list file.
-std::vector<std::string> FrontendArgsToOptionsConverter::readOutputFileList(
- const StringRef filelistPath) const {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> buffer =
- llvm::MemoryBuffer::getFile(filelistPath);
- if (!buffer) {
- Diags.diagnose(SourceLoc(), diag::cannot_open_file, filelistPath,
- buffer.getError().message());
- }
- std::vector<std::string> outputFiles;
- for (StringRef line : make_range(llvm::line_iterator(*buffer.get()), {})) {
- outputFiles.push_back(line.str());
- }
- return outputFiles;
-}
-
-void FrontendArgsToOptionsConverter::determineSupplementaryOutputFilenames() {
- using namespace options;
- auto determineOutputFilename =
- [&](std::string &output, OptSpecifier optWithoutPath,
- OptSpecifier optWithPath, const char *extension, bool useMainOutput) {
- if (const Arg *A = Args.getLastArg(optWithPath)) {
- Args.ClaimAllArgs(optWithoutPath);
- output = A->getValue();
- return;
- }
-
- if (!Args.hasArg(optWithoutPath))
- return;
-
- if (useMainOutput && !Opts.OutputFilenames.empty()) {
- output = Opts.getSingleOutputFilename();
- return;
- }
-
- if (!output.empty())
- return;
-
- llvm::SmallString<128> path(Opts.originalPath());
- llvm::sys::path::replace_extension(path, extension);
- output = path.str();
- };
-
- determineOutputFilename(Opts.DependenciesFilePath, OPT_emit_dependencies,
- OPT_emit_dependencies_path, "d", false);
- determineOutputFilename(
- Opts.ReferenceDependenciesFilePath, OPT_emit_reference_dependencies,
- OPT_emit_reference_dependencies_path, "swiftdeps", false);
- determineOutputFilename(Opts.SerializedDiagnosticsPath,
- OPT_serialize_diagnostics,
- OPT_serialize_diagnostics_path, "dia", false);
- determineOutputFilename(Opts.ObjCHeaderOutputPath, OPT_emit_objc_header,
- OPT_emit_objc_header_path, "h", false);
- determineOutputFilename(
- Opts.LoadedModuleTracePath, OPT_emit_loaded_module_trace,
- OPT_emit_loaded_module_trace_path, "trace.json", false);
-
- determineOutputFilename(Opts.TBDPath, OPT_emit_tbd, OPT_emit_tbd_path, "tbd",
- false);
-
- if (const Arg *A = Args.getLastArg(OPT_emit_fixits_path)) {
- Opts.FixitsOutputPath = A->getValue();
- }
-
- bool isSIB = Opts.RequestedAction == FrontendOptions::ActionType::EmitSIB ||
- Opts.RequestedAction == FrontendOptions::ActionType::EmitSIBGen;
- bool canUseMainOutputForModule =
- Opts.RequestedAction == FrontendOptions::ActionType::MergeModules ||
- Opts.RequestedAction == FrontendOptions::ActionType::EmitModuleOnly ||
- isSIB;
- auto ext = isSIB ? SIB_EXTENSION : SERIALIZED_MODULE_EXTENSION;
- auto sibOpt = Opts.RequestedAction == FrontendOptions::ActionType::EmitSIB
- ? OPT_emit_sib
- : OPT_emit_sibgen;
- determineOutputFilename(Opts.ModuleOutputPath,
- isSIB ? sibOpt : OPT_emit_module,
- OPT_emit_module_path, ext, canUseMainOutputForModule);
-
- determineOutputFilename(Opts.ModuleDocOutputPath, OPT_emit_module_doc,
- OPT_emit_module_doc_path,
- SERIALIZED_MODULE_DOC_EXTENSION, false);
-}
-
-bool FrontendArgsToOptionsConverter::checkForUnusedOutputPaths() const {
- if (Opts.hasUnusedDependenciesFilePath()) {
- Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_dependencies);
- return true;
- }
- if (Opts.hasUnusedObjCHeaderOutputPath()) {
- Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_header);
- return true;
- }
- if (Opts.hasUnusedLoadedModuleTracePath()) {
- Diags.diagnose(SourceLoc(),
- diag::error_mode_cannot_emit_loaded_module_trace);
- return true;
- }
- if (Opts.hasUnusedModuleOutputPath()) {
- Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_module);
- return true;
- }
- if (Opts.hasUnusedModuleDocOutputPath()) {
- Diags.diagnose(SourceLoc(), diag::error_mode_cannot_emit_module_doc);
- return true;
- }
- return false;
-}
-
-void FrontendArgsToOptionsConverter::computeImportObjCHeaderOptions() {
- using namespace options;
- if (const Arg *A = Args.getLastArgNoClaim(OPT_import_objc_header)) {
- Opts.ImplicitObjCHeaderPath = A->getValue();
- Opts.SerializeBridgingHeader |=
- !Opts.Inputs.hasPrimaryInputs() && !Opts.ModuleOutputPath.empty();
- }
-}
-void FrontendArgsToOptionsConverter::computeImplicitImportModuleNames() {
- using namespace options;
- for (const Arg *A : Args.filtered(OPT_import_module)) {
- Opts.ImplicitImportModuleNames.push_back(A->getValue());
- }
-}
-void FrontendArgsToOptionsConverter::computeLLVMArgs() {
- using namespace options;
- for (const Arg *A : Args.filtered(OPT_Xllvm)) {
- Opts.LLVMArgs.push_back(A->getValue());
- }
-}
-
static bool ParseFrontendArgs(FrontendOptions &opts, ArgList &args,
DiagnosticEngine &diags) {
- return FrontendArgsToOptionsConverter(diags, args, opts).convert();
+ return ArgsToFrontendOptionsConverter(diags, args, opts).convert();
}
static void diagnoseSwiftVersion(Optional<version::Version> &vers, Arg *verArg,
diff --git a/lib/Frontend/FrontendInputs.cpp b/lib/Frontend/FrontendInputs.cpp
new file mode 100644
index 0000000..e821620
--- /dev/null
+++ b/lib/Frontend/FrontendInputs.cpp
@@ -0,0 +1,176 @@
+//===--- FrontendInputs.cpp -------------------------------------*- C++ -*-===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2018 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#include "swift/Frontend/FrontendInputs.h"
+
+#include "swift/AST/DiagnosticsFrontend.h"
+#include "swift/Frontend/FrontendOptions.h"
+#include "swift/Option/Options.h"
+#include "swift/Parse/Lexer.h"
+#include "swift/Strings.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/Path.h"
+
+using namespace swift;
+using namespace llvm::opt;
+
+FrontendInputs::FrontendInputs(const FrontendInputs &other) {
+ for (InputFile input : other.getAllFiles())
+ addInput(input);
+}
+
+FrontendInputs &FrontendInputs::operator=(const FrontendInputs &other) {
+ clearInputs();
+ for (InputFile input : other.getAllFiles())
+ addInput(input);
+ return *this;
+}
+
+std::vector<std::string> FrontendInputs::getInputFilenames() const {
+ std::vector<std::string> filenames;
+ for (auto &input : getAllFiles()) {
+ filenames.push_back(input.file());
+ }
+ return filenames;
+}
+
+bool FrontendInputs::isReadingFromStdin() const {
+ return hasSingleInput() && getFilenameOfFirstInput() == "-";
+}
+
+void FrontendInputs::assertMustNotBeMoreThanOnePrimaryInput() const {
+ assert(primaryInputCount() < 2 &&
+ "have not implemented >1 primary input yet");
+}
+
+const InputFile *FrontendInputs::getUniquePrimaryInput() const {
+ assertMustNotBeMoreThanOnePrimaryInput();
+ const auto b = PrimaryInputs.begin();
+ return b == PrimaryInputs.end() ? nullptr : &AllFiles[b->second];
+}
+
+const InputFile &FrontendInputs::getRequiredUniquePrimaryInput() const {
+ if (const auto *input = getUniquePrimaryInput())
+ return *input;
+ llvm_unreachable("No primary when one is required");
+}
+
+StringRef FrontendInputs::getNameOfUniquePrimaryInputFile() const {
+ const auto *input = getUniquePrimaryInput();
+ return input == nullptr ? StringRef() : input->file();
+}
+
+bool FrontendInputs::isFilePrimary(StringRef file) const {
+ auto iterator = PrimaryInputs.find(file);
+ return iterator != PrimaryInputs.end() &&
+ AllFiles[iterator->second].isPrimary();
+}
+
+StringRef FrontendInputs::getFilenameOfFirstInput() const {
+ assert(hasInputs());
+ const InputFile &inp = getAllFiles()[0];
+ StringRef f = inp.file();
+ assert(!f.empty());
+ return f;
+}
+
+bool FrontendInputs::shouldTreatAsLLVM() const {
+ if (hasSingleInput()) {
+ StringRef Input(getFilenameOfFirstInput());
+ return llvm::sys::path::extension(Input).endswith(LLVM_BC_EXTENSION) ||
+ llvm::sys::path::extension(Input).endswith(LLVM_IR_EXTENSION);
+ }
+ return false;
+}
+
+bool FrontendInputs::shouldTreatAsSIL() const {
+ if (hasSingleInput()) {
+ // If we have exactly one input filename, and its extension is "sil",
+ // treat the input as SIL.
+ StringRef Input(getFilenameOfFirstInput());
+ return llvm::sys::path::extension(Input).endswith(SIL_EXTENSION);
+ }
+ // If we have one primary input and it's a filename with extension "sil",
+ // treat the input as SIL.
+ unsigned silPrimaryCount = numberOfPrimaryInputsEndingWith(SIL_EXTENSION);
+ if (silPrimaryCount == 0)
+ return false;
+ if (silPrimaryCount == primaryInputCount()) {
+ // Not clear what to do someday with multiple primaries
+ assertMustNotBeMoreThanOnePrimaryInput();
+ return true;
+ }
+ llvm_unreachable("Either all primaries or none must end with .sil");
+}
+
+void FrontendInputs::addInput(const InputFile &input) {
+ if (!input.file().empty() && input.isPrimary())
+ PrimaryInputs.insert(std::make_pair(input.file(), AllFiles.size()));
+ AllFiles.push_back(input);
+}
+
+unsigned
+FrontendInputs::numberOfPrimaryInputsEndingWith(const char *extension) const {
+ return count_if(
+ PrimaryInputs, [&](const llvm::StringMapEntry<unsigned> &elem) -> bool {
+ StringRef filename = AllFiles[elem.second].file();
+ return llvm::sys::path::extension(filename).endswith(extension);
+ });
+}
+
+bool FrontendInputs::verifyInputs(DiagnosticEngine &diags, bool treatAsSIL,
+ bool isREPLRequested,
+ bool isNoneRequested) const {
+ if (isREPLRequested) {
+ if (hasInputs()) {
+ diags.diagnose(SourceLoc(), diag::error_repl_requires_no_input_files);
+ return true;
+ }
+ } else if (treatAsSIL) {
+ if (isWholeModule()) {
+ if (inputCount() != 1) {
+ diags.diagnose(SourceLoc(), diag::error_mode_requires_one_input_file);
+ return true;
+ }
+ } else {
+ assertMustNotBeMoreThanOnePrimaryInput();
+ // If we have the SIL as our primary input, we can waive the one file
+ // requirement as long as all the other inputs are SIBs.
+ if (!areAllNonPrimariesSIB()) {
+ diags.diagnose(SourceLoc(),
+ diag::error_mode_requires_one_sil_multi_sib);
+ return true;
+ }
+ }
+ } else if (!isNoneRequested && !hasInputs()) {
+ diags.diagnose(SourceLoc(), diag::error_mode_requires_an_input_file);
+ return true;
+ }
+ return false;
+}
+
+bool FrontendInputs::areAllNonPrimariesSIB() const {
+ for (const InputFile &input : getAllFiles()) {
+ if (input.isPrimary())
+ continue;
+ if (!llvm::sys::path::extension(input.file()).endswith(SIB_EXTENSION)) {
+ return false;
+ }
+ }
+ return true;
+}
diff --git a/lib/Frontend/FrontendOptions.cpp b/lib/Frontend/FrontendOptions.cpp
index 3c373ad..745778c 100644
--- a/lib/Frontend/FrontendOptions.cpp
+++ b/lib/Frontend/FrontendOptions.cpp
@@ -27,86 +27,6 @@
using namespace swift;
using namespace llvm::opt;
-bool FrontendInputs::shouldTreatAsLLVM() const {
- if (hasSingleInput()) {
- StringRef Input(getFilenameOfFirstInput());
- return llvm::sys::path::extension(Input).endswith(LLVM_BC_EXTENSION) ||
- llvm::sys::path::extension(Input).endswith(LLVM_IR_EXTENSION);
- }
- return false;
-}
-
-bool FrontendInputs::shouldTreatAsSIL() const {
- if (hasSingleInput()) {
- // If we have exactly one input filename, and its extension is "sil",
- // treat the input as SIL.
- StringRef Input(getFilenameOfFirstInput());
- return llvm::sys::path::extension(Input).endswith(SIL_EXTENSION);
- }
- // If we have one primary input and it's a filename with extension "sil",
- // treat the input as SIL.
- unsigned silPrimaryCount = numberOfPrimaryInputsEndingWith(SIL_EXTENSION);
- if (silPrimaryCount == 0)
- return false;
- if (silPrimaryCount == primaryInputCount()) {
- // Not clear what to do someday with multiple primaries
- assertMustNotBeMoreThanOnePrimaryInput();
- return true;
- }
- llvm_unreachable("Either all primaries or none must end with .sil");
-}
-
-unsigned
-FrontendInputs::numberOfPrimaryInputsEndingWith(const char *extension) const {
- return count_if(
- PrimaryInputs, [&](const llvm::StringMapEntry<unsigned> &elem) -> bool {
- StringRef filename = AllFiles[elem.second].file();
- return llvm::sys::path::extension(filename).endswith(extension);
- });
-}
-
-bool FrontendInputs::verifyInputs(DiagnosticEngine &diags, bool treatAsSIL,
- bool isREPLRequested,
- bool isNoneRequested) const {
- if (isREPLRequested) {
- if (hasInputs()) {
- diags.diagnose(SourceLoc(), diag::error_repl_requires_no_input_files);
- return true;
- }
- } else if (treatAsSIL) {
- if (isWholeModule()) {
- if (inputCount() != 1) {
- diags.diagnose(SourceLoc(), diag::error_mode_requires_one_input_file);
- return true;
- }
- } else {
- assertMustNotBeMoreThanOnePrimaryInput();
- // If we have the SIL as our primary input, we can waive the one file
- // requirement as long as all the other inputs are SIBs.
- if (!areAllNonPrimariesSIB()) {
- diags.diagnose(SourceLoc(),
- diag::error_mode_requires_one_sil_multi_sib);
- return true;
- }
- }
- } else if (!isNoneRequested && !hasInputs()) {
- diags.diagnose(SourceLoc(), diag::error_mode_requires_an_input_file);
- return true;
- }
- return false;
-}
-
-bool FrontendInputs::areAllNonPrimariesSIB() const {
- for (const InputFile &input : getAllFiles()) {
- if (input.isPrimary())
- continue;
- if (!llvm::sys::path::extension(input.file()).endswith(SIB_EXTENSION)) {
- return false;
- }
- }
- return true;
-}
-
bool FrontendOptions::needsProperModuleName(ActionType action) {
switch (action) {
case ActionType::NoneAction:
diff --git a/lib/IDE/Refactoring.cpp b/lib/IDE/Refactoring.cpp
index 97f8c0c..0a86ac4 100644
--- a/lib/IDE/Refactoring.cpp
+++ b/lib/IDE/Refactoring.cpp
@@ -1717,6 +1717,9 @@
bool walkToExprPre(Expr *E) {
if (E->isImplicit())
return true;
+ // FIXME: we should have ErrorType instead of null.
+ if (E->getType().isNull())
+ return true;
auto ExprType = E->getType()->getNominalOrBoundGenericNominal();
//Only binary concatenation operators should exist in expression
if (E->getKind() == ExprKind::Binary) {
diff --git a/lib/IRGen/GenEnum.cpp b/lib/IRGen/GenEnum.cpp
index eb1bca6..38208e4 100644
--- a/lib/IRGen/GenEnum.cpp
+++ b/lib/IRGen/GenEnum.cpp
@@ -4393,7 +4393,7 @@
payloadTI.collectArchetypeMetadata(IGF, typeToMetadataVec, PayloadT);
}
if (typeToMetadataVec.find(canType) == typeToMetadataVec.end() &&
- typeToMetadataVec.size() != SZ) {
+ ((typeToMetadataVec.size() != SZ) || (TIK < Fixed))) {
auto *metadata = IGF.emitTypeMetadataRefForLayout(T);
assert(metadata && "Expected Type Metadata Ref");
typeToMetadataVec.insert(std::make_pair(canType, metadata));
@@ -5851,7 +5851,8 @@
unsigned numTagBits = llvm::Log2_32(numTags-1) + 1;
ExtraTagBitCount = numTagBits <= commonSpareBitCount
? 0 : numTagBits - commonSpareBitCount;
- NumExtraTagValues = numTags >> commonSpareBitCount;
+ NumExtraTagValues =
+ (commonSpareBitCount < 32) ? numTags >> commonSpareBitCount : 0;
// Create the type. We need enough bits to store the largest payload plus
// extra tag bits we need.
diff --git a/lib/IRGen/GenMeta.cpp b/lib/IRGen/GenMeta.cpp
index 7b319e5..dbf8874 100644
--- a/lib/IRGen/GenMeta.cpp
+++ b/lib/IRGen/GenMeta.cpp
@@ -3422,6 +3422,12 @@
void addClassFlags() {
auto flags = ClassFlags();
+#if !SWIFT_DARWIN_ENABLE_STABLE_ABI_BIT
+ // FIXME: Remove this after enabling stable ABI.
+ // This bit is NOT conditioned on UseDarwinPreStableABIBit.
+ flags |= ClassFlags::IsSwiftPreStableABI;
+#endif
+
// Set a flag if the class uses Swift refcounting.
auto type = Target->getDeclaredType()->getCanonicalType();
if (getReferenceCountingForType(IGM, type)
@@ -3940,7 +3946,12 @@
// The rodata pointer will be instantiated here.
// Make sure we at least set the 'is Swift class' bit, though.
ClassRODataPtrOffset = getNextOffsetFromTemplateHeader();
- B.addInt(IGM.MetadataKindTy, 1);
+ if (!IGM.ObjCInterop) {
+ // FIXME: Remove null data altogether rdar://problem/18801263
+ B.addInt(IGM.MetadataKindTy, 1);
+ } else {
+ B.addInt(IGM.MetadataKindTy, IGM.UseDarwinPreStableABIBit ? 1 : 2);
+ }
}
void addDependentData() {
@@ -5361,6 +5372,7 @@
addSize();
addFlags();
addRequirements();
+ addSuperclass();
addAssociatedTypeNames();
B.suggestType(IGM.ProtocolDescriptorStructTy);
@@ -5559,6 +5571,11 @@
return nullptr;
}
+ void addSuperclass() {
+ // FIXME: Implement.
+ B.addRelativeAddressOrNull(nullptr);
+ }
+
void addAssociatedTypeNames() {
llvm::Constant *global = nullptr;
if (!AssociatedTypeNames.empty()) {
diff --git a/lib/IRGen/GenProto.cpp b/lib/IRGen/GenProto.cpp
index fd936dc..a3d57f5 100644
--- a/lib/IRGen/GenProto.cpp
+++ b/lib/IRGen/GenProto.cpp
@@ -746,6 +746,8 @@
SmallVector<WitnessTableEntry, 16> Entries;
public:
+ void addProtocolConformanceDescriptor() { }
+
/// The next witness is an out-of-line base protocol.
void addOutOfLineBaseProtocol(ProtocolDecl *baseProto) {
Entries.push_back(WitnessTableEntry::forOutOfLineBase(baseProto));
@@ -822,7 +824,7 @@
llvm::Value *apply(IRGenFunction &IGF, llvm::Value *wtable) const {
for (unsigned i = ReversePath.size(); i != 0; --i) {
wtable = emitInvariantLoadOfOpaqueWitness(IGF, wtable,
- ReversePath[i-1]);
+ ReversePath[i-1].forProtocolWitnessTable());
wtable = IGF.Builder.CreateBitCast(wtable, IGF.IGM.WitnessTablePtrTy);
}
return wtable;
@@ -1224,6 +1226,18 @@
/// Create the access function.
void buildAccessFunction(llvm::Constant *wtable);
+ /// Add reference to the protocol conformance descriptor that generated
+ /// this table.
+ void addProtocolConformanceDescriptor() {
+ if (Conformance.isBehaviorConformance()) {
+ Table.addNullPointer(IGM.Int8PtrTy);
+ } else {
+ auto descriptor =
+ IGM.getAddrOfProtocolConformanceDescriptor(&Conformance);
+ Table.addBitCast(descriptor, IGM.Int8PtrTy);
+ }
+ }
+
/// A base protocol is witnessed by a pointer to the conformance
/// of this type to that protocol.
void addOutOfLineBaseProtocol(ProtocolDecl *baseProto) {
@@ -1234,7 +1248,8 @@
assert(entry.getBaseProtocolWitness().Requirement == baseProto
&& "sil witness table does not match protocol");
auto piIndex = PI.getBaseIndex(baseProto);
- assert((size_t)piIndex.getValue() == Table.size() &&
+ assert((size_t)piIndex.getValue() ==
+ Table.size() - WitnessTableFirstRequirementOffset &&
"offset doesn't match ProtocolInfo layout");
#endif
@@ -1279,7 +1294,8 @@
&& "sil witness table does not match protocol");
auto piIndex =
PI.getFunctionIndex(cast<AbstractFunctionDecl>(requirement.getDecl()));
- assert((size_t)piIndex.getValue() == Table.size() &&
+ assert((size_t)piIndex.getValue() ==
+ Table.size() - WitnessTableFirstRequirementOffset &&
"offset doesn't match ProtocolInfo layout");
#endif
@@ -1310,7 +1326,8 @@
== requirement.getAssociation()
&& "sil witness table does not match protocol");
auto piIndex = PI.getAssociatedTypeIndex(requirement);
- assert((size_t)piIndex.getValue() == Table.size() &&
+ assert((size_t)piIndex.getValue() ==
+ Table.size() - WitnessTableFirstRequirementOffset &&
"offset doesn't match ProtocolInfo layout");
#endif
@@ -1358,7 +1375,8 @@
requirement.getAssociatedRequirement()
&& "sil witness table does not match protocol");
auto piIndex = PI.getAssociatedConformanceIndex(requirement);
- assert((size_t)piIndex.getValue() == Table.size() &&
+ assert((size_t)piIndex.getValue() ==
+ Table.size() - WitnessTableFirstRequirementOffset &&
"offset doesn't match ProtocolInfo layout");
#endif
@@ -2057,16 +2075,17 @@
// Build the witnesses.
ConstantInitBuilder builder(*this);
- auto witnesses = builder.beginArray(Int8PtrTy);
- WitnessTableBuilder wtableBuilder(*this, witnesses, wt);
+ auto wtableContents = builder.beginArray(Int8PtrTy);
+ WitnessTableBuilder wtableBuilder(*this, wtableContents, wt);
wtableBuilder.build();
- assert(getProtocolInfo(wt->getConformance()->getProtocol())
- .getNumWitnesses() == witnesses.size()
+ assert((getProtocolInfo(wt->getConformance()->getProtocol())
+ .getNumWitnesses() + WitnessTableFirstRequirementOffset)
+ == wtableContents.size()
&& "witness table size doesn't match ProtocolInfo");
// Produce the initializer value.
- auto initializer = witnesses.finishAndCreateFuture();
+ auto initializer = wtableContents.finishAndCreateFuture();
auto global = cast<llvm::GlobalVariable>(
getAddrOfWitnessTable(wt->getConformance(), initializer));
@@ -2264,7 +2283,8 @@
llvm::Value *wtable,
WitnessIndex index,
llvm::Value *associatedTypeMetadata) {
- llvm::Value *witness = emitInvariantLoadOfOpaqueWitness(IGF, wtable, index);
+ llvm::Value *witness = emitInvariantLoadOfOpaqueWitness(IGF, wtable,
+ index.forProtocolWitnessTable());
// Cast the witness to the appropriate function type.
auto sig = IGF.IGM.getAssociatedTypeWitnessTableAccessFunctionSignature();
@@ -2360,7 +2380,8 @@
if (source) {
WitnessIndex index(component.getPrimaryIndex(), /*prefix*/ false);
- source = emitInvariantLoadOfOpaqueWitness(IGF, source, index);
+ source = emitInvariantLoadOfOpaqueWitness(IGF, source,
+ index.forProtocolWitnessTable());
source = IGF.Builder.CreateBitCast(source, IGF.IGM.WitnessTablePtrTy);
setProtocolWitnessTableName(IGF.IGM, source, sourceKey.Type,
inheritedProtocol);
@@ -3090,7 +3111,8 @@
auto &fnProtoInfo = IGF.IGM.getProtocolInfo(proto);
auto index = fnProtoInfo.getFunctionIndex(fn);
llvm::Value *witnessFnPtr =
- emitInvariantLoadOfOpaqueWitness(IGF, wtable, index);
+ emitInvariantLoadOfOpaqueWitness(IGF, wtable,
+ index.forProtocolWitnessTable());
auto fnType = IGF.IGM.getSILTypes().getConstantFunctionType(member);
Signature signature = IGF.IGM.getSignature(fnType);
@@ -3134,7 +3156,8 @@
AssociatedType associatedType) {
auto &pi = IGF.IGM.getProtocolInfo(associatedType.getSourceProtocol());
auto index = pi.getAssociatedTypeIndex(associatedType);
- llvm::Value *witness = emitInvariantLoadOfOpaqueWitness(IGF, wtable, index);
+ llvm::Value *witness = emitInvariantLoadOfOpaqueWitness(IGF, wtable,
+ index.forProtocolWitnessTable());
// Cast the witness to the appropriate function type.
auto sig = IGF.IGM.getAssociatedTypeMetadataAccessFunctionSignature();
diff --git a/lib/IRGen/IRGenModule.cpp b/lib/IRGen/IRGenModule.cpp
index 231d0b9..3d5fba7 100644
--- a/lib/IRGen/IRGenModule.cpp
+++ b/lib/IRGen/IRGenModule.cpp
@@ -200,6 +200,7 @@
Int16Ty, // mandatory requirement count
Int16Ty, // total requirement count
Int32Ty, // requirements array
+ RelativeAddressTy, // superclass
RelativeAddressTy // associated type names
});
diff --git a/lib/IRGen/LocalTypeData.cpp b/lib/IRGen/LocalTypeData.cpp
index 1e5fdee..480a781 100644
--- a/lib/IRGen/LocalTypeData.cpp
+++ b/lib/IRGen/LocalTypeData.cpp
@@ -264,7 +264,8 @@
/*prefix*/ false);
auto table =
- emitInvariantLoadOfOpaqueWitness(*this, selfTable, wIndex);
+ emitInvariantLoadOfOpaqueWitness(*this, selfTable,
+ wIndex.forProtocolWitnessTable());
table = Builder.CreateBitCast(table, IGM.WitnessTablePtrTy);
setProtocolWitnessTableName(IGM, table, archetype, proto);
diff --git a/lib/IRGen/WitnessIndex.h b/lib/IRGen/WitnessIndex.h
index 64d006e..25717e4 100644
--- a/lib/IRGen/WitnessIndex.h
+++ b/lib/IRGen/WitnessIndex.h
@@ -18,6 +18,7 @@
#ifndef SWIFT_IRGEN_WITNESSINDEX_H
#define SWIFT_IRGEN_WITNESSINDEX_H
+#include "swift/ABI/MetadataValues.h"
#include "swift/IRGen/ValueWitness.h"
namespace swift {
@@ -38,6 +39,15 @@
int getValue() const { return Value; }
bool isPrefix() const { return IsPrefix; }
+
+ /// Adjust the index to refer into a protocol witness table (rather than
+ /// a value witness table).
+ WitnessIndex forProtocolWitnessTable() const {
+ int NewValue = Value < 0
+ ? Value
+ : Value + WitnessTableFirstRequirementOffset;
+ return WitnessIndex(NewValue, IsPrefix);
+ }
};
} // end namespace irgen
diff --git a/lib/SIL/SILVerifier.cpp b/lib/SIL/SILVerifier.cpp
index 05102c2..89e2936 100644
--- a/lib/SIL/SILVerifier.cpp
+++ b/lib/SIL/SILVerifier.cpp
@@ -1629,8 +1629,10 @@
"EnumInst operand must be an object");
SILType caseTy = UI->getType().getEnumElementType(UI->getElement(),
F.getModule());
- require(caseTy == UI->getOperand()->getType(),
- "EnumInst operand type does not match type of case");
+ if (UI->getModule().getStage() != SILStage::Lowered) {
+ require(caseTy == UI->getOperand()->getType(),
+ "EnumInst operand type does not match type of case");
+ }
}
}
diff --git a/lib/SILGen/SILGenType.cpp b/lib/SILGen/SILGenType.cpp
index a8f568a..e141786 100644
--- a/lib/SILGen/SILGenType.cpp
+++ b/lib/SILGen/SILGenType.cpp
@@ -439,6 +439,10 @@
Entries, ConditionalConformances);
}
+ void addProtocolConformanceDescriptor() {
+ }
+
+
void addOutOfLineBaseProtocol(ProtocolDecl *baseProtocol) {
assert(Lowering::TypeConverter::protocolRequiresWitnessTable(baseProtocol));
@@ -725,6 +729,8 @@
DefaultWitnesses.push_back(SILDefaultWitnessTable::Entry());
}
+ void addProtocolConformanceDescriptor() { }
+
void addOutOfLineBaseProtocol(ProtocolDecl *baseProto) {
addMissingDefault();
}
diff --git a/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp b/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp
index de7e32f..89da451 100644
--- a/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp
+++ b/lib/SILOptimizer/Analysis/EscapeAnalysis.cpp
@@ -295,6 +295,8 @@
} else {
Node->pointsTo = pointsTo;
}
+ // Update use-points if the use-point information is already calculated.
+ pointsTo->mergeUsePoints(Node);
}
// Add all adjacent nodes to the WorkList.
diff --git a/lib/SILOptimizer/Analysis/MemoryBehavior.cpp b/lib/SILOptimizer/Analysis/MemoryBehavior.cpp
index cc51faf..78b7ba5 100644
--- a/lib/SILOptimizer/Analysis/MemoryBehavior.cpp
+++ b/lib/SILOptimizer/Analysis/MemoryBehavior.cpp
@@ -268,6 +268,10 @@
Idx < End && Behavior < MemBehavior::MayHaveSideEffects; ++Idx) {
auto &ArgEffect = ApplyEffects.getParameterEffects()[Idx];
auto ArgBehavior = ArgEffect.getMemBehavior(InspectionMode);
+ if (ArgEffect.mayRelease()) {
+ Behavior = MemBehavior::MayHaveSideEffects;
+ break;
+ }
auto NewBehavior = combineMemoryBehavior(Behavior, ArgBehavior);
if (NewBehavior != Behavior) {
SILValue Arg = AI->getArgument(Idx);
diff --git a/lib/SILOptimizer/Analysis/SideEffectAnalysis.cpp b/lib/SILOptimizer/Analysis/SideEffectAnalysis.cpp
index bda9abb..8d69d66 100644
--- a/lib/SILOptimizer/Analysis/SideEffectAnalysis.cpp
+++ b/lib/SILOptimizer/Analysis/SideEffectAnalysis.cpp
@@ -120,6 +120,7 @@
case ValueKind::TupleExtractInst:
case ValueKind::UncheckedEnumDataInst:
case ValueKind::UncheckedTrivialBitCastInst:
+ case ValueKind::UncheckedRefCastInst:
V = cast<SingleValueInstruction>(V)->getOperand(0);
break;
default:
@@ -346,10 +347,6 @@
case SILInstructionKind::ReleaseValueInst:
case SILInstructionKind::UnownedReleaseInst:
FInfo->FE.getEffectsOn(I->getOperand(0))->Releases = true;
-
- // TODO: Check the call graph to be less conservative about what
- // destructors might be called.
- FInfo->FE.setWorstEffects();
return;
case SILInstructionKind::UnconditionalCheckedCastInst:
FInfo->FE.getEffectsOn(cast<UnconditionalCheckedCastInst>(I)->getOperand())->Reads = true;
diff --git a/lib/SILOptimizer/IPO/GlobalOpt.cpp b/lib/SILOptimizer/IPO/GlobalOpt.cpp
index 6266fb6..44589fb 100644
--- a/lib/SILOptimizer/IPO/GlobalOpt.cpp
+++ b/lib/SILOptimizer/IPO/GlobalOpt.cpp
@@ -74,17 +74,13 @@
// Keep track of cold blocks.
ColdBlockInfo ColdBlocks;
- NominalTypeDecl *ArrayDecl;
- int GlobIdx = 0;
-
// Whether we see a "once" call to callees that we currently don't handle.
bool UnhandledOnceCallee = false;
// Record number of times a globalinit_func is called by "once".
llvm::DenseMap<SILFunction*, unsigned> InitializerCount;
public:
SILGlobalOpt(SILModule *M, DominanceAnalysis *DA)
- : Module(M), DA(DA), ColdBlocks(DA),
- ArrayDecl(M->getASTContext().getArrayDecl()) {}
+ : Module(M), DA(DA), ColdBlocks(DA) {}
bool run();
@@ -94,25 +90,6 @@
void collectGlobalStore(StoreInst *SI, SILGlobalVariable *SILG);
void collectGlobalAccess(GlobalAddrInst *GAI);
- bool isCOWType(SILType type) {
- return type.getNominalOrBoundGenericNominal() == ArrayDecl;
- }
-
- bool isValidUseOfObject(SILInstruction *Val, bool isCOWObject,
- ApplyInst **FindStringCall = nullptr);
-
- bool getObjectInitVals(SILValue Val,
- llvm::DenseMap<VarDecl *, StoreInst *> &MemberStores,
- llvm::SmallVectorImpl<StoreInst *> &TailStores,
- ApplyInst **FindStringCall);
- bool handleTailAddr(int TailIdx, SILInstruction *I,
- llvm::SmallVectorImpl<StoreInst *> &TailStores);
-
- void
- optimizeObjectAllocation(AllocRefInst *ARI,
- llvm::SmallVector<SILInstruction *, 4> &ToRemove);
- void replaceFindStringCall(ApplyInst *FindStringCall);
-
SILGlobalVariable *getVariableOfGlobalInit(SILFunction *AddrF);
bool isInLoop(SILBasicBlock *CurBB);
void placeInitializers(SILFunction *InitF, ArrayRef<ApplyInst*> Calls);
@@ -684,92 +661,6 @@
namespace {
-/// Utility class for cloning init values into the static initializer of a
-/// SILGlobalVariable.
-class StaticInitCloner : public SILCloner<StaticInitCloner> {
- friend class SILInstructionVisitor<StaticInitCloner>;
- friend class SILCloner<StaticInitCloner>;
-
- /// The number of not yet cloned operands for each instruction.
- llvm::DenseMap<SILInstruction *, int> NumOpsToClone;
-
- /// List of instructions for which all operands are already cloned (or which
- /// don't have any operands).
- llvm::SmallVector<SILInstruction *, 8> ReadyToClone;
-
-public:
- StaticInitCloner(SILGlobalVariable *GVar)
- : SILCloner<StaticInitCloner>(GVar) { }
-
- /// Add \p InitVal and all its operands (transitively) for cloning.
- ///
- /// Note: all init values must are added, before calling clone().
- void add(SILInstruction *InitVal);
-
- /// Clone \p InitVal and all its operands into the initializer of the
- /// SILGlobalVariable.
- ///
- /// \return Returns the cloned instruction in the SILGlobalVariable.
- SingleValueInstruction *clone(SingleValueInstruction *InitVal);
-
- /// Convenience function to clone a single \p InitVal.
- static void appendToInitializer(SILGlobalVariable *GVar,
- SingleValueInstruction *InitVal) {
- StaticInitCloner Cloner(GVar);
- Cloner.add(InitVal);
- Cloner.clone(InitVal);
- }
-
-protected:
- SILLocation remapLocation(SILLocation Loc) {
- return ArtificialUnreachableLocation();
- }
-};
-
-void StaticInitCloner::add(SILInstruction *InitVal) {
- // Don't schedule an instruction twice for cloning.
- if (NumOpsToClone.count(InitVal) != 0)
- return;
-
- ArrayRef<Operand> Ops = InitVal->getAllOperands();
- NumOpsToClone[InitVal] = Ops.size();
- if (Ops.empty()) {
- // It's an instruction without operands, e.g. a literal. It's ready to be
- // cloned first.
- ReadyToClone.push_back(InitVal);
- } else {
- // Recursively add all operands.
- for (const Operand &Op : Ops) {
- add(cast<SingleValueInstruction>(Op.get()));
- }
- }
-}
-
-SingleValueInstruction *
-StaticInitCloner::clone(SingleValueInstruction *InitVal) {
- assert(NumOpsToClone.count(InitVal) != 0 && "InitVal was not added");
- // Find the right order to clone: all operands of an instruction must be
- // cloned before the instruction itself.
- while (!ReadyToClone.empty()) {
- SILInstruction *I = ReadyToClone.pop_back_val();
-
- // Clone the instruction into the SILGlobalVariable
- visit(I);
-
- // Check if users of I can now be cloned.
- for (SILValue result : I->getResults()) {
- for (Operand *Use : result->getUses()) {
- SILInstruction *User = Use->getUser();
- if (NumOpsToClone.count(User) != 0 && --NumOpsToClone[User] == 0)
- ReadyToClone.push_back(User);
- }
- }
- }
- assert(ValueMap.count(InitVal) != 0 &&
- "Could not schedule all instructions for cloning");
- return cast<SingleValueInstruction>(ValueMap[InitVal]);
-}
-
} // end anonymous namespace
/// Replace loads from a global variable by the known value.
@@ -1010,395 +901,6 @@
}
}
-/// Get all stored properties of a class, including it's super classes.
-static void getFields(ClassDecl *Cl, SmallVectorImpl<VarDecl *> &Fields) {
- if (ClassDecl *SuperCl = Cl->getSuperclassDecl()) {
- getFields(SuperCl, Fields);
- }
- for (VarDecl *Field : Cl->getStoredProperties()) {
- Fields.push_back(Field);
- }
-}
-
-/// Check if \p V is a valid instruction for a static initializer, including
-/// all its operands.
-static bool isValidInitVal(SILValue V) {
- if (auto I = dyn_cast<SingleValueInstruction>(V)) {
- if (!SILGlobalVariable::isValidStaticInitializerInst(I, I->getModule()))
- return false;
-
- for (Operand &Op : I->getAllOperands()) {
- if (!isValidInitVal(Op.get()))
- return false;
- }
- return true;
- }
- return false;
-}
-
-/// Check if a use of an object may prevent outlining the object.
-///
-/// If \p isCOWObject is true, then the object reference is wrapped into a
-/// COW container. Currently this is just Array<T>.
-/// If a use is a call to the findStringSwitchCase semantic call, the apply
-/// is returned in \p FindStringCall.
-bool SILGlobalOpt::isValidUseOfObject(SILInstruction *I, bool isCOWObject,
- ApplyInst **FindStringCall) {
- switch (I->getKind()) {
- case SILInstructionKind::DebugValueAddrInst:
- case SILInstructionKind::DebugValueInst:
- case SILInstructionKind::LoadInst:
- case SILInstructionKind::DeallocRefInst:
- case SILInstructionKind::StrongRetainInst:
- case SILInstructionKind::StrongReleaseInst:
- return true;
-
- case SILInstructionKind::ReturnInst:
- case SILInstructionKind::TryApplyInst:
- case SILInstructionKind::PartialApplyInst:
- case SILInstructionKind::StoreInst:
- /// We don't have a representation for COW objects in SIL, so we do some
- /// ad-hoc testing: We can ignore uses of a COW object if any use after
- /// this will do a uniqueness checking before the object is modified.
- return isCOWObject;
-
- case SILInstructionKind::ApplyInst:
- if (!isCOWObject)
- return false;
- // There should only be a single call to findStringSwitchCase. But even
- // if there are multiple calls, it's not problem - we'll just optimize the
- // last one we find.
- if (cast<ApplyInst>(I)->hasSemantics("findStringSwitchCase"))
- *FindStringCall = cast<ApplyInst>(I);
- return true;
-
- case SILInstructionKind::StructInst:
- if (isCOWType(cast<StructInst>(I)->getType())) {
- // The object is wrapped into a COW container.
- isCOWObject = true;
- }
- break;
-
- case SILInstructionKind::UncheckedRefCastInst:
- case SILInstructionKind::StructElementAddrInst:
- case SILInstructionKind::AddressToPointerInst:
- assert(!isCOWObject && "instruction cannot have a COW object as operand");
- break;
-
- case SILInstructionKind::TupleInst:
- case SILInstructionKind::TupleExtractInst:
- case SILInstructionKind::EnumInst:
- break;
-
- case SILInstructionKind::StructExtractInst:
- // To be on the safe side we don't consider the object as COW if it is
- // extracted again from the COW container: the uniqueness check may be
- // optimized away in this case.
- isCOWObject = false;
- break;
-
- case SILInstructionKind::BuiltinInst: {
- // Handle the case for comparing addresses. This occurs when the Array
- // comparison function is inlined.
- auto *BI = cast<BuiltinInst>(I);
- BuiltinValueKind K = BI->getBuiltinInfo().ID;
- if (K == BuiltinValueKind::ICMP_EQ || K == BuiltinValueKind::ICMP_NE)
- return true;
- return false;
- }
-
- default:
- return false;
- }
-
- auto SVI = cast<SingleValueInstruction>(I);
- for (Operand *Use : getNonDebugUses(SVI)) {
- if (!isValidUseOfObject(Use->getUser(), isCOWObject, FindStringCall))
- return false;
- }
- return true;
-}
-
-/// Handle the address of a tail element.
-bool SILGlobalOpt::handleTailAddr(int TailIdx, SILInstruction *TailAddr,
- llvm::SmallVectorImpl<StoreInst *> &TailStores) {
- if (TailIdx >= 0 && TailIdx < (int)TailStores.size()) {
- if (auto *SI = dyn_cast<StoreInst>(TailAddr)) {
- if (!isValidInitVal(SI->getSrc()) || TailStores[TailIdx])
- return false;
- TailStores[TailIdx] = SI;
- return true;
- }
- }
- return isValidUseOfObject(TailAddr, /*isCOWObject*/false);
-}
-
-/// Get the init values for an object's stored properties and its tail elements.
-bool SILGlobalOpt::getObjectInitVals(SILValue Val,
- llvm::DenseMap<VarDecl *, StoreInst *> &MemberStores,
- llvm::SmallVectorImpl<StoreInst *> &TailStores,
- ApplyInst **FindStringCall) {
- for (Operand *Use : Val->getUses()) {
- SILInstruction *User = Use->getUser();
- if (auto *UC = dyn_cast<UpcastInst>(User)) {
- // Upcast is transparent.
- if (!getObjectInitVals(UC, MemberStores, TailStores, FindStringCall))
- return false;
- } else if (auto *REA = dyn_cast<RefElementAddrInst>(User)) {
- // The address of a stored property.
- for (Operand *ElemAddrUse : REA->getUses()) {
- SILInstruction *ElemAddrUser = ElemAddrUse->getUser();
- if (auto *SI = dyn_cast<StoreInst>(ElemAddrUser)) {
- if (!isValidInitVal(SI->getSrc()) || MemberStores[REA->getField()])
- return false;
- MemberStores[REA->getField()] = SI;
- } else if (!isValidUseOfObject(ElemAddrUser, /*isCOWObject*/false)) {
- return false;
- }
- }
- } else if (auto *RTA = dyn_cast<RefTailAddrInst>(User)) {
- // The address of a tail element.
- for (Operand *TailUse : RTA->getUses()) {
- SILInstruction *TailUser = TailUse->getUser();
- if (auto *IA = dyn_cast<IndexAddrInst>(TailUser)) {
- // An index_addr yields the address of any tail element. Only if the
- // second operand (the index) is an integer literal we can figure out
- // which tail element is refereneced.
- int TailIdx = -1;
- if (auto *Index = dyn_cast<IntegerLiteralInst>(IA->getIndex()))
- TailIdx = Index->getValue().getZExtValue();
-
- for (Operand *IAUse : IA->getUses()) {
- if (!handleTailAddr(TailIdx, IAUse->getUser(), TailStores))
- return false;
- }
- // Without an index_addr it's the first tail element.
- } else if (!handleTailAddr(/*TailIdx*/0, TailUser, TailStores)) {
- return false;
- }
- }
- } else if (!isValidUseOfObject(User, /*isCOWObject*/false, FindStringCall)) {
- return false;
- }
- }
- return true;
-}
-
-class GlobalVariableMangler : public Mangle::ASTMangler {
-public:
- std::string mangleOutlinedVariable(SILFunction *F, int &uniqueIdx) {
- std::string GlobName;
- do {
- beginManglingWithoutPrefix();
- appendOperator(F->getName());
- appendOperator("Tv", Index(uniqueIdx++));
- GlobName = finalize();
- } while (F->getModule().lookUpGlobalVariable(GlobName));
-
- return GlobName;
- }
-};
-
-/// Try to convert an object allocation into a statically initialized object.
-///
-/// In general this works for any class, but in practice it will only kick in
-/// for array buffer objects. The use cases are array literals in a function.
-/// For example:
-/// func getarray() -> [Int] {
-/// return [1, 2, 3]
-/// }
-void SILGlobalOpt::optimizeObjectAllocation(
- AllocRefInst *ARI, llvm::SmallVector<SILInstruction *, 4> &ToRemove) {
-
- if (ARI->isObjC())
- return;
-
- // Check how many tail allocated elements are on the object.
- ArrayRef<Operand> TailCounts = ARI->getTailAllocatedCounts();
- SILType TailType;
- unsigned NumTailElems = 0;
- if (TailCounts.size() > 0) {
- // We only support a single tail allocated array.
- if (TailCounts.size() > 1)
- return;
- // The number of tail allocated elements must be constant.
- if (auto *ILI = dyn_cast<IntegerLiteralInst>(TailCounts[0].get())) {
- if (ILI->getValue().getActiveBits() > 20)
- return;
- NumTailElems = ILI->getValue().getZExtValue();
- TailType = ARI->getTailAllocatedTypes()[0];
- } else {
- return;
- }
- }
- SILType Ty = ARI->getType();
- ClassDecl *Cl = Ty.getClassOrBoundGenericClass();
- if (!Cl)
- return;
- llvm::SmallVector<VarDecl *, 16> Fields;
- getFields(Cl, Fields);
-
- // Get the initialization stores of the object's properties and tail
- // allocated elements. Also check if there are any "bad" uses of the object.
- llvm::DenseMap<VarDecl *, StoreInst *> MemberStores;
- llvm::SmallVector<StoreInst *, 16> TailStores;
- TailStores.resize(NumTailElems);
- ApplyInst *FindStringCall = nullptr;
- if (!getObjectInitVals(ARI, MemberStores, TailStores, &FindStringCall))
- return;
-
- // Is there a store for all the class properties?
- if (MemberStores.size() != Fields.size())
- return;
-
- // Is there a store for all tail allocated elements?
- for (auto V : TailStores) {
- if (!V)
- return;
- }
-
- DEBUG(llvm::dbgs() << "Outline global variable in " <<
- ARI->getFunction()->getName() << '\n');
-
- assert(!Cl->isResilient(Module->getSwiftModule(),
- ResilienceExpansion::Minimal) &&
- "constructor call of resilient class should prevent static allocation");
-
- // Create a name for the outlined global variable.
- GlobalVariableMangler Mangler;
- std::string GlobName =
- Mangler.mangleOutlinedVariable(ARI->getFunction(), GlobIdx);
-
- SILGlobalVariable *Glob =
- SILGlobalVariable::create(*Module, SILLinkage::Private, IsNotSerialized,
- GlobName, ARI->getType());
-
- // Schedule all init values for cloning into the initializer of Glob.
- StaticInitCloner Cloner(Glob);
- for (VarDecl *Field : Fields) {
- StoreInst *MemberStore = MemberStores[Field];
- Cloner.add(cast<SingleValueInstruction>(MemberStore->getSrc()));
- }
- for (StoreInst *TailStore : TailStores) {
- Cloner.add(cast<SingleValueInstruction>(TailStore->getSrc()));
- }
-
- // Create the class property initializers
- llvm::SmallVector<SILValue, 16> ObjectArgs;
- for (VarDecl *Field : Fields) {
- StoreInst *MemberStore = MemberStores[Field];
- assert(MemberStore);
- ObjectArgs.push_back(Cloner.clone(
- cast<SingleValueInstruction>(MemberStore->getSrc())));
- ToRemove.push_back(MemberStore);
- }
- // Create the initializers for the tail elements.
- unsigned NumBaseElements = ObjectArgs.size();
- for (StoreInst *TailStore : TailStores) {
- ObjectArgs.push_back(Cloner.clone(
- cast<SingleValueInstruction>(TailStore->getSrc())));
- ToRemove.push_back(TailStore);
- }
- // Create the initializer for the object itself.
- SILBuilder StaticInitBuilder(Glob);
- StaticInitBuilder.createObject(ArtificialUnreachableLocation(),
- ARI->getType(), ObjectArgs, NumBaseElements);
-
- // Replace the alloc_ref by global_value + strong_retain instructions.
- SILBuilder B(ARI);
- GlobalValueInst *GVI = B.createGlobalValue(ARI->getLoc(), Glob);
- B.createStrongRetain(ARI->getLoc(), GVI, B.getDefaultAtomicity());
- llvm::SmallVector<Operand *, 8> Worklist(ARI->use_begin(), ARI->use_end());
- while (!Worklist.empty()) {
- auto *Use = Worklist.pop_back_val();
- SILInstruction *User = Use->getUser();
- switch (User->getKind()) {
- case SILInstructionKind::DeallocRefInst:
- ToRemove.push_back(User);
- break;
- default:
- Use->set(GVI);
- }
- }
- if (FindStringCall && NumTailElems > 16) {
- assert(&*std::next(ARI->getIterator()) != FindStringCall &&
- "FindStringCall must not be the next instruction after ARI because "
- "deleting it would invalidate the instruction iterator");
- replaceFindStringCall(FindStringCall);
- }
-
- ToRemove.push_back(ARI);
- HasChanged = true;
-}
-
-/// Replaces a call to _findStringSwitchCase with a call to
-/// _findStringSwitchCaseWithCache which builds a cache (e.g. a Dictionary) and
-/// stores it into a global variable. Then subsequent calls to this function can
-/// do a fast lookup using the cache.
-void SILGlobalOpt::replaceFindStringCall(ApplyInst *FindStringCall) {
- // Find the replacement function in the swift stdlib.
- SmallVector<ValueDecl *, 1> results;
- Module->getASTContext().lookupInSwiftModule("_findStringSwitchCaseWithCache",
- results);
- if (results.size() != 1)
- return;
-
- auto *FD = dyn_cast<FuncDecl>(results.front());
- if (!FD)
- return;
-
- SILDeclRef declRef(FD, SILDeclRef::Kind::Func);
- SILFunction *replacementFunc = Module->getOrCreateFunction(
- FindStringCall->getLoc(), declRef, NotForDefinition);
-
- SILFunctionType *FTy = replacementFunc->getLoweredFunctionType();
- if (FTy->getNumParameters() != 3)
- return;
-
- SILType cacheType = FTy->getParameters()[2].getSILStorageType().getObjectType();
- NominalTypeDecl *cacheDecl = cacheType.getNominalOrBoundGenericNominal();
- if (!cacheDecl)
- return;
-
- assert(!cacheDecl->isResilient(Module->getSwiftModule(),
- ResilienceExpansion::Minimal));
-
- SILType wordTy = cacheType.getFieldType(
- cacheDecl->getStoredProperties().front(), *Module);
-
- GlobalVariableMangler Mangler;
- std::string GlobName =
- Mangler.mangleOutlinedVariable(FindStringCall->getFunction(), GlobIdx);
-
- // Create an "opaque" global variable which is passed as inout to
- // _findStringSwitchCaseWithCache and into which the function stores the
- // "cache".
- SILGlobalVariable *CacheVar =
- SILGlobalVariable::create(*Module, SILLinkage::Private, IsNotSerialized,
- GlobName, cacheType);
-
- SILLocation Loc = FindStringCall->getLoc();
- SILBuilder StaticInitBuilder(CacheVar);
- auto *Zero = StaticInitBuilder.createIntegerLiteral(Loc, wordTy, 0);
- StaticInitBuilder.createStruct(ArtificialUnreachableLocation(), cacheType,
- {Zero, Zero});
-
- SILBuilder B(FindStringCall);
- GlobalAddrInst *CacheAddr = B.createGlobalAddr(FindStringCall->getLoc(),
- CacheVar);
- FunctionRefInst *FRI = B.createFunctionRef(FindStringCall->getLoc(),
- replacementFunc);
- ApplyInst *NewCall = B.createApply(FindStringCall->getLoc(), FRI,
- FindStringCall->getSubstitutions(),
- { FindStringCall->getArgument(0),
- FindStringCall->getArgument(1),
- CacheAddr },
- FindStringCall->isNonThrowing());
-
- FindStringCall->replaceAllUsesWith(NewCall);
- FindStringCall->eraseFromParent();
-}
-
/// Optimize access to the global variable, which is known
/// to have a constant value. Replace all loads from the
/// global address by invocations of a getter that returns
@@ -1449,7 +951,6 @@
// Cache cold blocks per function.
ColdBlockInfo ColdBlocks(DA);
- GlobIdx = 0;
for (auto &BB : F) {
bool IsCold = ColdBlocks.isCold(&BB);
auto Iter = BB.begin();
@@ -1472,15 +973,6 @@
collectGlobalInitCall(AI);
} else if (auto *GAI = dyn_cast<GlobalAddrInst>(I)) {
collectGlobalAccess(GAI);
- } else if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
- if (!F.isSerialized()) {
- // Currently we cannot serialize a function which refers to a
- // statically initialized global. So we don't do the optimization
- // for serializable functions.
- // TODO: We may do the optimization _after_ serialization in the
- // pass pipeline.
- optimizeObjectAllocation(ARI, ToRemove);
- }
}
}
for (auto *I : ToRemove)
diff --git a/lib/SILOptimizer/LoopTransforms/LICM.cpp b/lib/SILOptimizer/LoopTransforms/LICM.cpp
index 9820f24..00ad534 100644
--- a/lib/SILOptimizer/LoopTransforms/LICM.cpp
+++ b/lib/SILOptimizer/LoopTransforms/LICM.cpp
@@ -72,6 +72,7 @@
for (unsigned Idx = 0, End = AI->getNumArguments(); Idx < End; ++Idx) {
auto &ArgEffect = E.getParameterEffects()[Idx];
+ assert(!ArgEffect.mayRelease() && "apply should only read from memory");
if (!ArgEffect.mayRead())
continue;
diff --git a/lib/SILOptimizer/Mandatory/ConstantPropagation.cpp b/lib/SILOptimizer/Mandatory/ConstantPropagation.cpp
index 5fc700a..4f5d07a 100644
--- a/lib/SILOptimizer/Mandatory/ConstantPropagation.cpp
+++ b/lib/SILOptimizer/Mandatory/ConstantPropagation.cpp
@@ -11,1263 +11,10 @@
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "constant-propagation"
-#include "swift/AST/DiagnosticsSIL.h"
-#include "swift/AST/Expr.h"
-#include "swift/SIL/PatternMatch.h"
-#include "swift/SIL/SILBuilder.h"
-#include "swift/SIL/SILInstruction.h"
-#include "swift/SILOptimizer/PassManager/Passes.h"
#include "swift/SILOptimizer/PassManager/Transforms.h"
-#include "swift/SILOptimizer/Utils/CastOptimizer.h"
#include "swift/SILOptimizer/Utils/ConstantFolding.h"
-#include "swift/SILOptimizer/Utils/Local.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Debug.h"
+
using namespace swift;
-using namespace swift::PatternMatch;
-
-STATISTIC(NumInstFolded, "Number of constant folded instructions");
-
-template<typename...T, typename...U>
-static InFlightDiagnostic
-diagnose(ASTContext &Context, SourceLoc loc, Diag<T...> diag, U &&...args) {
- return Context.Diags.diagnose(loc, diag, std::forward<U>(args)...);
-}
-
-/// \brief Construct (int, overflow) result tuple.
-static SILValue constructResultWithOverflowTuple(BuiltinInst *BI,
- APInt Res, bool Overflow) {
- // Get the SIL subtypes of the returned tuple type.
- SILType FuncResType = BI->getType();
- assert(FuncResType.castTo<TupleType>()->getNumElements() == 2);
- SILType ResTy1 = FuncResType.getTupleElementType(0);
- SILType ResTy2 = FuncResType.getTupleElementType(1);
-
- // Construct the folded instruction - a tuple of two literals, the
- // result and overflow.
- SILBuilderWithScope B(BI);
- SILLocation Loc = BI->getLoc();
- SILValue Result[] = {
- B.createIntegerLiteral(Loc, ResTy1, Res),
- B.createIntegerLiteral(Loc, ResTy2, Overflow)
- };
- return B.createTuple(Loc, FuncResType, Result);
-}
-
-/// \brief Fold arithmetic intrinsics with overflow.
-static SILValue
-constantFoldBinaryWithOverflow(BuiltinInst *BI, llvm::Intrinsic::ID ID,
- bool ReportOverflow,
- Optional<bool> &ResultsInError) {
- OperandValueArrayRef Args = BI->getArguments();
- assert(Args.size() >= 2);
-
- auto *Op1 = dyn_cast<IntegerLiteralInst>(Args[0]);
- auto *Op2 = dyn_cast<IntegerLiteralInst>(Args[1]);
-
- // If either Op1 or Op2 is not a literal, we cannot do anything.
- if (!Op1 || !Op2)
- return nullptr;
-
- // Calculate the result.
- APInt LHSInt = Op1->getValue();
- APInt RHSInt = Op2->getValue();
- bool Overflow;
- APInt Res = constantFoldBinaryWithOverflow(LHSInt, RHSInt, Overflow, ID);
-
- // If we can statically determine that the operation overflows,
- // warn about it if warnings are not disabled by ResultsInError being null.
- if (ResultsInError.hasValue() && Overflow && ReportOverflow) {
- if (BI->getFunction()->isSpecialization()) {
- // Do not report any constant propagation issues in specializations,
- // because they are eventually not present in the original function.
- return nullptr;
- }
- // Try to infer the type of the constant expression that the user operates
- // on. If the intrinsic was lowered from a call to a function that takes
- // two arguments of the same type, use the type of the LHS argument.
- // This would detect '+'/'+=' and such.
- Type OpType;
- SILLocation Loc = BI->getLoc();
- const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
- SourceRange LHSRange, RHSRange;
- if (CE) {
- const auto *Args = dyn_cast_or_null<TupleExpr>(CE->getArg());
- if (Args && Args->getNumElements() == 2) {
- // Look through inout types in order to handle += well.
- CanType LHSTy = Args->getElement(0)->getType()->getInOutObjectType()->
- getCanonicalType();
- CanType RHSTy = Args->getElement(1)->getType()->getCanonicalType();
- if (LHSTy == RHSTy)
- OpType = Args->getElement(1)->getType();
-
- LHSRange = Args->getElement(0)->getSourceRange();
- RHSRange = Args->getElement(1)->getSourceRange();
- }
- }
-
- bool Signed = false;
- StringRef Operator = "+";
-
- switch (ID) {
- default: llvm_unreachable("Invalid case");
- case llvm::Intrinsic::sadd_with_overflow:
- Signed = true;
- break;
- case llvm::Intrinsic::uadd_with_overflow:
- break;
- case llvm::Intrinsic::ssub_with_overflow:
- Operator = "-";
- Signed = true;
- break;
- case llvm::Intrinsic::usub_with_overflow:
- Operator = "-";
- break;
- case llvm::Intrinsic::smul_with_overflow:
- Operator = "*";
- Signed = true;
- break;
- case llvm::Intrinsic::umul_with_overflow:
- Operator = "*";
- break;
- }
-
- if (!OpType.isNull()) {
- diagnose(BI->getModule().getASTContext(),
- Loc.getSourceLoc(),
- diag::arithmetic_operation_overflow,
- LHSInt.toString(/*Radix*/ 10, Signed),
- Operator,
- RHSInt.toString(/*Radix*/ 10, Signed),
- OpType).highlight(LHSRange).highlight(RHSRange);
- } else {
- // If we cannot get the type info in an expected way, describe the type.
- diagnose(BI->getModule().getASTContext(),
- Loc.getSourceLoc(),
- diag::arithmetic_operation_overflow_generic_type,
- LHSInt.toString(/*Radix*/ 10, Signed),
- Operator,
- RHSInt.toString(/*Radix*/ 10, Signed),
- Signed,
- LHSInt.getBitWidth()).highlight(LHSRange).highlight(RHSRange);
- }
- ResultsInError = Optional<bool>(true);
- }
-
- return constructResultWithOverflowTuple(BI, Res, Overflow);
-}
-
-static SILValue
-constantFoldBinaryWithOverflow(BuiltinInst *BI, BuiltinValueKind ID,
- Optional<bool> &ResultsInError) {
- OperandValueArrayRef Args = BI->getArguments();
- auto *ShouldReportFlag = dyn_cast<IntegerLiteralInst>(Args[2]);
- return constantFoldBinaryWithOverflow(BI,
- getLLVMIntrinsicIDForBuiltinWithOverflow(ID),
- ShouldReportFlag && (ShouldReportFlag->getValue() == 1),
- ResultsInError);
-}
-
-static SILValue constantFoldIntrinsic(BuiltinInst *BI, llvm::Intrinsic::ID ID,
- Optional<bool> &ResultsInError) {
- switch (ID) {
- default: break;
- case llvm::Intrinsic::expect: {
- // An expect of an integral constant is the constant itself.
- assert(BI->getArguments().size() == 2 && "Expect should have 2 args.");
- auto *Op1 = dyn_cast<IntegerLiteralInst>(BI->getArguments()[0]);
- if (!Op1)
- return nullptr;
- return Op1;
- }
-
- case llvm::Intrinsic::ctlz: {
- assert(BI->getArguments().size() == 2 && "Ctlz should have 2 args.");
- OperandValueArrayRef Args = BI->getArguments();
-
- // Fold for integer constant arguments.
- auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
- if (!LHS) {
- return nullptr;
- }
- APInt LHSI = LHS->getValue();
- unsigned LZ = 0;
- // Check corner-case of source == zero
- if (LHSI == 0) {
- auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
- if (!RHS || RHS->getValue() != 0) {
- // Undefined
- return nullptr;
- }
- LZ = LHSI.getBitWidth();
- } else {
- LZ = LHSI.countLeadingZeros();
- }
- APInt LZAsAPInt = APInt(LHSI.getBitWidth(), LZ);
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), LHS->getType(), LZAsAPInt);
- }
-
- case llvm::Intrinsic::sadd_with_overflow:
- case llvm::Intrinsic::uadd_with_overflow:
- case llvm::Intrinsic::ssub_with_overflow:
- case llvm::Intrinsic::usub_with_overflow:
- case llvm::Intrinsic::smul_with_overflow:
- case llvm::Intrinsic::umul_with_overflow:
- return constantFoldBinaryWithOverflow(BI, ID,
- /* ReportOverflow */ false,
- ResultsInError);
- }
- return nullptr;
-}
-
-static SILValue constantFoldCompare(BuiltinInst *BI, BuiltinValueKind ID) {
- OperandValueArrayRef Args = BI->getArguments();
-
- // Fold for integer constant arguments.
- auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
- auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
- if (LHS && RHS) {
- APInt Res = constantFoldComparison(LHS->getValue(), RHS->getValue(), ID);
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), Res);
- }
-
- // Comparisons of an unsigned value with 0.
- SILValue Other;
- auto MatchNonNegative =
- m_BuiltinInst(BuiltinValueKind::AssumeNonNegative, m_ValueBase());
- if (match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_ULT,
- m_SILValue(Other), m_Zero()),
- m_BuiltinInst(BuiltinValueKind::ICMP_UGT, m_Zero(),
- m_SILValue(Other)))) ||
- match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
- MatchNonNegative, m_Zero()),
- m_BuiltinInst(BuiltinValueKind::ICMP_SGT, m_Zero(),
- MatchNonNegative)))) {
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
- }
-
- if (match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_UGE,
- m_SILValue(Other), m_Zero()),
- m_BuiltinInst(BuiltinValueKind::ICMP_ULE, m_Zero(),
- m_SILValue(Other)))) ||
- match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
- MatchNonNegative, m_Zero()),
- m_BuiltinInst(BuiltinValueKind::ICMP_SLE, m_Zero(),
- MatchNonNegative)))) {
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
- }
-
- // Comparisons with Int.Max.
- IntegerLiteralInst *IntMax;
-
- // Check signed comparisons.
- if (match(BI,
- m_CombineOr(
- // Int.max < x
- m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
- m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
- // x > Int.max
- m_BuiltinInst(BuiltinValueKind::ICMP_SGT, m_SILValue(Other),
- m_IntegerLiteralInst(IntMax)))) &&
- IntMax->getValue().isMaxSignedValue()) {
- // Any signed number should be <= then IntMax.
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
- }
-
- if (match(BI,
- m_CombineOr(
- m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
- m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
- m_BuiltinInst(BuiltinValueKind::ICMP_SLE, m_SILValue(Other),
- m_IntegerLiteralInst(IntMax)))) &&
- IntMax->getValue().isMaxSignedValue()) {
- // Any signed number should be <= then IntMax.
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
- }
-
- // For any x of the same size as Int.max and n>=1 , (x>>n) is always <= Int.max,
- // that is (x>>n) <= Int.max and Int.max >= (x>>n) are true.
- if (match(BI,
- m_CombineOr(
- // Int.max >= x
- m_BuiltinInst(BuiltinValueKind::ICMP_UGE,
- m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
- // x <= Int.max
- m_BuiltinInst(BuiltinValueKind::ICMP_ULE, m_SILValue(Other),
- m_IntegerLiteralInst(IntMax)),
- // Int.max >= x
- m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
- m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
- // x <= Int.max
- m_BuiltinInst(BuiltinValueKind::ICMP_SLE, m_SILValue(Other),
- m_IntegerLiteralInst(IntMax)))) &&
- IntMax->getValue().isMaxSignedValue()) {
- // Check if other is a result of a logical shift right by a strictly
- // positive number of bits.
- IntegerLiteralInst *ShiftCount;
- if (match(Other, m_BuiltinInst(BuiltinValueKind::LShr, m_ValueBase(),
- m_IntegerLiteralInst(ShiftCount))) &&
- ShiftCount->getValue().isStrictlyPositive()) {
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
- }
- }
-
- // At the same time (x>>n) > Int.max and Int.max < (x>>n) is false.
- if (match(BI,
- m_CombineOr(
- // Int.max < x
- m_BuiltinInst(BuiltinValueKind::ICMP_ULT,
- m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
- // x > Int.max
- m_BuiltinInst(BuiltinValueKind::ICMP_UGT, m_SILValue(Other),
- m_IntegerLiteralInst(IntMax)),
- // Int.max < x
- m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
- m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
- // x > Int.max
- m_BuiltinInst(BuiltinValueKind::ICMP_SGT, m_SILValue(Other),
- m_IntegerLiteralInst(IntMax)))) &&
- IntMax->getValue().isMaxSignedValue()) {
- // Check if other is a result of a logical shift right by a strictly
- // positive number of bits.
- IntegerLiteralInst *ShiftCount;
- if (match(Other, m_BuiltinInst(BuiltinValueKind::LShr, m_ValueBase(),
- m_IntegerLiteralInst(ShiftCount))) &&
- ShiftCount->getValue().isStrictlyPositive()) {
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
- }
- }
-
- // Fold x < 0 into false, if x is known to be a result of an unsigned
- // operation with overflow checks enabled.
- BuiltinInst *BIOp;
- if (match(BI, m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
- m_TupleExtractInst(m_BuiltinInst(BIOp), 0),
- m_Zero()))) {
- // Check if Other is a result of an unsigned operation with overflow.
- switch (BIOp->getBuiltinInfo().ID) {
- default:
- break;
- case BuiltinValueKind::UAddOver:
- case BuiltinValueKind::USubOver:
- case BuiltinValueKind::UMulOver:
- // Was it an operation with an overflow check?
- if (match(BIOp->getOperand(2), m_One())) {
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
- }
- }
- }
-
- // Fold x >= 0 into true, if x is known to be a result of an unsigned
- // operation with overflow checks enabled.
- if (match(BI, m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
- m_TupleExtractInst(m_BuiltinInst(BIOp), 0),
- m_Zero()))) {
- // Check if Other is a result of an unsigned operation with overflow.
- switch (BIOp->getBuiltinInfo().ID) {
- default:
- break;
- case BuiltinValueKind::UAddOver:
- case BuiltinValueKind::USubOver:
- case BuiltinValueKind::UMulOver:
- // Was it an operation with an overflow check?
- if (match(BIOp->getOperand(2), m_One())) {
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
- }
- }
- }
-
- return nullptr;
-}
-
-static SILValue
-constantFoldAndCheckDivision(BuiltinInst *BI, BuiltinValueKind ID,
- Optional<bool> &ResultsInError) {
- assert(ID == BuiltinValueKind::SDiv ||
- ID == BuiltinValueKind::SRem ||
- ID == BuiltinValueKind::UDiv ||
- ID == BuiltinValueKind::URem);
-
- OperandValueArrayRef Args = BI->getArguments();
- SILModule &M = BI->getModule();
-
- // Get the denominator.
- auto *Denom = dyn_cast<IntegerLiteralInst>(Args[1]);
- if (!Denom)
- return nullptr;
- APInt DenomVal = Denom->getValue();
-
- // If the denominator is zero...
- if (DenomVal == 0) {
- // And if we are not asked to report errors, just return nullptr.
- if (!ResultsInError.hasValue())
- return nullptr;
-
- // Otherwise emit a diagnosis error and set ResultsInError to true.
- diagnose(M.getASTContext(), BI->getLoc().getSourceLoc(),
- diag::division_by_zero);
- ResultsInError = Optional<bool>(true);
- return nullptr;
- }
-
- // Get the numerator.
- auto *Num = dyn_cast<IntegerLiteralInst>(Args[0]);
- if (!Num)
- return nullptr;
- APInt NumVal = Num->getValue();
-
- bool Overflowed;
- APInt ResVal = constantFoldDiv(NumVal, DenomVal, Overflowed, ID);
-
- // If we overflowed...
- if (Overflowed) {
- // And we are not asked to produce diagnostics, just return nullptr...
- if (!ResultsInError.hasValue())
- return nullptr;
-
- bool IsRem = ID == BuiltinValueKind::SRem || ID == BuiltinValueKind::URem;
-
- // Otherwise emit the diagnostic, set ResultsInError to be true, and return
- // nullptr.
- diagnose(M.getASTContext(),
- BI->getLoc().getSourceLoc(),
- diag::division_overflow,
- NumVal.toString(/*Radix*/ 10, /*Signed*/true),
- IsRem ? "%" : "/",
- DenomVal.toString(/*Radix*/ 10, /*Signed*/true));
- ResultsInError = Optional<bool>(true);
- return nullptr;
- }
-
- // Add the literal instruction to represent the result of the division.
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), ResVal);
-}
-
-/// \brief Fold binary operations.
-///
-/// The list of operations we constant fold might not be complete. Start with
-/// folding the operations used by the standard library.
-static SILValue constantFoldBinary(BuiltinInst *BI,
- BuiltinValueKind ID,
- Optional<bool> &ResultsInError) {
- switch (ID) {
- default:
- llvm_unreachable("Not all BUILTIN_BINARY_OPERATIONs are covered!");
-
- // Not supported yet (not easily computable for APInt).
- case BuiltinValueKind::ExactSDiv:
- case BuiltinValueKind::ExactUDiv:
- return nullptr;
-
- // Not supported now.
- case BuiltinValueKind::FRem:
- return nullptr;
-
- // Fold constant division operations and report div by zero.
- case BuiltinValueKind::SDiv:
- case BuiltinValueKind::SRem:
- case BuiltinValueKind::UDiv:
- case BuiltinValueKind::URem: {
- return constantFoldAndCheckDivision(BI, ID, ResultsInError);
- }
-
- // Are there valid uses for these in stdlib?
- case BuiltinValueKind::Add:
- case BuiltinValueKind::Mul:
- case BuiltinValueKind::Sub:
- return nullptr;
-
- case BuiltinValueKind::And:
- case BuiltinValueKind::AShr:
- case BuiltinValueKind::LShr:
- case BuiltinValueKind::Or:
- case BuiltinValueKind::Shl:
- case BuiltinValueKind::Xor: {
- OperandValueArrayRef Args = BI->getArguments();
- auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
- auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
- if (!RHS || !LHS)
- return nullptr;
- APInt LHSI = LHS->getValue();
- APInt RHSI = RHS->getValue();
-
- bool IsShift = ID == BuiltinValueKind::AShr ||
- ID == BuiltinValueKind::LShr ||
- ID == BuiltinValueKind::Shl;
-
- // Reject shifting all significant bits
- if (IsShift && RHSI.getZExtValue() >= LHSI.getBitWidth()) {
- diagnose(BI->getModule().getASTContext(),
- RHS->getLoc().getSourceLoc(),
- diag::shifting_all_significant_bits);
-
- ResultsInError = Optional<bool>(true);
- return nullptr;
- }
-
- APInt ResI = constantFoldBitOperation(LHSI, RHSI, ID);
- // Add the literal instruction to represent the result.
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), ResI);
- }
- case BuiltinValueKind::FAdd:
- case BuiltinValueKind::FDiv:
- case BuiltinValueKind::FMul:
- case BuiltinValueKind::FSub: {
- OperandValueArrayRef Args = BI->getArguments();
- auto *LHS = dyn_cast<FloatLiteralInst>(Args[0]);
- auto *RHS = dyn_cast<FloatLiteralInst>(Args[1]);
- if (!RHS || !LHS)
- return nullptr;
- APFloat LHSF = LHS->getValue();
- APFloat RHSF = RHS->getValue();
- switch (ID) {
- default: llvm_unreachable("Not all cases are covered!");
- case BuiltinValueKind::FAdd:
- LHSF.add(RHSF, APFloat::rmNearestTiesToEven);
- break;
- case BuiltinValueKind::FDiv:
- LHSF.divide(RHSF, APFloat::rmNearestTiesToEven);
- break;
- case BuiltinValueKind::FMul:
- LHSF.multiply(RHSF, APFloat::rmNearestTiesToEven);
- break;
- case BuiltinValueKind::FSub:
- LHSF.subtract(RHSF, APFloat::rmNearestTiesToEven);
- break;
- }
-
- // Add the literal instruction to represent the result.
- SILBuilderWithScope B(BI);
- return B.createFloatLiteral(BI->getLoc(), BI->getType(), LHSF);
- }
- }
-}
-
-static std::pair<bool, bool> getTypeSignedness(const BuiltinInfo &Builtin) {
- bool SrcTySigned =
- (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::SToUCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::SUCheckedConversion);
-
- bool DstTySigned =
- (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::UToSCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::USCheckedConversion);
-
- return std::pair<bool, bool>(SrcTySigned, DstTySigned);
-}
-
-static SILValue
-constantFoldAndCheckIntegerConversions(BuiltinInst *BI,
- const BuiltinInfo &Builtin,
- Optional<bool> &ResultsInError) {
- assert(Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::UToUCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::SToUCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::UToSCheckedTrunc ||
- Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
- Builtin.ID == BuiltinValueKind::USCheckedConversion);
-
- // Check if we are converting a constant integer.
- OperandValueArrayRef Args = BI->getArguments();
- auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
- if (!V)
- return nullptr;
- APInt SrcVal = V->getValue();
-
- // Get source type and bit width.
- Type SrcTy = Builtin.Types[0];
- uint32_t SrcBitWidth =
- Builtin.Types[0]->castTo<BuiltinIntegerType>()->getGreatestWidth();
-
- // Compute the destination (for SrcBitWidth < DestBitWidth) and enough info
- // to check for overflow.
- APInt Result;
- bool OverflowError;
- Type DstTy;
-
- // Process conversions signed <-> unsigned for same size integers.
- if (Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
- Builtin.ID == BuiltinValueKind::USCheckedConversion) {
- DstTy = SrcTy;
- Result = SrcVal;
- // Report an error if the sign bit is set.
- OverflowError = SrcVal.isNegative();
-
- // Process truncation from unsigned to signed.
- } else if (Builtin.ID != BuiltinValueKind::UToSCheckedTrunc) {
- assert(Builtin.Types.size() == 2);
- DstTy = Builtin.Types[1];
- uint32_t DstBitWidth =
- DstTy->castTo<BuiltinIntegerType>()->getGreatestWidth();
- // Result = trunc_IntFrom_IntTo(Val)
- // For signed destination:
- // sext_IntFrom(Result) == Val ? Result : overflow_error
- // For signed destination:
- // zext_IntFrom(Result) == Val ? Result : overflow_error
- Result = SrcVal.trunc(DstBitWidth);
- // Get the signedness of the destination.
- bool Signed = (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc);
- APInt Ext = Signed ? Result.sext(SrcBitWidth) : Result.zext(SrcBitWidth);
- OverflowError = (SrcVal != Ext);
-
- // Process the rest of truncations.
- } else {
- assert(Builtin.Types.size() == 2);
- DstTy = Builtin.Types[1];
- uint32_t DstBitWidth =
- Builtin.Types[1]->castTo<BuiltinIntegerType>()->getGreatestWidth();
- // Compute the destination (for SrcBitWidth < DestBitWidth):
- // Result = trunc_IntTo(Val)
- // Trunc = trunc_'IntTo-1bit'(Val)
- // zext_IntFrom(Trunc) == Val ? Result : overflow_error
- Result = SrcVal.trunc(DstBitWidth);
- APInt TruncVal = SrcVal.trunc(DstBitWidth - 1);
- OverflowError = (SrcVal != TruncVal.zext(SrcBitWidth));
- }
-
- // Check for overflow.
- if (OverflowError) {
- // If we are not asked to emit overflow diagnostics, just return nullptr on
- // overflow.
- if (!ResultsInError.hasValue())
- return nullptr;
-
- SILLocation Loc = BI->getLoc();
- SILModule &M = BI->getModule();
- const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
- Type UserSrcTy;
- Type UserDstTy;
- // Primitive heuristics to get the user-written type.
- // Eventually we might be able to use SILLocation (when it contains info
- // about inlined call chains).
- if (CE) {
- if (const TupleType *RTy = CE->getArg()->getType()->getAs<TupleType>()) {
- if (RTy->getNumElements() == 1) {
- UserSrcTy = RTy->getElementType(0);
- UserDstTy = CE->getType();
- }
- } else {
- UserSrcTy = CE->getArg()->getType();
- UserDstTy = CE->getType();
- }
- }
-
-
- // Assume that we are converting from a literal if the Source size is
- // 2048. Is there a better way to identify conversions from literals?
- bool Literal = (SrcBitWidth == 2048);
-
- // FIXME: This will prevent hard error in cases the error is coming
- // from ObjC interoperability code. Currently, we treat NSUInteger as
- // Int.
- if (Loc.getSourceLoc().isInvalid()) {
- // Otherwise emit the appropriate diagnostic and set ResultsInError.
- if (Literal)
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diag::integer_literal_overflow_warn,
- UserDstTy.isNull() ? DstTy : UserDstTy);
- else
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diag::integer_conversion_overflow_warn,
- UserSrcTy.isNull() ? SrcTy : UserSrcTy,
- UserDstTy.isNull() ? DstTy : UserDstTy);
-
- ResultsInError = Optional<bool>(true);
- return nullptr;
- }
-
- // Otherwise report the overflow error.
- if (Literal) {
- bool SrcTySigned, DstTySigned;
- std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
- SmallString<10> SrcAsString;
- SrcVal.toString(SrcAsString, /*radix*/10, SrcTySigned);
-
- // Try to print user-visible types if they are available.
- if (!UserDstTy.isNull()) {
- auto diagID = diag::integer_literal_overflow;
-
- // If this is a negative literal in an unsigned type, use a specific
- // diagnostic.
- if (SrcTySigned && !DstTySigned && SrcVal.isNegative())
- diagID = diag::negative_integer_literal_overflow_unsigned;
-
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diagID, UserDstTy, SrcAsString);
- // Otherwise, print the Builtin Types.
- } else {
- bool SrcTySigned, DstTySigned;
- std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diag::integer_literal_overflow_builtin_types,
- DstTySigned, DstTy, SrcAsString);
- }
- } else {
- if (Builtin.ID == BuiltinValueKind::SUCheckedConversion) {
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diag::integer_conversion_sign_error,
- UserDstTy.isNull() ? DstTy : UserDstTy);
- } else {
- // Try to print user-visible types if they are available.
- if (!UserSrcTy.isNull()) {
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diag::integer_conversion_overflow,
- UserSrcTy, UserDstTy);
-
- // Otherwise, print the Builtin Types.
- } else {
- // Since builtin types are sign-agnostic, print the signedness
- // separately.
- bool SrcTySigned, DstTySigned;
- std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diag::integer_conversion_overflow_builtin_types,
- SrcTySigned, SrcTy, DstTySigned, DstTy);
- }
- }
- }
-
- ResultsInError = Optional<bool>(true);
- return nullptr;
- }
-
- // The call to the builtin should be replaced with the constant value.
- return constructResultWithOverflowTuple(BI, Result, false);
-
-}
-
-static SILValue constantFoldBuiltin(BuiltinInst *BI,
- Optional<bool> &ResultsInError) {
- const IntrinsicInfo &Intrinsic = BI->getIntrinsicInfo();
- SILModule &M = BI->getModule();
-
- // If it's an llvm intrinsic, fold the intrinsic.
- if (Intrinsic.ID != llvm::Intrinsic::not_intrinsic)
- return constantFoldIntrinsic(BI, Intrinsic.ID, ResultsInError);
-
- // Otherwise, it should be one of the builtin functions.
- OperandValueArrayRef Args = BI->getArguments();
- const BuiltinInfo &Builtin = BI->getBuiltinInfo();
-
- switch (Builtin.ID) {
- default: break;
-
-// Check and fold binary arithmetic with overflow.
-#define BUILTIN(id, name, Attrs)
-#define BUILTIN_BINARY_OPERATION_WITH_OVERFLOW(id, name, _, attrs, overload) \
- case BuiltinValueKind::id:
-#include "swift/AST/Builtins.def"
- return constantFoldBinaryWithOverflow(BI, Builtin.ID, ResultsInError);
-
-#define BUILTIN(id, name, Attrs)
-#define BUILTIN_BINARY_OPERATION(id, name, attrs, overload) \
-case BuiltinValueKind::id:
-#include "swift/AST/Builtins.def"
- return constantFoldBinary(BI, Builtin.ID, ResultsInError);
-
-// Fold comparison predicates.
-#define BUILTIN(id, name, Attrs)
-#define BUILTIN_BINARY_PREDICATE(id, name, attrs, overload) \
-case BuiltinValueKind::id:
-#include "swift/AST/Builtins.def"
- return constantFoldCompare(BI, Builtin.ID);
-
- case BuiltinValueKind::Trunc:
- case BuiltinValueKind::ZExt:
- case BuiltinValueKind::SExt:
- case BuiltinValueKind::TruncOrBitCast:
- case BuiltinValueKind::ZExtOrBitCast:
- case BuiltinValueKind::SExtOrBitCast: {
-
- // We can fold if the value being cast is a constant.
- auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
- if (!V)
- return nullptr;
-
- APInt CastResV = constantFoldCast(V->getValue(), Builtin);
-
- // Add the literal instruction to represent the result of the cast.
- SILBuilderWithScope B(BI);
- return B.createIntegerLiteral(BI->getLoc(), BI->getType(), CastResV);
- }
-
- // Process special builtins that are designed to check for overflows in
- // integer conversions.
- case BuiltinValueKind::SToSCheckedTrunc:
- case BuiltinValueKind::UToUCheckedTrunc:
- case BuiltinValueKind::SToUCheckedTrunc:
- case BuiltinValueKind::UToSCheckedTrunc:
- case BuiltinValueKind::SUCheckedConversion:
- case BuiltinValueKind::USCheckedConversion: {
- return constantFoldAndCheckIntegerConversions(BI, Builtin, ResultsInError);
- }
-
- case BuiltinValueKind::IntToFPWithOverflow: {
- // Get the value. It should be a constant in most cases.
- // Note, this will not always be a constant, for example, when analyzing
- // _convertFromBuiltinIntegerLiteral function itself.
- auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
- if (!V)
- return nullptr;
- APInt SrcVal = V->getValue();
- Type DestTy = Builtin.Types[1];
-
- APFloat TruncVal(
- DestTy->castTo<BuiltinFloatType>()->getAPFloatSemantics());
- APFloat::opStatus ConversionStatus = TruncVal.convertFromAPInt(
- SrcVal, /*IsSigned=*/true, APFloat::rmNearestTiesToEven);
-
- SILLocation Loc = BI->getLoc();
- const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
-
- // Check for overflow.
- if (ConversionStatus & APFloat::opOverflow) {
- // If we overflow and are not asked for diagnostics, just return nullptr.
- if (!ResultsInError.hasValue())
- return nullptr;
-
- SmallString<10> SrcAsString;
- SrcVal.toString(SrcAsString, /*radix*/10, true /*isSigned*/);
-
- // Otherwise emit our diagnostics and then return nullptr.
- diagnose(M.getASTContext(), Loc.getSourceLoc(),
- diag::integer_literal_overflow,
- CE ? CE->getType() : DestTy, SrcAsString);
- ResultsInError = Optional<bool>(true);
- return nullptr;
- }
-
- // The call to the builtin should be replaced with the constant value.
- SILBuilderWithScope B(BI);
- return B.createFloatLiteral(Loc, BI->getType(), TruncVal);
- }
-
- case BuiltinValueKind::FPTrunc: {
- // Get the value. It should be a constant in most cases.
- auto *V = dyn_cast<FloatLiteralInst>(Args[0]);
- if (!V)
- return nullptr;
- APFloat TruncVal = V->getValue();
- Type DestTy = Builtin.Types[1];
- bool losesInfo;
- APFloat::opStatus ConversionStatus = TruncVal.convert(
- DestTy->castTo<BuiltinFloatType>()->getAPFloatSemantics(),
- APFloat::rmNearestTiesToEven, &losesInfo);
- SILLocation Loc = BI->getLoc();
-
- // Check if conversion was successful.
- if (ConversionStatus != APFloat::opStatus::opOK &&
- ConversionStatus != APFloat::opStatus::opInexact) {
- return nullptr;
- }
-
- // The call to the builtin should be replaced with the constant value.
- SILBuilderWithScope B(BI);
- return B.createFloatLiteral(Loc, BI->getType(), TruncVal);
- }
-
- case BuiltinValueKind::AssumeNonNegative: {
- auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
- if (!V)
- return nullptr;
-
- APInt VInt = V->getValue();
- if (VInt.isNegative() && ResultsInError.hasValue()) {
- diagnose(M.getASTContext(), BI->getLoc().getSourceLoc(),
- diag::wrong_non_negative_assumption,
- VInt.toString(/*Radix*/ 10, /*Signed*/ true));
- ResultsInError = Optional<bool>(true);
- }
- return V;
- }
- }
- return nullptr;
-}
-
-static SILValue constantFoldInstruction(SILInstruction &I,
- Optional<bool> &ResultsInError) {
- // Constant fold function calls.
- if (auto *BI = dyn_cast<BuiltinInst>(&I)) {
- return constantFoldBuiltin(BI, ResultsInError);
- }
-
- // Constant fold extraction of a constant element.
- if (auto *TEI = dyn_cast<TupleExtractInst>(&I)) {
- if (auto *TheTuple = dyn_cast<TupleInst>(TEI->getOperand()))
- return TheTuple->getElement(TEI->getFieldNo());
- }
-
- // Constant fold extraction of a constant struct element.
- if (auto *SEI = dyn_cast<StructExtractInst>(&I)) {
- if (auto *Struct = dyn_cast<StructInst>(SEI->getOperand()))
- return Struct->getOperandForField(SEI->getField())->get();
- }
-
- // Constant fold indexing insts of a 0 integer literal.
- if (auto *II = dyn_cast<IndexingInst>(&I))
- if (auto *IntLiteral = dyn_cast<IntegerLiteralInst>(II->getIndex()))
- if (!IntLiteral->getValue())
- return II->getBase();
-
- return SILValue();
-}
-
-static bool isApplyOfBuiltin(SILInstruction &I, BuiltinValueKind kind) {
- if (auto *BI = dyn_cast<BuiltinInst>(&I))
- if (BI->getBuiltinInfo().ID == kind)
- return true;
- return false;
-}
-
-static bool isApplyOfStringConcat(SILInstruction &I) {
- if (auto *AI = dyn_cast<ApplyInst>(&I))
- if (auto *Fn = AI->getReferencedFunction())
- if (Fn->hasSemanticsAttr("string.concat"))
- return true;
- return false;
-}
-
-static bool isFoldable(SILInstruction *I) {
- return isa<IntegerLiteralInst>(I) || isa<FloatLiteralInst>(I);
-}
-
-static bool
-constantFoldStringConcatenation(ApplyInst *AI,
- llvm::SetVector<SILInstruction *> &WorkList) {
- SILBuilder B(AI);
- // Try to apply the string literal concatenation optimization.
- auto *Concatenated = tryToConcatenateStrings(AI, B);
- // Bail if string literal concatenation could not be performed.
- if (!Concatenated)
- return false;
-
- // Replace all uses of the old instruction by a new instruction.
- AI->replaceAllUsesWith(Concatenated);
-
- auto RemoveCallback = [&](SILInstruction *DeadI) { WorkList.remove(DeadI); };
- // Remove operands that are not used anymore.
- // Even if they are apply_inst, it is safe to
- // do so, because they can only be applies
- // of functions annotated as string.utf16
- // or string.utf16.
- for (auto &Op : AI->getAllOperands()) {
- SILValue Val = Op.get();
- Op.drop();
- if (Val->use_empty()) {
- auto *DeadI = Val->getDefiningInstruction();
- assert(DeadI);
- recursivelyDeleteTriviallyDeadInstructions(DeadI, /*force*/ true,
- RemoveCallback);
- WorkList.remove(DeadI);
- }
- }
- // Schedule users of the new instruction for constant folding.
- // We only need to schedule the string.concat invocations.
- for (auto AIUse : Concatenated->getUses()) {
- if (isApplyOfStringConcat(*AIUse->getUser())) {
- WorkList.insert(AIUse->getUser());
- }
- }
- // Delete the old apply instruction.
- recursivelyDeleteTriviallyDeadInstructions(AI, /*force*/ true,
- RemoveCallback);
- return true;
-}
-
-/// Initialize the worklist to all of the constant instructions.
-static void initializeWorklist(SILFunction &F,
- bool InstantiateAssertConfiguration,
- llvm::SetVector<SILInstruction *> &WorkList) {
- for (auto &BB : F) {
- for (auto &I : BB) {
- if (isFoldable(&I) && I.hasUsesOfAnyResult()) {
- WorkList.insert(&I);
- continue;
- }
-
- if (InstantiateAssertConfiguration &&
- (isApplyOfBuiltin(I, BuiltinValueKind::AssertConf) ||
- isApplyOfBuiltin(I, BuiltinValueKind::CondUnreachable))) {
- WorkList.insert(&I);
- continue;
- }
-
- if (isa<CheckedCastBranchInst>(&I) ||
- isa<CheckedCastAddrBranchInst>(&I) ||
- isa<UnconditionalCheckedCastInst>(&I) ||
- isa<UnconditionalCheckedCastAddrInst>(&I)) {
- WorkList.insert(&I);
- continue;
- }
-
- if (!isApplyOfStringConcat(I)) {
- continue;
- }
- WorkList.insert(&I);
- }
- }
-}
-
-SILAnalysis::InvalidationKind
-processFunction(SILFunction &F, bool EnableDiagnostics,
- unsigned AssertConfiguration) {
- DEBUG(llvm::dbgs() << "*** ConstPropagation processing: " << F.getName()
- << "\n");
-
- // This is the list of traits that this transformation might preserve.
- bool InvalidateBranches = false;
- bool InvalidateCalls = false;
- bool InvalidateInstructions = false;
-
- // Should we replace calls to assert_configuration by the assert
- // configuration.
- bool InstantiateAssertConfiguration =
- (AssertConfiguration != SILOptions::DisableReplacement);
-
- // The list of instructions whose evaluation resulted in error or warning.
- // This is used to avoid duplicate error reporting in case we reach the same
- // instruction from different entry points in the WorkList.
- llvm::DenseSet<SILInstruction *> ErrorSet;
-
- // The worklist of the constants that could be folded into their users.
- llvm::SetVector<SILInstruction *> WorkList;
- initializeWorklist(F, InstantiateAssertConfiguration, WorkList);
-
- llvm::SetVector<SILInstruction *> FoldedUsers;
- CastOptimizer CastOpt(
- [&](SingleValueInstruction *I, ValueBase *V) { /* ReplaceInstUsesAction */
-
- InvalidateInstructions = true;
- I->replaceAllUsesWith(V);
- },
- [&](SILInstruction *I) { /* EraseAction */
- auto *TI = dyn_cast<TermInst>(I);
-
- if (TI) {
- // Invalidate analysis information related to branches. Replacing
- // unconditional_check_branch type instructions by a trap will also
- // invalidate branches/the CFG.
- InvalidateBranches = true;
- }
-
- InvalidateInstructions = true;
-
- WorkList.remove(I);
- I->eraseFromParent();
- });
-
- while (!WorkList.empty()) {
- SILInstruction *I = WorkList.pop_back_val();
- assert(I->getParent() && "SILInstruction must have parent.");
-
- DEBUG(llvm::dbgs() << "Visiting: " << *I);
-
- // Replace assert_configuration instructions by their constant value. We
- // want them to be replace even if we can't fully propagate the constant.
- if (InstantiateAssertConfiguration)
- if (auto *BI = dyn_cast<BuiltinInst>(I)) {
- if (isApplyOfBuiltin(*BI, BuiltinValueKind::AssertConf)) {
- // Instantiate the constant.
- SILBuilderWithScope B(BI);
- auto AssertConfInt = B.createIntegerLiteral(
- BI->getLoc(), BI->getType(), AssertConfiguration);
- BI->replaceAllUsesWith(AssertConfInt);
- // Schedule users for constant folding.
- WorkList.insert(AssertConfInt);
- // Delete the call.
- recursivelyDeleteTriviallyDeadInstructions(BI);
-
- InvalidateInstructions = true;
- continue;
- }
-
- // Kill calls to conditionallyUnreachable if we've folded assert
- // configuration calls.
- if (isApplyOfBuiltin(*BI, BuiltinValueKind::CondUnreachable)) {
- assert(BI->use_empty() && "use of conditionallyUnreachable?!");
- recursivelyDeleteTriviallyDeadInstructions(BI, /*force*/ true);
- InvalidateInstructions = true;
- continue;
- }
- }
-
- if (auto *AI = dyn_cast<ApplyInst>(I)) {
- // Apply may only come from a string.concat invocation.
- if (constantFoldStringConcatenation(AI, WorkList)) {
- // Invalidate all analysis that's related to the call graph.
- InvalidateInstructions = true;
- }
-
- continue;
- }
-
- if (isa<CheckedCastBranchInst>(I) || isa<CheckedCastAddrBranchInst>(I) ||
- isa<UnconditionalCheckedCastInst>(I) ||
- isa<UnconditionalCheckedCastAddrInst>(I)) {
- // Try to perform cast optimizations. Invalidation is handled by a
- // callback inside the cast optimizer.
- SILInstruction *Result = nullptr;
- switch(I->getKind()) {
- default:
- llvm_unreachable("Unexpected instruction for cast optimizations");
- case SILInstructionKind::CheckedCastBranchInst:
- Result = CastOpt.simplifyCheckedCastBranchInst(cast<CheckedCastBranchInst>(I));
- break;
- case SILInstructionKind::CheckedCastAddrBranchInst:
- Result = CastOpt.simplifyCheckedCastAddrBranchInst(cast<CheckedCastAddrBranchInst>(I));
- break;
- case SILInstructionKind::UnconditionalCheckedCastInst: {
- auto Value =
- CastOpt.optimizeUnconditionalCheckedCastInst(cast<UnconditionalCheckedCastInst>(I));
- if (Value) Result = Value->getDefiningInstruction();
- break;
- }
- case SILInstructionKind::UnconditionalCheckedCastAddrInst:
- Result = CastOpt.optimizeUnconditionalCheckedCastAddrInst(cast<UnconditionalCheckedCastAddrInst>(I));
- break;
- }
-
- if (Result) {
- if (isa<CheckedCastBranchInst>(Result) ||
- isa<CheckedCastAddrBranchInst>(Result) ||
- isa<UnconditionalCheckedCastInst>(Result) ||
- isa<UnconditionalCheckedCastAddrInst>(Result))
- WorkList.insert(Result);
- }
- continue;
- }
-
-
- // Go through all users of the constant and try to fold them.
- // TODO: MultiValueInstruction
- FoldedUsers.clear();
- for (auto Use : cast<SingleValueInstruction>(I)->getUses()) {
- SILInstruction *User = Use->getUser();
- DEBUG(llvm::dbgs() << " User: " << *User);
-
- // It is possible that we had processed this user already. Do not try
- // to fold it again if we had previously produced an error while folding
- // it. It is not always possible to fold an instruction in case of error.
- if (ErrorSet.count(User))
- continue;
-
- // Some constant users may indirectly cause folding of their users.
- if (isa<StructInst>(User) || isa<TupleInst>(User)) {
- WorkList.insert(User);
- continue;
- }
-
- // Always consider cond_fail instructions as potential for DCE. If the
- // expression feeding them is false, they are dead. We can't handle this
- // as part of the constant folding logic, because there is no value
- // they can produce (other than empty tuple, which is wasteful).
- if (isa<CondFailInst>(User))
- FoldedUsers.insert(User);
-
- // Initialize ResultsInError as a None optional.
- //
- // We are essentially using this optional to represent 3 states: true,
- // false, and n/a.
- Optional<bool> ResultsInError;
-
- // If we are asked to emit diagnostics, override ResultsInError with a
- // Some optional initialized to false.
- if (EnableDiagnostics)
- ResultsInError = false;
-
- // Try to fold the user. If ResultsInError is None, we do not emit any
- // diagnostics. If ResultsInError is some, we use it as our return value.
- SILValue C = constantFoldInstruction(*User, ResultsInError);
-
- // If we did not pass in a None and the optional is set to true, add the
- // user to our error set.
- if (ResultsInError.hasValue() && ResultsInError.getValue())
- ErrorSet.insert(User);
-
- // We failed to constant propagate... continue...
- if (!C)
- continue;
-
- // We can currently only do this constant-folding of single-value
- // instructions.
- auto UserV = cast<SingleValueInstruction>(User);
-
- // Ok, we have succeeded. Add user to the FoldedUsers list and perform the
- // necessary cleanups, RAUWs, etc.
- FoldedUsers.insert(User);
- ++NumInstFolded;
-
- InvalidateInstructions = true;
-
- // If the constant produced a tuple, be smarter than RAUW: explicitly nuke
- // any tuple_extract instructions using the apply. This is a common case
- // for functions returning multiple values.
- if (auto *TI = dyn_cast<TupleInst>(C)) {
- for (auto UI = UserV->use_begin(), E = UserV->use_end(); UI != E;) {
- Operand *O = *UI++;
-
- // If the user is a tuple_extract, just substitute the right value in.
- if (auto *TEI = dyn_cast<TupleExtractInst>(O->getUser())) {
- SILValue NewVal = TI->getOperand(TEI->getFieldNo());
- TEI->replaceAllUsesWith(NewVal);
- TEI->dropAllReferences();
- FoldedUsers.insert(TEI);
- if (auto *Inst = NewVal->getDefiningInstruction())
- WorkList.insert(Inst);
- }
- }
-
- if (UserV->use_empty())
- FoldedUsers.insert(TI);
- }
-
-
- // We were able to fold, so all users should use the new folded value.
- UserV->replaceAllUsesWith(C);
-
- // The new constant could be further folded now, add it to the worklist.
- if (auto *Inst = C->getDefiningInstruction())
- WorkList.insert(Inst);
- }
-
- // Eagerly DCE. We do this after visiting all users to ensure we don't
- // invalidate the uses iterator.
- ArrayRef<SILInstruction *> UserArray = FoldedUsers.getArrayRef();
- if (!UserArray.empty()) {
- InvalidateInstructions = true;
- }
-
- recursivelyDeleteTriviallyDeadInstructions(UserArray, false,
- [&](SILInstruction *DeadI) {
- WorkList.remove(DeadI);
- });
- }
-
- // TODO: refactor this code outside of the method. Passes should not merge
- // invalidation kinds themselves.
- using InvalidationKind = SILAnalysis::InvalidationKind;
-
- unsigned Inv = InvalidationKind::Nothing;
- if (InvalidateInstructions) Inv |= (unsigned) InvalidationKind::Instructions;
- if (InvalidateCalls) Inv |= (unsigned) InvalidationKind::Calls;
- if (InvalidateBranches) Inv |= (unsigned) InvalidationKind::Branches;
- return InvalidationKind(Inv);
-}
//===----------------------------------------------------------------------===//
// Top Level Driver
@@ -1286,8 +33,9 @@
/// The entry point to the transformation.
void run() override {
- auto Invalidation = processFunction(*getFunction(), EnableDiagnostics,
- getOptions().AssertConfig);
+ ConstantFolder Folder(getOptions().AssertConfig, EnableDiagnostics);
+ Folder.initializeWorklist(*getFunction());
+ auto Invalidation = Folder.processWorkList();
if (Invalidation != SILAnalysis::InvalidationKind::Nothing) {
invalidateAnalysis(Invalidation);
diff --git a/lib/SILOptimizer/PassManager/PassPipeline.cpp b/lib/SILOptimizer/PassManager/PassPipeline.cpp
index 408d88c..d4fabca 100644
--- a/lib/SILOptimizer/PassManager/PassPipeline.cpp
+++ b/lib/SILOptimizer/PassManager/PassPipeline.cpp
@@ -404,6 +404,9 @@
P.addReleaseDevirtualizer();
addSSAPasses(P, OptimizationLevelKind::LowLevel);
+
+ P.addDeadObjectElimination();
+ P.addObjectOutliner();
P.addDeadStoreElimination();
// We've done a lot of optimizations on this function, attempt to FSO.
diff --git a/lib/SILOptimizer/Transforms/ARCCodeMotion.cpp b/lib/SILOptimizer/Transforms/ARCCodeMotion.cpp
index f8642e2..a1bd2d5 100644
--- a/lib/SILOptimizer/Transforms/ARCCodeMotion.cpp
+++ b/lib/SILOptimizer/Transforms/ARCCodeMotion.cpp
@@ -878,6 +878,7 @@
bool ReleaseCodeMotionContext::performCodeMotion() {
bool Changed = false;
+ SmallVector<SILInstruction *, 8> NewReleases;
// Create the new releases at each anchor point.
for (auto RC : RCRootVault) {
auto Iter = InsertPoints.find(RC);
@@ -888,10 +889,12 @@
// point. Check if the successor instruction is reusable, reuse it, do
// not insert new instruction and delete old one.
if (auto I = getPrevReusableInst(IP, Iter->first)) {
- RCInstructions.erase(I);
+ if (RCInstructions.erase(I))
+ NewReleases.push_back(I);
continue;
}
- createDecrementBefore(Iter->first, IP);
+ if (SILInstruction *I = createDecrementBefore(Iter->first, IP).getPtrOrNull())
+ NewReleases.push_back(I);
Changed = true;
}
}
@@ -900,6 +903,23 @@
++NumReleasesHoisted;
recursivelyDeleteTriviallyDeadInstructions(R, true);
}
+
+ // Eliminate pairs of retain-release if they are adjacent to each other and
+ // retain/release the same RCRoot, e.g.
+ // strong_retain %2
+ // strong_release %2
+ for (SILInstruction *ReleaseInst : NewReleases) {
+ auto InstIter = ReleaseInst->getIterator();
+ if (InstIter == ReleaseInst->getParent()->begin())
+ continue;
+
+ SILInstruction *PrevInst = &*std::prev(InstIter);
+ if (isRetainInstruction(PrevInst) && getRCRoot(PrevInst) == getRCRoot(ReleaseInst)) {
+ recursivelyDeleteTriviallyDeadInstructions(PrevInst, true);
+ recursivelyDeleteTriviallyDeadInstructions(ReleaseInst, true);
+ }
+ }
+
return Changed;
}
diff --git a/lib/SILOptimizer/Transforms/CMakeLists.txt b/lib/SILOptimizer/Transforms/CMakeLists.txt
index fce464c..0f454d8 100644
--- a/lib/SILOptimizer/Transforms/CMakeLists.txt
+++ b/lib/SILOptimizer/Transforms/CMakeLists.txt
@@ -16,6 +16,7 @@
Transforms/MergeCondFail.cpp
Transforms/MarkUninitializedFixup.cpp
Transforms/Outliner.cpp
+ Transforms/ObjectOutliner.cpp
Transforms/OwnershipModelEliminator.cpp
Transforms/PerformanceInliner.cpp
Transforms/RedundantLoadElimination.cpp
diff --git a/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp b/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp
index bc29f7a..adcf14f 100644
--- a/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp
+++ b/lib/SILOptimizer/Transforms/DeadObjectElimination.cpp
@@ -193,11 +193,16 @@
/// Returns false if Inst is an instruction that would require us to keep the
/// alloc_ref alive.
-static bool canZapInstruction(SILInstruction *Inst) {
+static bool canZapInstruction(SILInstruction *Inst, bool acceptRefCountInsts) {
+ if (isa<SetDeallocatingInst>(Inst) || isa<FixLifetimeInst>(Inst))
+ return true;
+
// It is ok to eliminate various retains/releases. We are either removing
// everything or nothing.
- if (isa<RefCountingInst>(Inst) || isa<StrongPinInst>(Inst))
- return true;
+ if (isa<RefCountingInst>(Inst) || isa<StrongPinInst>(Inst) ||
+ // dealloc_partial_ref invokes releases implicitly
+ isa<DeallocPartialRefInst>(Inst))
+ return acceptRefCountInsts;
// If we see a store here, we have already checked that we are storing into
// the pointer before we added it to the worklist, so we can skip it.
@@ -227,7 +232,8 @@
/// Analyze the use graph of AllocRef for any uses that would prevent us from
/// zapping it completely.
static bool
-hasUnremovableUsers(SILInstruction *AllocRef, UserList &Users) {
+hasUnremovableUsers(SILInstruction *AllocRef, UserList &Users,
+ bool acceptRefCountInsts) {
SmallVector<SILInstruction *, 16> Worklist;
Worklist.push_back(AllocRef);
@@ -246,7 +252,7 @@
}
// If we can't zap this instruction... bail...
- if (!canZapInstruction(I)) {
+ if (!canZapInstruction(I, acceptRefCountInsts)) {
DEBUG(llvm::dbgs() << " Found instruction we can't zap...\n");
return true;
}
@@ -695,15 +701,11 @@
DestructorAnalysisCache[Type] = HasSideEffects;
}
- if (HasSideEffects) {
- DEBUG(llvm::dbgs() << " Destructor had side effects. \n");
- return false;
- }
-
// Our destructor has no side effects, so if we can prove that no loads
// escape, then we can completely remove the use graph of this alloc_ref.
UserList UsersToRemove;
- if (hasUnremovableUsers(ARI, UsersToRemove)) {
+ if (hasUnremovableUsers(ARI, UsersToRemove,
+ /*acceptRefCountInsts=*/ !HasSideEffects)) {
DEBUG(llvm::dbgs() << " Found a use that cannot be zapped...\n");
return false;
}
@@ -723,7 +725,7 @@
return false;
UserList UsersToRemove;
- if (hasUnremovableUsers(ASI, UsersToRemove)) {
+ if (hasUnremovableUsers(ASI, UsersToRemove, /*acceptRefCountInsts=*/ true)) {
DEBUG(llvm::dbgs() << " Found a use that cannot be zapped...\n");
return false;
}
diff --git a/lib/SILOptimizer/Transforms/ObjectOutliner.cpp b/lib/SILOptimizer/Transforms/ObjectOutliner.cpp
new file mode 100644
index 0000000..bc7846a
--- /dev/null
+++ b/lib/SILOptimizer/Transforms/ObjectOutliner.cpp
@@ -0,0 +1,489 @@
+//===--- ObjectOutliner.cpp - Outline heap objects -----------------------===//
+//
+// This source file is part of the Swift.org open source project
+//
+// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See https://swift.org/LICENSE.txt for license information
+// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "objectoutliner"
+#include "swift/SIL/DebugUtils.h"
+#include "swift/SIL/SILBuilder.h"
+#include "swift/SILOptimizer/PassManager/Transforms.h"
+#include "swift/SILOptimizer/Utils/Local.h"
+#include "swift/AST/ASTMangler.h"
+#include "llvm/Support/Debug.h"
+using namespace swift;
+
+namespace {
+
+class ObjectOutliner {
+ NominalTypeDecl *ArrayDecl = nullptr;
+ int GlobIdx = 0;
+
+ bool isCOWType(SILType type) {
+ return type.getNominalOrBoundGenericNominal() == ArrayDecl;
+ }
+
+ bool isValidUseOfObject(SILInstruction *Val, bool isCOWObject,
+ ApplyInst **FindStringCall = nullptr);
+
+ bool getObjectInitVals(SILValue Val,
+ llvm::DenseMap<VarDecl *, StoreInst *> &MemberStores,
+ llvm::SmallVectorImpl<StoreInst *> &TailStores,
+ ApplyInst **FindStringCall);
+ bool handleTailAddr(int TailIdx, SILInstruction *I,
+ llvm::SmallVectorImpl<StoreInst *> &TailStores);
+
+ bool
+ optimizeObjectAllocation(AllocRefInst *ARI,
+ llvm::SmallVector<SILInstruction *, 4> &ToRemove);
+ void replaceFindStringCall(ApplyInst *FindStringCall);
+
+public:
+ ObjectOutliner(NominalTypeDecl *ArrayDecl) : ArrayDecl(ArrayDecl) { }
+
+ bool run(SILFunction *F);
+};
+
+bool ObjectOutliner::run(SILFunction *F) {
+ bool hasChanged = false;
+
+ for (auto &BB : *F) {
+ auto Iter = BB.begin();
+
+ // We can't remove instructions willy-nilly as we iterate because
+ // that might cause a pointer to the next instruction to become
+ // garbage, causing iterator invalidations (and crashes).
+ // Instead, we collect in a list the instructions we want to remove
+  // and erase them from the BB they belong to at the end of the loop, once we're
+ // sure it's safe to do so.
+ llvm::SmallVector<SILInstruction *, 4> ToRemove;
+
+ while (Iter != BB.end()) {
+ SILInstruction *I = &*Iter;
+ Iter++;
+ if (auto *ARI = dyn_cast<AllocRefInst>(I)) {
+ hasChanged |= optimizeObjectAllocation(ARI, ToRemove);
+ }
+ }
+ for (auto *I : ToRemove)
+ I->eraseFromParent();
+ }
+ return hasChanged;
+}
+
+/// Get all stored properties of a class, including its superclasses.
+static void getFields(ClassDecl *Cl, SmallVectorImpl<VarDecl *> &Fields) {
+ if (ClassDecl *SuperCl = Cl->getSuperclassDecl()) {
+ getFields(SuperCl, Fields);
+ }
+ for (VarDecl *Field : Cl->getStoredProperties()) {
+ Fields.push_back(Field);
+ }
+}
+
+/// Check if \p V is a valid instruction for a static initializer, including
+/// all its operands.
+static bool isValidInitVal(SILValue V) {
+ if (auto I = dyn_cast<SingleValueInstruction>(V)) {
+ if (!SILGlobalVariable::isValidStaticInitializerInst(I, I->getModule()))
+ return false;
+
+ for (Operand &Op : I->getAllOperands()) {
+ if (!isValidInitVal(Op.get()))
+ return false;
+ }
+ return true;
+ }
+ return false;
+}
+
+/// Check if a use of an object may prevent outlining the object.
+///
+/// If \p isCOWObject is true, then the object reference is wrapped into a
+/// COW container. Currently this is just Array<T>.
+/// If a use is a call to the findStringSwitchCase semantic call, the apply
+/// is returned in \p FindStringCall.
+bool ObjectOutliner::isValidUseOfObject(SILInstruction *I, bool isCOWObject,
+ ApplyInst **FindStringCall) {
+ switch (I->getKind()) {
+ case SILInstructionKind::DebugValueAddrInst:
+ case SILInstructionKind::DebugValueInst:
+ case SILInstructionKind::LoadInst:
+ case SILInstructionKind::DeallocRefInst:
+ case SILInstructionKind::StrongRetainInst:
+ case SILInstructionKind::StrongReleaseInst:
+ case SILInstructionKind::FixLifetimeInst:
+ case SILInstructionKind::SetDeallocatingInst:
+ return true;
+
+ case SILInstructionKind::ReturnInst:
+ case SILInstructionKind::TryApplyInst:
+ case SILInstructionKind::PartialApplyInst:
+ case SILInstructionKind::StoreInst:
+ /// We don't have a representation for COW objects in SIL, so we do some
+ /// ad-hoc testing: We can ignore uses of a COW object if any use after
+/// this will do a uniqueness check before the object is modified.
+ return isCOWObject;
+
+ case SILInstructionKind::ApplyInst:
+ if (!isCOWObject)
+ return false;
+ // There should only be a single call to findStringSwitchCase. But even
+  // if there are multiple calls, it's not a problem - we'll just optimize the
+ // last one we find.
+ if (cast<ApplyInst>(I)->hasSemantics("findStringSwitchCase"))
+ *FindStringCall = cast<ApplyInst>(I);
+ return true;
+
+ case SILInstructionKind::StructInst:
+ if (isCOWType(cast<StructInst>(I)->getType())) {
+ // The object is wrapped into a COW container.
+ isCOWObject = true;
+ }
+ break;
+
+ case SILInstructionKind::UncheckedRefCastInst:
+ case SILInstructionKind::StructElementAddrInst:
+ case SILInstructionKind::AddressToPointerInst:
+ assert(!isCOWObject && "instruction cannot have a COW object as operand");
+ break;
+
+ case SILInstructionKind::TupleInst:
+ case SILInstructionKind::TupleExtractInst:
+ case SILInstructionKind::EnumInst:
+ break;
+
+ case SILInstructionKind::StructExtractInst:
+ // To be on the safe side we don't consider the object as COW if it is
+ // extracted again from the COW container: the uniqueness check may be
+ // optimized away in this case.
+ isCOWObject = false;
+ break;
+
+ case SILInstructionKind::BuiltinInst: {
+ // Handle the case for comparing addresses. This occurs when the Array
+ // comparison function is inlined.
+ auto *BI = cast<BuiltinInst>(I);
+ BuiltinValueKind K = BI->getBuiltinInfo().ID;
+ if (K == BuiltinValueKind::ICMP_EQ || K == BuiltinValueKind::ICMP_NE)
+ return true;
+ return false;
+ }
+
+ default:
+ return false;
+ }
+
+ auto SVI = cast<SingleValueInstruction>(I);
+ for (Operand *Use : getNonDebugUses(SVI)) {
+ if (!isValidUseOfObject(Use->getUser(), isCOWObject, FindStringCall))
+ return false;
+ }
+ return true;
+}
+
+/// Handle the address of a tail element.
+bool ObjectOutliner::handleTailAddr(int TailIdx, SILInstruction *TailAddr,
+ llvm::SmallVectorImpl<StoreInst *> &TailStores) {
+ if (TailIdx >= 0 && TailIdx < (int)TailStores.size()) {
+ if (auto *SI = dyn_cast<StoreInst>(TailAddr)) {
+ if (!isValidInitVal(SI->getSrc()) || TailStores[TailIdx])
+ return false;
+ TailStores[TailIdx] = SI;
+ return true;
+ }
+ }
+ return isValidUseOfObject(TailAddr, /*isCOWObject*/false);
+}
+
+/// Get the init values for an object's stored properties and its tail elements.
+bool ObjectOutliner::getObjectInitVals(SILValue Val,
+ llvm::DenseMap<VarDecl *, StoreInst *> &MemberStores,
+ llvm::SmallVectorImpl<StoreInst *> &TailStores,
+ ApplyInst **FindStringCall) {
+ for (Operand *Use : Val->getUses()) {
+ SILInstruction *User = Use->getUser();
+ if (auto *UC = dyn_cast<UpcastInst>(User)) {
+ // Upcast is transparent.
+ if (!getObjectInitVals(UC, MemberStores, TailStores, FindStringCall))
+ return false;
+ } else if (auto *REA = dyn_cast<RefElementAddrInst>(User)) {
+ // The address of a stored property.
+ for (Operand *ElemAddrUse : REA->getUses()) {
+ SILInstruction *ElemAddrUser = ElemAddrUse->getUser();
+ if (auto *SI = dyn_cast<StoreInst>(ElemAddrUser)) {
+ if (!isValidInitVal(SI->getSrc()) || MemberStores[REA->getField()])
+ return false;
+ MemberStores[REA->getField()] = SI;
+ } else if (!isValidUseOfObject(ElemAddrUser, /*isCOWObject*/false)) {
+ return false;
+ }
+ }
+ } else if (auto *RTA = dyn_cast<RefTailAddrInst>(User)) {
+ // The address of a tail element.
+ for (Operand *TailUse : RTA->getUses()) {
+ SILInstruction *TailUser = TailUse->getUser();
+ if (auto *IA = dyn_cast<IndexAddrInst>(TailUser)) {
+ // An index_addr yields the address of any tail element. Only if the
+  // second operand (the index) is an integer literal can we figure out
+  // which tail element is referenced.
+ int TailIdx = -1;
+ if (auto *Index = dyn_cast<IntegerLiteralInst>(IA->getIndex()))
+ TailIdx = Index->getValue().getZExtValue();
+
+ for (Operand *IAUse : IA->getUses()) {
+ if (!handleTailAddr(TailIdx, IAUse->getUser(), TailStores))
+ return false;
+ }
+ // Without an index_addr it's the first tail element.
+ } else if (!handleTailAddr(/*TailIdx*/0, TailUser, TailStores)) {
+ return false;
+ }
+ }
+ } else if (!isValidUseOfObject(User, /*isCOWObject*/false, FindStringCall)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+class GlobalVariableMangler : public Mangle::ASTMangler {
+public:
+ std::string mangleOutlinedVariable(SILFunction *F, int &uniqueIdx) {
+ std::string GlobName;
+ do {
+ beginManglingWithoutPrefix();
+ appendOperator(F->getName());
+ appendOperator("Tv", Index(uniqueIdx++));
+ GlobName = finalize();
+ } while (F->getModule().lookUpGlobalVariable(GlobName));
+
+ return GlobName;
+ }
+};
+
+/// Try to convert an object allocation into a statically initialized object.
+///
+/// In general this works for any class, but in practice it will only kick in
+/// for array buffer objects. The use cases are array literals in a function.
+/// For example:
+/// func getarray() -> [Int] {
+/// return [1, 2, 3]
+/// }
+bool ObjectOutliner::optimizeObjectAllocation(
+ AllocRefInst *ARI, llvm::SmallVector<SILInstruction *, 4> &ToRemove) {
+
+ if (ARI->isObjC())
+ return false;
+
+ // Check how many tail allocated elements are on the object.
+ ArrayRef<Operand> TailCounts = ARI->getTailAllocatedCounts();
+ SILType TailType;
+ unsigned NumTailElems = 0;
+ if (TailCounts.size() > 0) {
+ // We only support a single tail allocated array.
+ if (TailCounts.size() > 1)
+ return false;
+ // The number of tail allocated elements must be constant.
+ if (auto *ILI = dyn_cast<IntegerLiteralInst>(TailCounts[0].get())) {
+ if (ILI->getValue().getActiveBits() > 20)
+ return false;
+ NumTailElems = ILI->getValue().getZExtValue();
+ TailType = ARI->getTailAllocatedTypes()[0];
+ } else {
+ return false;
+ }
+ }
+ SILType Ty = ARI->getType();
+ ClassDecl *Cl = Ty.getClassOrBoundGenericClass();
+ if (!Cl)
+ return false;
+ llvm::SmallVector<VarDecl *, 16> Fields;
+ getFields(Cl, Fields);
+
+ // Get the initialization stores of the object's properties and tail
+ // allocated elements. Also check if there are any "bad" uses of the object.
+ llvm::DenseMap<VarDecl *, StoreInst *> MemberStores;
+ llvm::SmallVector<StoreInst *, 16> TailStores;
+ TailStores.resize(NumTailElems);
+ ApplyInst *FindStringCall = nullptr;
+ if (!getObjectInitVals(ARI, MemberStores, TailStores, &FindStringCall))
+ return false;
+
+ // Is there a store for all the class properties?
+ if (MemberStores.size() != Fields.size())
+ return false;
+
+ // Is there a store for all tail allocated elements?
+ for (auto V : TailStores) {
+ if (!V)
+ return false;
+ }
+
+ DEBUG(llvm::dbgs() << "Outline global variable in " <<
+ ARI->getFunction()->getName() << '\n');
+
+ SILModule *Module = &ARI->getFunction()->getModule();
+ assert(!Cl->isResilient(Module->getSwiftModule(),
+ ResilienceExpansion::Minimal) &&
+ "constructor call of resilient class should prevent static allocation");
+
+ // Create a name for the outlined global variable.
+ GlobalVariableMangler Mangler;
+ std::string GlobName =
+ Mangler.mangleOutlinedVariable(ARI->getFunction(), GlobIdx);
+
+ SILGlobalVariable *Glob =
+ SILGlobalVariable::create(*Module, SILLinkage::Private, IsNotSerialized,
+ GlobName, ARI->getType());
+
+ // Schedule all init values for cloning into the initializer of Glob.
+ StaticInitCloner Cloner(Glob);
+ for (VarDecl *Field : Fields) {
+ StoreInst *MemberStore = MemberStores[Field];
+ Cloner.add(cast<SingleValueInstruction>(MemberStore->getSrc()));
+ }
+ for (StoreInst *TailStore : TailStores) {
+ Cloner.add(cast<SingleValueInstruction>(TailStore->getSrc()));
+ }
+
+ // Create the class property initializers
+ llvm::SmallVector<SILValue, 16> ObjectArgs;
+ for (VarDecl *Field : Fields) {
+ StoreInst *MemberStore = MemberStores[Field];
+ assert(MemberStore);
+ ObjectArgs.push_back(Cloner.clone(
+ cast<SingleValueInstruction>(MemberStore->getSrc())));
+ ToRemove.push_back(MemberStore);
+ }
+ // Create the initializers for the tail elements.
+ unsigned NumBaseElements = ObjectArgs.size();
+ for (StoreInst *TailStore : TailStores) {
+ ObjectArgs.push_back(Cloner.clone(
+ cast<SingleValueInstruction>(TailStore->getSrc())));
+ ToRemove.push_back(TailStore);
+ }
+ // Create the initializer for the object itself.
+ SILBuilder StaticInitBuilder(Glob);
+ StaticInitBuilder.createObject(ArtificialUnreachableLocation(),
+ ARI->getType(), ObjectArgs, NumBaseElements);
+
+ // Replace the alloc_ref by global_value + strong_retain instructions.
+ SILBuilder B(ARI);
+ GlobalValueInst *GVI = B.createGlobalValue(ARI->getLoc(), Glob);
+ B.createStrongRetain(ARI->getLoc(), GVI, B.getDefaultAtomicity());
+ llvm::SmallVector<Operand *, 8> Worklist(ARI->use_begin(), ARI->use_end());
+ while (!Worklist.empty()) {
+ auto *Use = Worklist.pop_back_val();
+ SILInstruction *User = Use->getUser();
+ switch (User->getKind()) {
+ case SILInstructionKind::DeallocRefInst:
+ ToRemove.push_back(User);
+ break;
+ default:
+ Use->set(GVI);
+ }
+ }
+ if (FindStringCall && NumTailElems > 16) {
+ assert(&*std::next(ARI->getIterator()) != FindStringCall &&
+ "FindStringCall must not be the next instruction after ARI because "
+ "deleting it would invalidate the instruction iterator");
+ replaceFindStringCall(FindStringCall);
+ }
+
+ ToRemove.push_back(ARI);
+ return true;
+}
+
+/// Replaces a call to _findStringSwitchCase with a call to
+/// _findStringSwitchCaseWithCache which builds a cache (e.g. a Dictionary) and
+/// stores it into a global variable. Then subsequent calls to this function can
+/// do a fast lookup using the cache.
+void ObjectOutliner::replaceFindStringCall(ApplyInst *FindStringCall) {
+ // Find the replacement function in the swift stdlib.
+ SmallVector<ValueDecl *, 1> results;
+ SILModule *Module = &FindStringCall->getFunction()->getModule();
+ Module->getASTContext().lookupInSwiftModule("_findStringSwitchCaseWithCache",
+ results);
+ if (results.size() != 1)
+ return;
+
+ auto *FD = dyn_cast<FuncDecl>(results.front());
+ if (!FD)
+ return;
+
+ SILDeclRef declRef(FD, SILDeclRef::Kind::Func);
+ SILFunction *replacementFunc = Module->getOrCreateFunction(
+ FindStringCall->getLoc(), declRef, NotForDefinition);
+
+ SILFunctionType *FTy = replacementFunc->getLoweredFunctionType();
+ if (FTy->getNumParameters() != 3)
+ return;
+
+ SILType cacheType = FTy->getParameters()[2].getSILStorageType().getObjectType();
+ NominalTypeDecl *cacheDecl = cacheType.getNominalOrBoundGenericNominal();
+ if (!cacheDecl)
+ return;
+
+
+ assert(!cacheDecl->isResilient(Module->getSwiftModule(),
+ ResilienceExpansion::Minimal));
+
+ SILType wordTy = cacheType.getFieldType(
+ cacheDecl->getStoredProperties().front(), *Module);
+
+ GlobalVariableMangler Mangler;
+ std::string GlobName =
+ Mangler.mangleOutlinedVariable(FindStringCall->getFunction(), GlobIdx);
+
+ // Create an "opaque" global variable which is passed as inout to
+ // _findStringSwitchCaseWithCache and into which the function stores the
+ // "cache".
+ SILGlobalVariable *CacheVar =
+ SILGlobalVariable::create(*Module, SILLinkage::Private, IsNotSerialized,
+ GlobName, cacheType);
+
+ SILLocation Loc = FindStringCall->getLoc();
+ SILBuilder StaticInitBuilder(CacheVar);
+ auto *Zero = StaticInitBuilder.createIntegerLiteral(Loc, wordTy, 0);
+ StaticInitBuilder.createStruct(ArtificialUnreachableLocation(), cacheType,
+ {Zero, Zero});
+
+ SILBuilder B(FindStringCall);
+ GlobalAddrInst *CacheAddr = B.createGlobalAddr(FindStringCall->getLoc(),
+ CacheVar);
+ FunctionRefInst *FRI = B.createFunctionRef(FindStringCall->getLoc(),
+ replacementFunc);
+ ApplyInst *NewCall = B.createApply(FindStringCall->getLoc(), FRI,
+ FindStringCall->getSubstitutions(),
+ { FindStringCall->getArgument(0),
+ FindStringCall->getArgument(1),
+ CacheAddr },
+ FindStringCall->isNonThrowing());
+
+ FindStringCall->replaceAllUsesWith(NewCall);
+ FindStringCall->eraseFromParent();
+}
+
+class ObjectOutlinerPass : public SILFunctionTransform
+{
+ void run() override {
+ SILFunction *F = getFunction();
+ ObjectOutliner Outliner(F->getModule().getASTContext().getArrayDecl());
+ if (Outliner.run(F)) {
+ invalidateAnalysis(SILAnalysis::InvalidationKind::Instructions);
+ }
+ }
+};
+
+} // end anonymous namespace
+
+SILTransform *swift::createObjectOutliner() {
+ return new ObjectOutlinerPass();
+}
diff --git a/lib/SILOptimizer/Transforms/PerformanceInliner.cpp b/lib/SILOptimizer/Transforms/PerformanceInliner.cpp
index e7273a9..51d5536 100644
--- a/lib/SILOptimizer/Transforms/PerformanceInliner.cpp
+++ b/lib/SILOptimizer/Transforms/PerformanceInliner.cpp
@@ -97,6 +97,10 @@
/// specialization for a call.
GenericSpecializationBenefit = RemovedCallBenefit + 300,
+ /// The benefit of inlining class methods with -Osize.
+ /// We only inline very small class methods with -Osize.
+ OSizeClassMethodBenefit = 5,
+
/// Approximately up to this cost level a function can be inlined without
/// increasing the code size.
TrivialFunctionThreshold = 18,
@@ -243,6 +247,7 @@
int BaseBenefit = RemovedCallBenefit;
// Osize heuristic.
+ bool isClassMethodAtOsize = false;
if (OptMode == OptimizationMode::ForSize) {
// Don't inline into thunks.
if (AI.getFunction()->isThunk())
@@ -253,9 +258,8 @@
auto SelfTy = Callee->getLoweredFunctionType()->getSelfInstanceType();
if (SelfTy->mayHaveSuperclass() &&
Callee->getRepresentation() == SILFunctionTypeRepresentation::Method)
- return false;
+ isClassMethodAtOsize = true;
}
-
// Use command line option to control inlining in Osize mode.
const uint64_t CallerBaseBenefitReductionFactor = AI.getFunction()->getModule().getOptions().CallerBaseBenefitReductionFactor;
BaseBenefit = BaseBenefit / CallerBaseBenefitReductionFactor;
@@ -263,8 +267,13 @@
// It is always OK to inline a simple call.
// TODO: May be consider also the size of the callee?
- if (isPureCall(AI, SEA))
+ if (isPureCall(AI, SEA)) {
+ DEBUG(
+ dumpCaller(AI.getFunction());
+ llvm::dbgs() << " pure-call decision " << Callee->getName() << '\n';
+ );
return true;
+ }
// Bail out if this generic call can be optimized by means of
// the generic specialization, because we prefer generic specialization
@@ -294,7 +303,6 @@
}
CallerWeight.updateBenefit(Benefit, BaseBenefit);
- // Benefit = 1;
// Go through all blocks of the function, accumulate the cost and find
// benefits.
@@ -432,6 +440,9 @@
return profileBasedDecision(AI, Benefit, Callee, CalleeCost,
NumCallerBlocks, bbIt);
}
+ if (isClassMethodAtOsize && Benefit > OSizeClassMethodBenefit) {
+ Benefit = OSizeClassMethodBenefit;
+ }
// This is the final inlining decision.
if (CalleeCost > Benefit) {
diff --git a/lib/SILOptimizer/Transforms/SimplifyCFG.cpp b/lib/SILOptimizer/Transforms/SimplifyCFG.cpp
index 89d8494..16d0269 100644
--- a/lib/SILOptimizer/Transforms/SimplifyCFG.cpp
+++ b/lib/SILOptimizer/Transforms/SimplifyCFG.cpp
@@ -26,6 +26,7 @@
#include "swift/SILOptimizer/Utils/CFG.h"
#include "swift/SILOptimizer/Utils/CastOptimizer.h"
#include "swift/SILOptimizer/Utils/Local.h"
+#include "swift/SILOptimizer/Utils/ConstantFolding.h"
#include "swift/SILOptimizer/Utils/SILInliner.h"
#include "swift/SILOptimizer/Utils/SILSSAUpdater.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -75,13 +76,25 @@
// Dominance and post-dominance info for the current function
DominanceInfo *DT = nullptr;
+ ConstantFolder ConstFolder;
+
+ void constFoldingCallback(SILInstruction *I) {
+ // If a terminal instruction gets constant folded (like cond_br), it
+ // enables further simplify-CFG optimizations.
+ if (isa<TermInst>(I))
+ addToWorklist(I->getParent());
+ }
+
bool ShouldVerify;
bool EnableJumpThread;
public:
SimplifyCFG(SILFunction &Fn, SILPassManager *PM, bool Verify,
bool EnableJumpThread)
- : Fn(Fn), PM(PM), ShouldVerify(Verify),
- EnableJumpThread(EnableJumpThread) {}
+ : Fn(Fn), PM(PM),
+ ConstFolder(PM->getOptions().AssertConfig,
+ /* EnableDiagnostics */false,
+ [&](SILInstruction *I) { constFoldingCallback(I); }),
+ ShouldVerify(Verify), EnableJumpThread(EnableJumpThread) {}
bool run();
@@ -1092,11 +1105,15 @@
/// result in exposing opportunities for CFG simplification.
bool SimplifyCFG::simplifyBranchOperands(OperandValueArrayRef Operands) {
bool Simplified = false;
- for (auto O = Operands.begin(), E = Operands.end(); O != E; ++O)
+ for (auto O = Operands.begin(), E = Operands.end(); O != E; ++O) {
// All of our interesting simplifications are on single-value instructions
// for now.
- if (auto *I = dyn_cast<SingleValueInstruction>(*O))
- if (SILValue Result = simplifyInstruction(I)) {
+ if (auto *I = dyn_cast<SingleValueInstruction>(*O)) {
+ SILValue Result = simplifyInstruction(I);
+
+ // The Result can be the same instruction I in case it is in an
+ // unreachable block. In this case it can reference itself as operand.
+ if (Result && Result != I) {
DEBUG(llvm::dbgs() << "simplify branch operand " << *I);
I->replaceAllUsesWith(Result);
if (isInstructionTriviallyDead(I)) {
@@ -1104,6 +1121,8 @@
Simplified = true;
}
}
+ }
+ }
return Simplified;
}
@@ -1228,9 +1247,16 @@
// If there are any BB arguments in the destination, replace them with the
// branch operands, since they must dominate the dest block.
for (unsigned i = 0, e = BI->getArgs().size(); i != e; ++i) {
- if (DestBB->getArgument(i) != BI->getArg(i))
- DestBB->getArgument(i)->replaceAllUsesWith(BI->getArg(i));
- else {
+ if (DestBB->getArgument(i) != BI->getArg(i)) {
+ SILValue Val = BI->getArg(i);
+ DestBB->getArgument(i)->replaceAllUsesWith(Val);
+ if (auto *I = dyn_cast<SingleValueInstruction>(Val)) {
+ // Replacing operands may trigger constant folding which then could
+ // trigger other simplify-CFG optimizations.
+ ConstFolder.addToWorklist(I);
+ ConstFolder.processWorkList();
+ }
+ } else {
// We must be processing an unreachable part of the cfg with a cycle.
// bb1(arg1): // preds: bb3
// br bb2
diff --git a/lib/SILOptimizer/Utils/ConstantFolding.cpp b/lib/SILOptimizer/Utils/ConstantFolding.cpp
index a7f05bb..eb57919 100644
--- a/lib/SILOptimizer/Utils/ConstantFolding.cpp
+++ b/lib/SILOptimizer/Utils/ConstantFolding.cpp
@@ -12,6 +12,17 @@
#include "swift/SILOptimizer/Utils/ConstantFolding.h"
+#include "swift/AST/Expr.h"
+#include "swift/AST/DiagnosticsSIL.h"
+#include "swift/SIL/PatternMatch.h"
+#include "swift/SIL/SILBuilder.h"
+#include "swift/SILOptimizer/Utils/CastOptimizer.h"
+#include "swift/SILOptimizer/Utils/Local.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "constant-folding"
+
using namespace swift;
APInt swift::constantFoldBitOperation(APInt lhs, APInt rhs, BuiltinValueKind ID) {
@@ -118,3 +129,1241 @@
return val.sext(DestBitWidth);
}
}
+
+//===----------------------------------------------------------------------===//
+// ConstantFolder
+//===----------------------------------------------------------------------===//
+
+STATISTIC(NumInstFolded, "Number of constant folded instructions");
+
+template<typename...T, typename...U>
+static InFlightDiagnostic
+diagnose(ASTContext &Context, SourceLoc loc, Diag<T...> diag, U &&...args) {
+ return Context.Diags.diagnose(loc, diag, std::forward<U>(args)...);
+}
+
+/// \brief Construct (int, overflow) result tuple.
+static SILValue constructResultWithOverflowTuple(BuiltinInst *BI,
+ APInt Res, bool Overflow) {
+ // Get the SIL subtypes of the returned tuple type.
+ SILType FuncResType = BI->getType();
+ assert(FuncResType.castTo<TupleType>()->getNumElements() == 2);
+ SILType ResTy1 = FuncResType.getTupleElementType(0);
+ SILType ResTy2 = FuncResType.getTupleElementType(1);
+
+ // Construct the folded instruction - a tuple of two literals, the
+ // result and overflow.
+ SILBuilderWithScope B(BI);
+ SILLocation Loc = BI->getLoc();
+ SILValue Result[] = {
+ B.createIntegerLiteral(Loc, ResTy1, Res),
+ B.createIntegerLiteral(Loc, ResTy2, Overflow)
+ };
+ return B.createTuple(Loc, FuncResType, Result);
+}
+
+/// \brief Fold arithmetic intrinsics with overflow.
+static SILValue
+constantFoldBinaryWithOverflow(BuiltinInst *BI, llvm::Intrinsic::ID ID,
+ bool ReportOverflow,
+ Optional<bool> &ResultsInError) {
+ OperandValueArrayRef Args = BI->getArguments();
+ assert(Args.size() >= 2);
+
+ auto *Op1 = dyn_cast<IntegerLiteralInst>(Args[0]);
+ auto *Op2 = dyn_cast<IntegerLiteralInst>(Args[1]);
+
+ // If either Op1 or Op2 is not a literal, we cannot do anything.
+ if (!Op1 || !Op2)
+ return nullptr;
+
+ // Calculate the result.
+ APInt LHSInt = Op1->getValue();
+ APInt RHSInt = Op2->getValue();
+ bool Overflow;
+ APInt Res = constantFoldBinaryWithOverflow(LHSInt, RHSInt, Overflow, ID);
+
+ // If we can statically determine that the operation overflows,
+ // warn about it if warnings are not disabled by ResultsInError being null.
+ if (ResultsInError.hasValue() && Overflow && ReportOverflow) {
+ if (BI->getFunction()->isSpecialization()) {
+ // Do not report any constant propagation issues in specializations,
+ // because they are eventually not present in the original function.
+ return nullptr;
+ }
+ // Try to infer the type of the constant expression that the user operates
+ // on. If the intrinsic was lowered from a call to a function that takes
+ // two arguments of the same type, use the type of the LHS argument.
+ // This would detect '+'/'+=' and such.
+ Type OpType;
+ SILLocation Loc = BI->getLoc();
+ const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
+ SourceRange LHSRange, RHSRange;
+ if (CE) {
+ const auto *Args = dyn_cast_or_null<TupleExpr>(CE->getArg());
+ if (Args && Args->getNumElements() == 2) {
+ // Look through inout types in order to handle += well.
+ CanType LHSTy = Args->getElement(0)->getType()->getInOutObjectType()->
+ getCanonicalType();
+ CanType RHSTy = Args->getElement(1)->getType()->getCanonicalType();
+ if (LHSTy == RHSTy)
+ OpType = Args->getElement(1)->getType();
+
+ LHSRange = Args->getElement(0)->getSourceRange();
+ RHSRange = Args->getElement(1)->getSourceRange();
+ }
+ }
+
+ bool Signed = false;
+ StringRef Operator = "+";
+
+ switch (ID) {
+ default: llvm_unreachable("Invalid case");
+ case llvm::Intrinsic::sadd_with_overflow:
+ Signed = true;
+ break;
+ case llvm::Intrinsic::uadd_with_overflow:
+ break;
+ case llvm::Intrinsic::ssub_with_overflow:
+ Operator = "-";
+ Signed = true;
+ break;
+ case llvm::Intrinsic::usub_with_overflow:
+ Operator = "-";
+ break;
+ case llvm::Intrinsic::smul_with_overflow:
+ Operator = "*";
+ Signed = true;
+ break;
+ case llvm::Intrinsic::umul_with_overflow:
+ Operator = "*";
+ break;
+ }
+
+ if (!OpType.isNull()) {
+ diagnose(BI->getModule().getASTContext(),
+ Loc.getSourceLoc(),
+ diag::arithmetic_operation_overflow,
+ LHSInt.toString(/*Radix*/ 10, Signed),
+ Operator,
+ RHSInt.toString(/*Radix*/ 10, Signed),
+ OpType).highlight(LHSRange).highlight(RHSRange);
+ } else {
+ // If we cannot get the type info in an expected way, describe the type.
+ diagnose(BI->getModule().getASTContext(),
+ Loc.getSourceLoc(),
+ diag::arithmetic_operation_overflow_generic_type,
+ LHSInt.toString(/*Radix*/ 10, Signed),
+ Operator,
+ RHSInt.toString(/*Radix*/ 10, Signed),
+ Signed,
+ LHSInt.getBitWidth()).highlight(LHSRange).highlight(RHSRange);
+ }
+ ResultsInError = Optional<bool>(true);
+ }
+
+ return constructResultWithOverflowTuple(BI, Res, Overflow);
+}
+
+static SILValue
+constantFoldBinaryWithOverflow(BuiltinInst *BI, BuiltinValueKind ID,
+ Optional<bool> &ResultsInError) {
+ OperandValueArrayRef Args = BI->getArguments();
+ auto *ShouldReportFlag = dyn_cast<IntegerLiteralInst>(Args[2]);
+ return constantFoldBinaryWithOverflow(BI,
+ getLLVMIntrinsicIDForBuiltinWithOverflow(ID),
+ ShouldReportFlag && (ShouldReportFlag->getValue() == 1),
+ ResultsInError);
+}
+
+static SILValue constantFoldIntrinsic(BuiltinInst *BI, llvm::Intrinsic::ID ID,
+ Optional<bool> &ResultsInError) {
+ switch (ID) {
+ default: break;
+ case llvm::Intrinsic::expect: {
+ // An expect of an integral constant is the constant itself.
+ assert(BI->getArguments().size() == 2 && "Expect should have 2 args.");
+ auto *Op1 = dyn_cast<IntegerLiteralInst>(BI->getArguments()[0]);
+ if (!Op1)
+ return nullptr;
+ return Op1;
+ }
+
+ case llvm::Intrinsic::ctlz: {
+ assert(BI->getArguments().size() == 2 && "Ctlz should have 2 args.");
+ OperandValueArrayRef Args = BI->getArguments();
+
+ // Fold for integer constant arguments.
+ auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
+ if (!LHS) {
+ return nullptr;
+ }
+ APInt LHSI = LHS->getValue();
+ unsigned LZ = 0;
+ // Check corner-case of source == zero
+ if (LHSI == 0) {
+ auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
+ if (!RHS || RHS->getValue() != 0) {
+ // Undefined
+ return nullptr;
+ }
+ LZ = LHSI.getBitWidth();
+ } else {
+ LZ = LHSI.countLeadingZeros();
+ }
+ APInt LZAsAPInt = APInt(LHSI.getBitWidth(), LZ);
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), LHS->getType(), LZAsAPInt);
+ }
+
+ case llvm::Intrinsic::sadd_with_overflow:
+ case llvm::Intrinsic::uadd_with_overflow:
+ case llvm::Intrinsic::ssub_with_overflow:
+ case llvm::Intrinsic::usub_with_overflow:
+ case llvm::Intrinsic::smul_with_overflow:
+ case llvm::Intrinsic::umul_with_overflow:
+ return constantFoldBinaryWithOverflow(BI, ID,
+ /* ReportOverflow */ false,
+ ResultsInError);
+ }
+ return nullptr;
+}
+
+static SILValue constantFoldCompare(BuiltinInst *BI, BuiltinValueKind ID) {
+ OperandValueArrayRef Args = BI->getArguments();
+
+ // Fold for integer constant arguments.
+ auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
+ auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
+ if (LHS && RHS) {
+ APInt Res = constantFoldComparison(LHS->getValue(), RHS->getValue(), ID);
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), Res);
+ }
+
+ using namespace swift::PatternMatch;
+
+ // Comparisons of an unsigned value with 0.
+ SILValue Other;
+ auto MatchNonNegative =
+ m_BuiltinInst(BuiltinValueKind::AssumeNonNegative, m_ValueBase());
+ if (match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_ULT,
+ m_SILValue(Other), m_Zero()),
+ m_BuiltinInst(BuiltinValueKind::ICMP_UGT, m_Zero(),
+ m_SILValue(Other)))) ||
+ match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
+ MatchNonNegative, m_Zero()),
+ m_BuiltinInst(BuiltinValueKind::ICMP_SGT, m_Zero(),
+ MatchNonNegative)))) {
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
+ }
+
+ if (match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_UGE,
+ m_SILValue(Other), m_Zero()),
+ m_BuiltinInst(BuiltinValueKind::ICMP_ULE, m_Zero(),
+ m_SILValue(Other)))) ||
+ match(BI, m_CombineOr(m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
+ MatchNonNegative, m_Zero()),
+ m_BuiltinInst(BuiltinValueKind::ICMP_SLE, m_Zero(),
+ MatchNonNegative)))) {
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
+ }
+
+ // Comparisons with Int.Max.
+ IntegerLiteralInst *IntMax;
+
+ // Check signed comparisons.
+ if (match(BI,
+ m_CombineOr(
+ // Int.max < x
+ m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
+ m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
+ // x > Int.max
+ m_BuiltinInst(BuiltinValueKind::ICMP_SGT, m_SILValue(Other),
+ m_IntegerLiteralInst(IntMax)))) &&
+ IntMax->getValue().isMaxSignedValue()) {
+  // Any signed number should be <= IntMax.
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
+ }
+
+ if (match(BI,
+ m_CombineOr(
+ m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
+ m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
+ m_BuiltinInst(BuiltinValueKind::ICMP_SLE, m_SILValue(Other),
+ m_IntegerLiteralInst(IntMax)))) &&
+ IntMax->getValue().isMaxSignedValue()) {
+    // Any signed number is always <= IntMax, so this comparison is true.
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
+ }
+
+  // For any x of the same size as Int.max and n >= 1, (x >> n) is always <= Int.max,
+ // that is (x>>n) <= Int.max and Int.max >= (x>>n) are true.
+ if (match(BI,
+ m_CombineOr(
+ // Int.max >= x
+ m_BuiltinInst(BuiltinValueKind::ICMP_UGE,
+ m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
+ // x <= Int.max
+ m_BuiltinInst(BuiltinValueKind::ICMP_ULE, m_SILValue(Other),
+ m_IntegerLiteralInst(IntMax)),
+ // Int.max >= x
+ m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
+ m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
+ // x <= Int.max
+ m_BuiltinInst(BuiltinValueKind::ICMP_SLE, m_SILValue(Other),
+ m_IntegerLiteralInst(IntMax)))) &&
+ IntMax->getValue().isMaxSignedValue()) {
+ // Check if other is a result of a logical shift right by a strictly
+ // positive number of bits.
+ IntegerLiteralInst *ShiftCount;
+ if (match(Other, m_BuiltinInst(BuiltinValueKind::LShr, m_ValueBase(),
+ m_IntegerLiteralInst(ShiftCount))) &&
+ ShiftCount->getValue().isStrictlyPositive()) {
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
+ }
+ }
+
+  // At the same time, (x>>n) > Int.max and Int.max < (x>>n) are false.
+ if (match(BI,
+ m_CombineOr(
+ // Int.max < x
+ m_BuiltinInst(BuiltinValueKind::ICMP_ULT,
+ m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
+ // x > Int.max
+ m_BuiltinInst(BuiltinValueKind::ICMP_UGT, m_SILValue(Other),
+ m_IntegerLiteralInst(IntMax)),
+ // Int.max < x
+ m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
+ m_IntegerLiteralInst(IntMax), m_SILValue(Other)),
+ // x > Int.max
+ m_BuiltinInst(BuiltinValueKind::ICMP_SGT, m_SILValue(Other),
+ m_IntegerLiteralInst(IntMax)))) &&
+ IntMax->getValue().isMaxSignedValue()) {
+ // Check if other is a result of a logical shift right by a strictly
+ // positive number of bits.
+ IntegerLiteralInst *ShiftCount;
+ if (match(Other, m_BuiltinInst(BuiltinValueKind::LShr, m_ValueBase(),
+ m_IntegerLiteralInst(ShiftCount))) &&
+ ShiftCount->getValue().isStrictlyPositive()) {
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
+ }
+ }
+
+ // Fold x < 0 into false, if x is known to be a result of an unsigned
+ // operation with overflow checks enabled.
+ BuiltinInst *BIOp;
+ if (match(BI, m_BuiltinInst(BuiltinValueKind::ICMP_SLT,
+ m_TupleExtractInst(m_BuiltinInst(BIOp), 0),
+ m_Zero()))) {
+    // Check if the compared value is the result of an unsigned operation with overflow.
+ switch (BIOp->getBuiltinInfo().ID) {
+ default:
+ break;
+ case BuiltinValueKind::UAddOver:
+ case BuiltinValueKind::USubOver:
+ case BuiltinValueKind::UMulOver:
+ // Was it an operation with an overflow check?
+ if (match(BIOp->getOperand(2), m_One())) {
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt());
+ }
+ }
+ }
+
+ // Fold x >= 0 into true, if x is known to be a result of an unsigned
+ // operation with overflow checks enabled.
+ if (match(BI, m_BuiltinInst(BuiltinValueKind::ICMP_SGE,
+ m_TupleExtractInst(m_BuiltinInst(BIOp), 0),
+ m_Zero()))) {
+    // Check if the compared value is the result of an unsigned operation with overflow.
+ switch (BIOp->getBuiltinInfo().ID) {
+ default:
+ break;
+ case BuiltinValueKind::UAddOver:
+ case BuiltinValueKind::USubOver:
+ case BuiltinValueKind::UMulOver:
+ // Was it an operation with an overflow check?
+ if (match(BIOp->getOperand(2), m_One())) {
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), APInt(1, 1));
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+static SILValue
+constantFoldAndCheckDivision(BuiltinInst *BI, BuiltinValueKind ID,
+ Optional<bool> &ResultsInError) {
+ assert(ID == BuiltinValueKind::SDiv ||
+ ID == BuiltinValueKind::SRem ||
+ ID == BuiltinValueKind::UDiv ||
+ ID == BuiltinValueKind::URem);
+
+ OperandValueArrayRef Args = BI->getArguments();
+ SILModule &M = BI->getModule();
+
+ // Get the denominator.
+ auto *Denom = dyn_cast<IntegerLiteralInst>(Args[1]);
+ if (!Denom)
+ return nullptr;
+ APInt DenomVal = Denom->getValue();
+
+ // If the denominator is zero...
+ if (DenomVal == 0) {
+ // And if we are not asked to report errors, just return nullptr.
+ if (!ResultsInError.hasValue())
+ return nullptr;
+
+    // Otherwise emit a diagnostic and set ResultsInError to true.
+ diagnose(M.getASTContext(), BI->getLoc().getSourceLoc(),
+ diag::division_by_zero);
+ ResultsInError = Optional<bool>(true);
+ return nullptr;
+ }
+
+ // Get the numerator.
+ auto *Num = dyn_cast<IntegerLiteralInst>(Args[0]);
+ if (!Num)
+ return nullptr;
+ APInt NumVal = Num->getValue();
+
+ bool Overflowed;
+ APInt ResVal = constantFoldDiv(NumVal, DenomVal, Overflowed, ID);
+
+ // If we overflowed...
+ if (Overflowed) {
+ // And we are not asked to produce diagnostics, just return nullptr...
+ if (!ResultsInError.hasValue())
+ return nullptr;
+
+ bool IsRem = ID == BuiltinValueKind::SRem || ID == BuiltinValueKind::URem;
+
+ // Otherwise emit the diagnostic, set ResultsInError to be true, and return
+ // nullptr.
+ diagnose(M.getASTContext(),
+ BI->getLoc().getSourceLoc(),
+ diag::division_overflow,
+ NumVal.toString(/*Radix*/ 10, /*Signed*/true),
+ IsRem ? "%" : "/",
+ DenomVal.toString(/*Radix*/ 10, /*Signed*/true));
+ ResultsInError = Optional<bool>(true);
+ return nullptr;
+ }
+
+ // Add the literal instruction to represent the result of the division.
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), ResVal);
+}
+
+/// \brief Fold binary operations.
+///
+/// The list of operations we constant fold might not be complete. Start with
+/// folding the operations used by the standard library.
+static SILValue constantFoldBinary(BuiltinInst *BI,
+ BuiltinValueKind ID,
+ Optional<bool> &ResultsInError) {
+ switch (ID) {
+ default:
+ llvm_unreachable("Not all BUILTIN_BINARY_OPERATIONs are covered!");
+
+ // Not supported yet (not easily computable for APInt).
+ case BuiltinValueKind::ExactSDiv:
+ case BuiltinValueKind::ExactUDiv:
+ return nullptr;
+
+ // Not supported now.
+ case BuiltinValueKind::FRem:
+ return nullptr;
+
+ // Fold constant division operations and report div by zero.
+ case BuiltinValueKind::SDiv:
+ case BuiltinValueKind::SRem:
+ case BuiltinValueKind::UDiv:
+ case BuiltinValueKind::URem: {
+ return constantFoldAndCheckDivision(BI, ID, ResultsInError);
+ }
+
+ // Are there valid uses for these in stdlib?
+ case BuiltinValueKind::Add:
+ case BuiltinValueKind::Mul:
+ case BuiltinValueKind::Sub:
+ return nullptr;
+
+ case BuiltinValueKind::And:
+ case BuiltinValueKind::AShr:
+ case BuiltinValueKind::LShr:
+ case BuiltinValueKind::Or:
+ case BuiltinValueKind::Shl:
+ case BuiltinValueKind::Xor: {
+ OperandValueArrayRef Args = BI->getArguments();
+ auto *LHS = dyn_cast<IntegerLiteralInst>(Args[0]);
+ auto *RHS = dyn_cast<IntegerLiteralInst>(Args[1]);
+ if (!RHS || !LHS)
+ return nullptr;
+ APInt LHSI = LHS->getValue();
+ APInt RHSI = RHS->getValue();
+
+ bool IsShift = ID == BuiltinValueKind::AShr ||
+ ID == BuiltinValueKind::LShr ||
+ ID == BuiltinValueKind::Shl;
+
+ // Reject shifting all significant bits
+ if (IsShift && RHSI.getZExtValue() >= LHSI.getBitWidth()) {
+ diagnose(BI->getModule().getASTContext(),
+ RHS->getLoc().getSourceLoc(),
+ diag::shifting_all_significant_bits);
+
+ ResultsInError = Optional<bool>(true);
+ return nullptr;
+ }
+
+ APInt ResI = constantFoldBitOperation(LHSI, RHSI, ID);
+ // Add the literal instruction to represent the result.
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), ResI);
+ }
+ case BuiltinValueKind::FAdd:
+ case BuiltinValueKind::FDiv:
+ case BuiltinValueKind::FMul:
+ case BuiltinValueKind::FSub: {
+ OperandValueArrayRef Args = BI->getArguments();
+ auto *LHS = dyn_cast<FloatLiteralInst>(Args[0]);
+ auto *RHS = dyn_cast<FloatLiteralInst>(Args[1]);
+ if (!RHS || !LHS)
+ return nullptr;
+ APFloat LHSF = LHS->getValue();
+ APFloat RHSF = RHS->getValue();
+ switch (ID) {
+ default: llvm_unreachable("Not all cases are covered!");
+ case BuiltinValueKind::FAdd:
+ LHSF.add(RHSF, APFloat::rmNearestTiesToEven);
+ break;
+ case BuiltinValueKind::FDiv:
+ LHSF.divide(RHSF, APFloat::rmNearestTiesToEven);
+ break;
+ case BuiltinValueKind::FMul:
+ LHSF.multiply(RHSF, APFloat::rmNearestTiesToEven);
+ break;
+ case BuiltinValueKind::FSub:
+ LHSF.subtract(RHSF, APFloat::rmNearestTiesToEven);
+ break;
+ }
+
+ // Add the literal instruction to represent the result.
+ SILBuilderWithScope B(BI);
+ return B.createFloatLiteral(BI->getLoc(), BI->getType(), LHSF);
+ }
+ }
+}
+
+static std::pair<bool, bool> getTypeSignedness(const BuiltinInfo &Builtin) {
+ bool SrcTySigned =
+ (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::SToUCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::SUCheckedConversion);
+
+ bool DstTySigned =
+ (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::UToSCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::USCheckedConversion);
+
+ return std::pair<bool, bool>(SrcTySigned, DstTySigned);
+}
+
+static SILValue
+constantFoldAndCheckIntegerConversions(BuiltinInst *BI,
+ const BuiltinInfo &Builtin,
+ Optional<bool> &ResultsInError) {
+ assert(Builtin.ID == BuiltinValueKind::SToSCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::UToUCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::SToUCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::UToSCheckedTrunc ||
+ Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
+ Builtin.ID == BuiltinValueKind::USCheckedConversion);
+
+ // Check if we are converting a constant integer.
+ OperandValueArrayRef Args = BI->getArguments();
+ auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
+ if (!V)
+ return nullptr;
+ APInt SrcVal = V->getValue();
+
+ // Get source type and bit width.
+ Type SrcTy = Builtin.Types[0];
+ uint32_t SrcBitWidth =
+ Builtin.Types[0]->castTo<BuiltinIntegerType>()->getGreatestWidth();
+
+ // Compute the destination (for SrcBitWidth < DestBitWidth) and enough info
+ // to check for overflow.
+ APInt Result;
+ bool OverflowError;
+ Type DstTy;
+
+ // Process conversions signed <-> unsigned for same size integers.
+ if (Builtin.ID == BuiltinValueKind::SUCheckedConversion ||
+ Builtin.ID == BuiltinValueKind::USCheckedConversion) {
+ DstTy = SrcTy;
+ Result = SrcVal;
+ // Report an error if the sign bit is set.
+ OverflowError = SrcVal.isNegative();
+
+  // Process checked truncations other than unsigned-to-signed.
+ } else if (Builtin.ID != BuiltinValueKind::UToSCheckedTrunc) {
+ assert(Builtin.Types.size() == 2);
+ DstTy = Builtin.Types[1];
+ uint32_t DstBitWidth =
+ DstTy->castTo<BuiltinIntegerType>()->getGreatestWidth();
+ // Result = trunc_IntFrom_IntTo(Val)
+ // For signed destination:
+ // sext_IntFrom(Result) == Val ? Result : overflow_error
+    // For unsigned destination:
+ // zext_IntFrom(Result) == Val ? Result : overflow_error
+ Result = SrcVal.trunc(DstBitWidth);
+ // Get the signedness of the destination.
+ bool Signed = (Builtin.ID == BuiltinValueKind::SToSCheckedTrunc);
+ APInt Ext = Signed ? Result.sext(SrcBitWidth) : Result.zext(SrcBitWidth);
+ OverflowError = (SrcVal != Ext);
+
+  // Process the remaining case: truncation from unsigned to signed.
+ } else {
+ assert(Builtin.Types.size() == 2);
+ DstTy = Builtin.Types[1];
+ uint32_t DstBitWidth =
+ Builtin.Types[1]->castTo<BuiltinIntegerType>()->getGreatestWidth();
+ // Compute the destination (for SrcBitWidth < DestBitWidth):
+ // Result = trunc_IntTo(Val)
+ // Trunc = trunc_'IntTo-1bit'(Val)
+ // zext_IntFrom(Trunc) == Val ? Result : overflow_error
+ Result = SrcVal.trunc(DstBitWidth);
+ APInt TruncVal = SrcVal.trunc(DstBitWidth - 1);
+ OverflowError = (SrcVal != TruncVal.zext(SrcBitWidth));
+ }
+
+ // Check for overflow.
+ if (OverflowError) {
+ // If we are not asked to emit overflow diagnostics, just return nullptr on
+ // overflow.
+ if (!ResultsInError.hasValue())
+ return nullptr;
+
+ SILLocation Loc = BI->getLoc();
+ SILModule &M = BI->getModule();
+ const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
+ Type UserSrcTy;
+ Type UserDstTy;
+ // Primitive heuristics to get the user-written type.
+ // Eventually we might be able to use SILLocation (when it contains info
+ // about inlined call chains).
+ if (CE) {
+ if (const TupleType *RTy = CE->getArg()->getType()->getAs<TupleType>()) {
+ if (RTy->getNumElements() == 1) {
+ UserSrcTy = RTy->getElementType(0);
+ UserDstTy = CE->getType();
+ }
+ } else {
+ UserSrcTy = CE->getArg()->getType();
+ UserDstTy = CE->getType();
+ }
+ }
+
+
+ // Assume that we are converting from a literal if the Source size is
+ // 2048. Is there a better way to identify conversions from literals?
+ bool Literal = (SrcBitWidth == 2048);
+
+ // FIXME: This will prevent hard error in cases the error is coming
+ // from ObjC interoperability code. Currently, we treat NSUInteger as
+ // Int.
+ if (Loc.getSourceLoc().isInvalid()) {
+ // Otherwise emit the appropriate diagnostic and set ResultsInError.
+ if (Literal)
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diag::integer_literal_overflow_warn,
+ UserDstTy.isNull() ? DstTy : UserDstTy);
+ else
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diag::integer_conversion_overflow_warn,
+ UserSrcTy.isNull() ? SrcTy : UserSrcTy,
+ UserDstTy.isNull() ? DstTy : UserDstTy);
+
+ ResultsInError = Optional<bool>(true);
+ return nullptr;
+ }
+
+ // Otherwise report the overflow error.
+ if (Literal) {
+ bool SrcTySigned, DstTySigned;
+ std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
+ SmallString<10> SrcAsString;
+ SrcVal.toString(SrcAsString, /*radix*/10, SrcTySigned);
+
+ // Try to print user-visible types if they are available.
+ if (!UserDstTy.isNull()) {
+ auto diagID = diag::integer_literal_overflow;
+
+ // If this is a negative literal in an unsigned type, use a specific
+ // diagnostic.
+ if (SrcTySigned && !DstTySigned && SrcVal.isNegative())
+ diagID = diag::negative_integer_literal_overflow_unsigned;
+
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diagID, UserDstTy, SrcAsString);
+ // Otherwise, print the Builtin Types.
+ } else {
+ bool SrcTySigned, DstTySigned;
+ std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diag::integer_literal_overflow_builtin_types,
+ DstTySigned, DstTy, SrcAsString);
+ }
+ } else {
+ if (Builtin.ID == BuiltinValueKind::SUCheckedConversion) {
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diag::integer_conversion_sign_error,
+ UserDstTy.isNull() ? DstTy : UserDstTy);
+ } else {
+ // Try to print user-visible types if they are available.
+ if (!UserSrcTy.isNull()) {
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diag::integer_conversion_overflow,
+ UserSrcTy, UserDstTy);
+
+ // Otherwise, print the Builtin Types.
+ } else {
+ // Since builtin types are sign-agnostic, print the signedness
+ // separately.
+ bool SrcTySigned, DstTySigned;
+ std::tie(SrcTySigned, DstTySigned) = getTypeSignedness(Builtin);
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diag::integer_conversion_overflow_builtin_types,
+ SrcTySigned, SrcTy, DstTySigned, DstTy);
+ }
+ }
+ }
+
+ ResultsInError = Optional<bool>(true);
+ return nullptr;
+ }
+
+ // The call to the builtin should be replaced with the constant value.
+ return constructResultWithOverflowTuple(BI, Result, false);
+
+}
+
+static SILValue constantFoldBuiltin(BuiltinInst *BI,
+ Optional<bool> &ResultsInError) {
+ const IntrinsicInfo &Intrinsic = BI->getIntrinsicInfo();
+ SILModule &M = BI->getModule();
+
+ // If it's an llvm intrinsic, fold the intrinsic.
+ if (Intrinsic.ID != llvm::Intrinsic::not_intrinsic)
+ return constantFoldIntrinsic(BI, Intrinsic.ID, ResultsInError);
+
+ // Otherwise, it should be one of the builtin functions.
+ OperandValueArrayRef Args = BI->getArguments();
+ const BuiltinInfo &Builtin = BI->getBuiltinInfo();
+
+ switch (Builtin.ID) {
+ default: break;
+
+// Check and fold binary arithmetic with overflow.
+#define BUILTIN(id, name, Attrs)
+#define BUILTIN_BINARY_OPERATION_WITH_OVERFLOW(id, name, _, attrs, overload) \
+ case BuiltinValueKind::id:
+#include "swift/AST/Builtins.def"
+ return constantFoldBinaryWithOverflow(BI, Builtin.ID, ResultsInError);
+
+#define BUILTIN(id, name, Attrs)
+#define BUILTIN_BINARY_OPERATION(id, name, attrs, overload) \
+case BuiltinValueKind::id:
+#include "swift/AST/Builtins.def"
+ return constantFoldBinary(BI, Builtin.ID, ResultsInError);
+
+// Fold comparison predicates.
+#define BUILTIN(id, name, Attrs)
+#define BUILTIN_BINARY_PREDICATE(id, name, attrs, overload) \
+case BuiltinValueKind::id:
+#include "swift/AST/Builtins.def"
+ return constantFoldCompare(BI, Builtin.ID);
+
+ case BuiltinValueKind::Trunc:
+ case BuiltinValueKind::ZExt:
+ case BuiltinValueKind::SExt:
+ case BuiltinValueKind::TruncOrBitCast:
+ case BuiltinValueKind::ZExtOrBitCast:
+ case BuiltinValueKind::SExtOrBitCast: {
+
+ // We can fold if the value being cast is a constant.
+ auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
+ if (!V)
+ return nullptr;
+
+ APInt CastResV = constantFoldCast(V->getValue(), Builtin);
+
+ // Add the literal instruction to represent the result of the cast.
+ SILBuilderWithScope B(BI);
+ return B.createIntegerLiteral(BI->getLoc(), BI->getType(), CastResV);
+ }
+
+ // Process special builtins that are designed to check for overflows in
+ // integer conversions.
+ case BuiltinValueKind::SToSCheckedTrunc:
+ case BuiltinValueKind::UToUCheckedTrunc:
+ case BuiltinValueKind::SToUCheckedTrunc:
+ case BuiltinValueKind::UToSCheckedTrunc:
+ case BuiltinValueKind::SUCheckedConversion:
+ case BuiltinValueKind::USCheckedConversion: {
+ return constantFoldAndCheckIntegerConversions(BI, Builtin, ResultsInError);
+ }
+
+ case BuiltinValueKind::IntToFPWithOverflow: {
+ // Get the value. It should be a constant in most cases.
+ // Note, this will not always be a constant, for example, when analyzing
+ // _convertFromBuiltinIntegerLiteral function itself.
+ auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
+ if (!V)
+ return nullptr;
+ APInt SrcVal = V->getValue();
+ Type DestTy = Builtin.Types[1];
+
+ APFloat TruncVal(
+ DestTy->castTo<BuiltinFloatType>()->getAPFloatSemantics());
+ APFloat::opStatus ConversionStatus = TruncVal.convertFromAPInt(
+ SrcVal, /*IsSigned=*/true, APFloat::rmNearestTiesToEven);
+
+ SILLocation Loc = BI->getLoc();
+ const ApplyExpr *CE = Loc.getAsASTNode<ApplyExpr>();
+
+ // Check for overflow.
+ if (ConversionStatus & APFloat::opOverflow) {
+ // If we overflow and are not asked for diagnostics, just return nullptr.
+ if (!ResultsInError.hasValue())
+ return nullptr;
+
+ SmallString<10> SrcAsString;
+ SrcVal.toString(SrcAsString, /*radix*/10, true /*isSigned*/);
+
+ // Otherwise emit our diagnostics and then return nullptr.
+ diagnose(M.getASTContext(), Loc.getSourceLoc(),
+ diag::integer_literal_overflow,
+ CE ? CE->getType() : DestTy, SrcAsString);
+ ResultsInError = Optional<bool>(true);
+ return nullptr;
+ }
+
+ // The call to the builtin should be replaced with the constant value.
+ SILBuilderWithScope B(BI);
+ return B.createFloatLiteral(Loc, BI->getType(), TruncVal);
+ }
+
+ case BuiltinValueKind::FPTrunc: {
+ // Get the value. It should be a constant in most cases.
+ auto *V = dyn_cast<FloatLiteralInst>(Args[0]);
+ if (!V)
+ return nullptr;
+ APFloat TruncVal = V->getValue();
+ Type DestTy = Builtin.Types[1];
+ bool losesInfo;
+ APFloat::opStatus ConversionStatus = TruncVal.convert(
+ DestTy->castTo<BuiltinFloatType>()->getAPFloatSemantics(),
+ APFloat::rmNearestTiesToEven, &losesInfo);
+ SILLocation Loc = BI->getLoc();
+
+ // Check if conversion was successful.
+ if (ConversionStatus != APFloat::opStatus::opOK &&
+ ConversionStatus != APFloat::opStatus::opInexact) {
+ return nullptr;
+ }
+
+ // The call to the builtin should be replaced with the constant value.
+ SILBuilderWithScope B(BI);
+ return B.createFloatLiteral(Loc, BI->getType(), TruncVal);
+ }
+
+ case BuiltinValueKind::AssumeNonNegative: {
+ auto *V = dyn_cast<IntegerLiteralInst>(Args[0]);
+ if (!V)
+ return nullptr;
+
+ APInt VInt = V->getValue();
+ if (VInt.isNegative() && ResultsInError.hasValue()) {
+ diagnose(M.getASTContext(), BI->getLoc().getSourceLoc(),
+ diag::wrong_non_negative_assumption,
+ VInt.toString(/*Radix*/ 10, /*Signed*/ true));
+ ResultsInError = Optional<bool>(true);
+ }
+ return V;
+ }
+ }
+ return nullptr;
+}
+
+static SILValue constantFoldInstruction(SILInstruction &I,
+ Optional<bool> &ResultsInError) {
+ // Constant fold function calls.
+ if (auto *BI = dyn_cast<BuiltinInst>(&I)) {
+ return constantFoldBuiltin(BI, ResultsInError);
+ }
+
+ // Constant fold extraction of a constant element.
+ if (auto *TEI = dyn_cast<TupleExtractInst>(&I)) {
+ if (auto *TheTuple = dyn_cast<TupleInst>(TEI->getOperand()))
+ return TheTuple->getElement(TEI->getFieldNo());
+ }
+
+ // Constant fold extraction of a constant struct element.
+ if (auto *SEI = dyn_cast<StructExtractInst>(&I)) {
+ if (auto *Struct = dyn_cast<StructInst>(SEI->getOperand()))
+ return Struct->getOperandForField(SEI->getField())->get();
+ }
+
+ // Constant fold indexing insts of a 0 integer literal.
+ if (auto *II = dyn_cast<IndexingInst>(&I))
+ if (auto *IntLiteral = dyn_cast<IntegerLiteralInst>(II->getIndex()))
+ if (!IntLiteral->getValue())
+ return II->getBase();
+
+ return SILValue();
+}
+
+static bool isApplyOfBuiltin(SILInstruction &I, BuiltinValueKind kind) {
+ if (auto *BI = dyn_cast<BuiltinInst>(&I))
+ if (BI->getBuiltinInfo().ID == kind)
+ return true;
+ return false;
+}
+
+static bool isApplyOfStringConcat(SILInstruction &I) {
+ if (auto *AI = dyn_cast<ApplyInst>(&I))
+ if (auto *Fn = AI->getReferencedFunction())
+ if (Fn->hasSemanticsAttr("string.concat"))
+ return true;
+ return false;
+}
+
+static bool isFoldable(SILInstruction *I) {
+ return isa<IntegerLiteralInst>(I) || isa<FloatLiteralInst>(I);
+}
+
+bool ConstantFolder::constantFoldStringConcatenation(ApplyInst *AI) {
+ SILBuilder B(AI);
+ // Try to apply the string literal concatenation optimization.
+ auto *Concatenated = tryToConcatenateStrings(AI, B);
+ // Bail if string literal concatenation could not be performed.
+ if (!Concatenated)
+ return false;
+
+ // Replace all uses of the old instruction by a new instruction.
+ AI->replaceAllUsesWith(Concatenated);
+
+ auto RemoveCallback = [&](SILInstruction *DeadI) { WorkList.remove(DeadI); };
+ // Remove operands that are not used anymore.
+ // Even if they are apply_inst, it is safe to
+ // do so, because they can only be applies
+  // of string-literal construction functions
+  // (e.g. annotated as string.utf16).
+ for (auto &Op : AI->getAllOperands()) {
+ SILValue Val = Op.get();
+ Op.drop();
+ if (Val->use_empty()) {
+ auto *DeadI = Val->getDefiningInstruction();
+ assert(DeadI);
+ recursivelyDeleteTriviallyDeadInstructions(DeadI, /*force*/ true,
+ RemoveCallback);
+ WorkList.remove(DeadI);
+ }
+ }
+ // Schedule users of the new instruction for constant folding.
+ // We only need to schedule the string.concat invocations.
+ for (auto AIUse : Concatenated->getUses()) {
+ if (isApplyOfStringConcat(*AIUse->getUser())) {
+ WorkList.insert(AIUse->getUser());
+ }
+ }
+ // Delete the old apply instruction.
+ recursivelyDeleteTriviallyDeadInstructions(AI, /*force*/ true,
+ RemoveCallback);
+ return true;
+}
+
+/// Initialize the worklist to all of the constant instructions.
+void ConstantFolder::initializeWorklist(SILFunction &F) {
+ for (auto &BB : F) {
+ for (auto &I : BB) {
+ if (isFoldable(&I) && I.hasUsesOfAnyResult()) {
+ WorkList.insert(&I);
+ continue;
+ }
+
+ // Should we replace calls to assert_configuration by the assert
+ // configuration.
+ if (AssertConfiguration != SILOptions::DisableReplacement &&
+ (isApplyOfBuiltin(I, BuiltinValueKind::AssertConf) ||
+ isApplyOfBuiltin(I, BuiltinValueKind::CondUnreachable))) {
+ WorkList.insert(&I);
+ continue;
+ }
+
+ if (isa<CheckedCastBranchInst>(&I) ||
+ isa<CheckedCastAddrBranchInst>(&I) ||
+ isa<UnconditionalCheckedCastInst>(&I) ||
+ isa<UnconditionalCheckedCastAddrInst>(&I)) {
+ WorkList.insert(&I);
+ continue;
+ }
+
+ if (!isApplyOfStringConcat(I)) {
+ continue;
+ }
+ WorkList.insert(&I);
+ }
+ }
+}
+
+SILAnalysis::InvalidationKind
+ConstantFolder::processWorkList() {
+ DEBUG(llvm::dbgs() << "*** ConstPropagation processing: \n");
+
+ // This is the list of traits that this transformation might preserve.
+ bool InvalidateBranches = false;
+ bool InvalidateCalls = false;
+ bool InvalidateInstructions = false;
+
+ // The list of instructions whose evaluation resulted in error or warning.
+ // This is used to avoid duplicate error reporting in case we reach the same
+ // instruction from different entry points in the WorkList.
+ llvm::DenseSet<SILInstruction *> ErrorSet;
+
+ llvm::SetVector<SILInstruction *> FoldedUsers;
+ CastOptimizer CastOpt(
+ [&](SingleValueInstruction *I, ValueBase *V) { /* ReplaceInstUsesAction */
+
+ InvalidateInstructions = true;
+ I->replaceAllUsesWith(V);
+ },
+ [&](SILInstruction *I) { /* EraseAction */
+ auto *TI = dyn_cast<TermInst>(I);
+
+ if (TI) {
+ // Invalidate analysis information related to branches. Replacing
+ // unconditional_check_branch type instructions by a trap will also
+ // invalidate branches/the CFG.
+ InvalidateBranches = true;
+ }
+
+ InvalidateInstructions = true;
+
+ WorkList.remove(I);
+ I->eraseFromParent();
+ });
+
+ while (!WorkList.empty()) {
+ SILInstruction *I = WorkList.pop_back_val();
+ assert(I->getParent() && "SILInstruction must have parent.");
+
+ DEBUG(llvm::dbgs() << "Visiting: " << *I);
+
+ Callback(I);
+
+ // Replace assert_configuration instructions by their constant value. We
+    // want them to be replaced even if we can't fully propagate the constant.
+ if (AssertConfiguration != SILOptions::DisableReplacement)
+ if (auto *BI = dyn_cast<BuiltinInst>(I)) {
+ if (isApplyOfBuiltin(*BI, BuiltinValueKind::AssertConf)) {
+ // Instantiate the constant.
+ SILBuilderWithScope B(BI);
+ auto AssertConfInt = B.createIntegerLiteral(
+ BI->getLoc(), BI->getType(), AssertConfiguration);
+ BI->replaceAllUsesWith(AssertConfInt);
+ // Schedule users for constant folding.
+ WorkList.insert(AssertConfInt);
+ // Delete the call.
+ recursivelyDeleteTriviallyDeadInstructions(BI);
+
+ InvalidateInstructions = true;
+ continue;
+ }
+
+ // Kill calls to conditionallyUnreachable if we've folded assert
+ // configuration calls.
+ if (isApplyOfBuiltin(*BI, BuiltinValueKind::CondUnreachable)) {
+ assert(BI->use_empty() && "use of conditionallyUnreachable?!");
+ recursivelyDeleteTriviallyDeadInstructions(BI, /*force*/ true);
+ InvalidateInstructions = true;
+ continue;
+ }
+ }
+
+ if (auto *AI = dyn_cast<ApplyInst>(I)) {
+ // Apply may only come from a string.concat invocation.
+ if (constantFoldStringConcatenation(AI)) {
+ // Invalidate all analysis that's related to the call graph.
+ InvalidateInstructions = true;
+ }
+
+ continue;
+ }
+
+ if (isa<CheckedCastBranchInst>(I) || isa<CheckedCastAddrBranchInst>(I) ||
+ isa<UnconditionalCheckedCastInst>(I) ||
+ isa<UnconditionalCheckedCastAddrInst>(I)) {
+ // Try to perform cast optimizations. Invalidation is handled by a
+ // callback inside the cast optimizer.
+ SILInstruction *Result = nullptr;
+ switch(I->getKind()) {
+ default:
+ llvm_unreachable("Unexpected instruction for cast optimizations");
+ case SILInstructionKind::CheckedCastBranchInst:
+ Result = CastOpt.simplifyCheckedCastBranchInst(cast<CheckedCastBranchInst>(I));
+ break;
+ case SILInstructionKind::CheckedCastAddrBranchInst:
+ Result = CastOpt.simplifyCheckedCastAddrBranchInst(cast<CheckedCastAddrBranchInst>(I));
+ break;
+ case SILInstructionKind::UnconditionalCheckedCastInst: {
+ auto Value =
+ CastOpt.optimizeUnconditionalCheckedCastInst(cast<UnconditionalCheckedCastInst>(I));
+ if (Value) Result = Value->getDefiningInstruction();
+ break;
+ }
+ case SILInstructionKind::UnconditionalCheckedCastAddrInst:
+ Result = CastOpt.optimizeUnconditionalCheckedCastAddrInst(cast<UnconditionalCheckedCastAddrInst>(I));
+ break;
+ }
+
+ if (Result) {
+ if (isa<CheckedCastBranchInst>(Result) ||
+ isa<CheckedCastAddrBranchInst>(Result) ||
+ isa<UnconditionalCheckedCastInst>(Result) ||
+ isa<UnconditionalCheckedCastAddrInst>(Result))
+ WorkList.insert(Result);
+ }
+ continue;
+ }
+
+
+ // Go through all users of the constant and try to fold them.
+ // TODO: MultiValueInstruction
+ FoldedUsers.clear();
+ for (auto Use : cast<SingleValueInstruction>(I)->getUses()) {
+ SILInstruction *User = Use->getUser();
+ DEBUG(llvm::dbgs() << " User: " << *User);
+
+ // It is possible that we had processed this user already. Do not try
+ // to fold it again if we had previously produced an error while folding
+ // it. It is not always possible to fold an instruction in case of error.
+ if (ErrorSet.count(User))
+ continue;
+
+ // Some constant users may indirectly cause folding of their users.
+ if (isa<StructInst>(User) || isa<TupleInst>(User)) {
+ WorkList.insert(User);
+ continue;
+ }
+
+ // Always consider cond_fail instructions as potential for DCE. If the
+ // expression feeding them is false, they are dead. We can't handle this
+ // as part of the constant folding logic, because there is no value
+ // they can produce (other than empty tuple, which is wasteful).
+ if (isa<CondFailInst>(User))
+ FoldedUsers.insert(User);
+
+ // Initialize ResultsInError as a None optional.
+ //
+ // We are essentially using this optional to represent 3 states: true,
+ // false, and n/a.
+ Optional<bool> ResultsInError;
+
+ // If we are asked to emit diagnostics, override ResultsInError with a
+ // Some optional initialized to false.
+ if (EnableDiagnostics)
+ ResultsInError = false;
+
+ // Try to fold the user. If ResultsInError is None, we do not emit any
+ // diagnostics. If ResultsInError is some, we use it as our return value.
+ SILValue C = constantFoldInstruction(*User, ResultsInError);
+
+ // If we did not pass in a None and the optional is set to true, add the
+ // user to our error set.
+ if (ResultsInError.hasValue() && ResultsInError.getValue())
+ ErrorSet.insert(User);
+
+ // We failed to constant propagate... continue...
+ if (!C)
+ continue;
+
+ // We can currently only do this constant-folding of single-value
+ // instructions.
+ auto UserV = cast<SingleValueInstruction>(User);
+
+ // Ok, we have succeeded. Add user to the FoldedUsers list and perform the
+ // necessary cleanups, RAUWs, etc.
+ FoldedUsers.insert(User);
+ ++NumInstFolded;
+
+ InvalidateInstructions = true;
+
+ // If the constant produced a tuple, be smarter than RAUW: explicitly nuke
+ // any tuple_extract instructions using the apply. This is a common case
+ // for functions returning multiple values.
+ if (auto *TI = dyn_cast<TupleInst>(C)) {
+ for (auto UI = UserV->use_begin(), E = UserV->use_end(); UI != E;) {
+ Operand *O = *UI++;
+
+ // If the user is a tuple_extract, just substitute the right value in.
+ if (auto *TEI = dyn_cast<TupleExtractInst>(O->getUser())) {
+ SILValue NewVal = TI->getOperand(TEI->getFieldNo());
+ TEI->replaceAllUsesWith(NewVal);
+ TEI->dropAllReferences();
+ FoldedUsers.insert(TEI);
+ if (auto *Inst = NewVal->getDefiningInstruction())
+ WorkList.insert(Inst);
+ }
+ }
+
+ if (UserV->use_empty())
+ FoldedUsers.insert(TI);
+ }
+
+
+ // We were able to fold, so all users should use the new folded value.
+ UserV->replaceAllUsesWith(C);
+
+ // The new constant could be further folded now, add it to the worklist.
+ if (auto *Inst = C->getDefiningInstruction())
+ WorkList.insert(Inst);
+ }
+
+ // Eagerly DCE. We do this after visiting all users to ensure we don't
+ // invalidate the uses iterator.
+ ArrayRef<SILInstruction *> UserArray = FoldedUsers.getArrayRef();
+ if (!UserArray.empty()) {
+ InvalidateInstructions = true;
+ }
+
+ recursivelyDeleteTriviallyDeadInstructions(UserArray, false,
+ [&](SILInstruction *DeadI) {
+ WorkList.remove(DeadI);
+ });
+ }
+
+ // TODO: refactor this code outside of the method. Passes should not merge
+ // invalidation kinds themselves.
+ using InvalidationKind = SILAnalysis::InvalidationKind;
+
+ unsigned Inv = InvalidationKind::Nothing;
+ if (InvalidateInstructions) Inv |= (unsigned) InvalidationKind::Instructions;
+ if (InvalidateCalls) Inv |= (unsigned) InvalidationKind::Calls;
+ if (InvalidateBranches) Inv |= (unsigned) InvalidationKind::Branches;
+ return InvalidationKind(Inv);
+}
+
+
diff --git a/lib/SILOptimizer/Utils/Local.cpp b/lib/SILOptimizer/Utils/Local.cpp
index a6b232e..ab57fed 100644
--- a/lib/SILOptimizer/Utils/Local.cpp
+++ b/lib/SILOptimizer/Utils/Local.cpp
@@ -1578,3 +1578,48 @@
}
}
+void StaticInitCloner::add(SILInstruction *InitVal) {
+ // Don't schedule an instruction twice for cloning.
+ if (NumOpsToClone.count(InitVal) != 0)
+ return;
+
+ ArrayRef<Operand> Ops = InitVal->getAllOperands();
+ NumOpsToClone[InitVal] = Ops.size();
+ if (Ops.empty()) {
+ // It's an instruction without operands, e.g. a literal. It's ready to be
+ // cloned first.
+ ReadyToClone.push_back(InitVal);
+ } else {
+ // Recursively add all operands.
+ for (const Operand &Op : Ops) {
+ add(cast<SingleValueInstruction>(Op.get()));
+ }
+ }
+}
+
+SingleValueInstruction *
+StaticInitCloner::clone(SingleValueInstruction *InitVal) {
+ assert(NumOpsToClone.count(InitVal) != 0 && "InitVal was not added");
+ // Find the right order to clone: all operands of an instruction must be
+ // cloned before the instruction itself.
+ while (!ReadyToClone.empty()) {
+ SILInstruction *I = ReadyToClone.pop_back_val();
+
+ // Clone the instruction into the SILGlobalVariable
+ visit(I);
+
+ // Check if users of I can now be cloned.
+ for (SILValue result : I->getResults()) {
+ for (Operand *Use : result->getUses()) {
+ SILInstruction *User = Use->getUser();
+ if (NumOpsToClone.count(User) != 0 && --NumOpsToClone[User] == 0)
+ ReadyToClone.push_back(User);
+ }
+ }
+ }
+ assert(ValueMap.count(InitVal) != 0 &&
+ "Could not schedule all instructions for cloning");
+ return cast<SingleValueInstruction>(ValueMap[InitVal]);
+}
+
+
diff --git a/lib/SILOptimizer/Utils/PerformanceInlinerUtils.cpp b/lib/SILOptimizer/Utils/PerformanceInlinerUtils.cpp
index c4212d3..b591a62 100644
--- a/lib/SILOptimizer/Utils/PerformanceInlinerUtils.cpp
+++ b/lib/SILOptimizer/Utils/PerformanceInlinerUtils.cpp
@@ -781,47 +781,83 @@
return EligibleCallee;
}
-/// Returns true if a given value is constant.
-/// The value is considered to be constant if it is:
-/// - a literal
-/// - a tuple or a struct whose fields are all constants
-static bool isConstantValue(SILValue V) {
- if (isa<LiteralInst>(V))
- return true;
- if (auto *TI = dyn_cast<TupleInst>(V)) {
- for (auto E : TI->getElements()) {
- if (!isConstantValue(E))
- return false;
+/// Returns true if the instruction \p I has any interesting side effects which
+/// might prevent inlining a pure function.
+static bool hasInterestingSideEffect(SILInstruction *I) {
+ switch (I->getKind()) {
+  // Those instructions turn into no-ops after inlining, redundant load
+  // elimination, constant folding and dead-object elimination.
+ case swift::SILInstructionKind::StrongRetainInst:
+ case swift::SILInstructionKind::StrongReleaseInst:
+ case swift::SILInstructionKind::RetainValueInst:
+ case swift::SILInstructionKind::ReleaseValueInst:
+ case swift::SILInstructionKind::StoreInst:
+ case swift::SILInstructionKind::DeallocRefInst:
+ return false;
+ default:
+ return I->getMemoryBehavior() != SILInstruction::MemoryBehavior::None;
+ }
+}
+
+/// Returns true if the operand \p Arg is a constant or an object which is
+/// initialized with constant values.
+///
+/// The value is considered to be constant if it is composed of side-effect free
+/// instructions, like literal or aggregate instructions.
+static bool isConstantArg(Operand *Arg) {
+ auto *ArgI = Arg->get()->getDefiningInstruction();
+ if (!ArgI)
+ return false;
+
+ SmallPtrSet<SILInstruction *, 8> Visited;
+ SmallVector<SILInstruction *, 8> Worklist;
+
+ auto addToWorklist = [&](SILInstruction *I) {
+ if (Visited.insert(I).second)
+ Worklist.push_back(I);
+ };
+
+ addToWorklist(ArgI);
+
+  // Visit the transitive closure of \p Arg and see if there are any
+  // side-effect instructions which prevent folding away everything after
+  // inlining.
+ while (!Worklist.empty()) {
+ SILInstruction *I = Worklist.pop_back_val();
+
+ if (hasInterestingSideEffect(I))
+ return false;
+
+ for (SILValue Result : I->getResults()) {
+ for (Operand *Use : Result->getUses()) {
+ if (Use != Arg)
+ addToWorklist(Use->getUser());
+ }
}
- return true;
- }
- if (auto *SI = dyn_cast<StructInst>(V)) {
- for (auto E : SI->getElements()) {
- if (!isConstantValue(E))
+ for (Operand &Op : I->getAllOperands()) {
+ if (SILInstruction *OpInst = Op.get()->getDefiningInstruction()) {
+ addToWorklist(OpInst);
+ } else {
return false;
+ }
}
- return true;
}
- if (auto *MT = dyn_cast<MetatypeInst>(V)) {
- if (!MT->getType().hasArchetype())
- return true;
- }
- return false;
+ return true;
}
bool swift::isPureCall(FullApplySite AI, SideEffectAnalysis *SEA) {
// If a call has only constant arguments and the call is pure, i.e. has
// no side effects, then we should always inline it.
+ // This includes arguments which are objects initialized with constant values.
SideEffectAnalysis::FunctionEffects ApplyEffects;
SEA->getEffects(ApplyEffects, AI);
auto GE = ApplyEffects.getGlobalEffects();
if (GE.mayRead() || GE.mayWrite() || GE.mayRetain() || GE.mayRelease())
return false;
// Check if all parameters are constant.
- auto Args = AI.getArgumentsWithoutIndirectResults();
- for (auto Arg : Args) {
- if (!isConstantValue(Arg)) {
+ auto Args = AI.getArgumentOperands().slice(AI.getNumIndirectSILResults());
+ for (Operand &Arg : Args) {
+ if (!isConstantArg(&Arg)) {
return false;
}
}
diff --git a/stdlib/public/SwiftShims/RefCount.h b/stdlib/public/SwiftShims/RefCount.h
index 8cec6d3..2bbefe9 100644
--- a/stdlib/public/SwiftShims/RefCount.h
+++ b/stdlib/public/SwiftShims/RefCount.h
@@ -1231,7 +1231,9 @@
newbits = oldbits;
assert(newbits.getWeakRefCount() != 0);
newbits.incrementWeakRefCount();
- // FIXME: overflow check
+
+ if (newbits.getWeakRefCount() < oldbits.getWeakRefCount())
+ swift_abortWeakRetainOverflow();
} while (!refCounts.compare_exchange_weak(oldbits, newbits,
std::memory_order_relaxed));
}
@@ -1262,16 +1264,7 @@
// Return weak reference count.
// Note that this is not equal to the number of outstanding weak pointers.
- uint32_t getWeakCount() const {
- auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
- if (bits.hasSideTable()) {
- return bits.getSideTable()->getWeakCount();
- } else {
- // No weak refcount storage. Return only the weak increment held
- // on behalf of the unowned count.
- return bits.getUnownedRefCount() ? 1 : 0;
- }
- }
+ uint32_t getWeakCount() const;
private:
@@ -1288,6 +1281,11 @@
static_assert(std::is_trivially_destructible<InlineRefCounts>::value,
"InlineRefCounts must be trivially destructible");
+template <>
+inline uint32_t RefCounts<InlineRefCountBits>::getWeakCount() const;
+template <>
+inline uint32_t RefCounts<SideTableRefCountBits>::getWeakCount() const;
+
class HeapObjectSideTableEntry {
// FIXME: does object need to be atomic?
std::atomic<HeapObject*> object;
@@ -1532,6 +1530,23 @@
"a side table entry of its own");
}
+template <>
+inline uint32_t RefCounts<InlineRefCountBits>::getWeakCount() const {
+ auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
+ if (bits.hasSideTable()) {
+ return bits.getSideTable()->getWeakCount();
+ } else {
+ // No weak refcount storage. Return only the weak increment held
+ // on behalf of the unowned count.
+ return bits.getUnownedRefCount() ? 1 : 0;
+ }
+}
+
+template <>
+inline uint32_t RefCounts<SideTableRefCountBits>::getWeakCount() const {
+ auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
+ return bits.getWeakRefCount();
+}
template <> inline
HeapObject* RefCounts<InlineRefCountBits>::getHeapObject() {
diff --git a/stdlib/public/core/Filter.swift b/stdlib/public/core/Filter.swift
index 1180a98..0750129 100644
--- a/stdlib/public/core/Filter.swift
+++ b/stdlib/public/core/Filter.swift
@@ -85,6 +85,7 @@
}
extension LazyFilterSequence: LazySequenceProtocol {
+ public typealias Element = Base.Element
/// Returns an iterator over the elements of this sequence.
///
/// - Complexity: O(1).
@@ -383,6 +384,26 @@
}
}
+extension LazyFilterSequence {
+ public func filter(
+ _ isIncluded: @escaping (Element) -> Bool
+ ) -> LazyFilterSequence<Base> {
+ return LazyFilterSequence(_base: _base) {
+ isIncluded($0) && self._predicate($0)
+ }
+ }
+}
+
+extension LazyFilterCollection {
+ public func filter(
+ _ isIncluded: @escaping (Element) -> Bool
+ ) -> LazyFilterCollection<Base> {
+ return LazyFilterCollection(_base: _base) {
+ isIncluded($0) && self._predicate($0)
+ }
+ }
+}
+
// @available(*, deprecated, renamed: "LazyFilterSequence.Iterator")
public typealias LazyFilterIterator<T: Sequence> = LazyFilterSequence<T>.Iterator
// @available(swift, deprecated: 3.1, obsoleted: 4.0, message: "Use Base.Index")
diff --git a/stdlib/public/core/FloatingPoint.swift.gyb b/stdlib/public/core/FloatingPoint.swift.gyb
index 3e75d2f..6183f56 100644
--- a/stdlib/public/core/FloatingPoint.swift.gyb
+++ b/stdlib/public/core/FloatingPoint.swift.gyb
@@ -2053,6 +2053,7 @@
significandBitPattern: magnitudeOf.significandBitPattern)
}
+ @_inlineable // FIXME(sil-serialize-all)
public // @testable
static func _convert<Source : BinaryInteger>(
from source: Source
@@ -2129,6 +2130,7 @@
self = value_
}
+ @_inlineable // FIXME(sil-serialize-all)
public // @testable
static func _convert<Source : BinaryFloatingPoint>(
from source: Source
diff --git a/stdlib/public/core/Integers.swift.gyb b/stdlib/public/core/Integers.swift.gyb
index d1a4a91..80ead85 100644
--- a/stdlib/public/core/Integers.swift.gyb
+++ b/stdlib/public/core/Integers.swift.gyb
@@ -1602,21 +1602,26 @@
@_inlineable // FIXME(sil-serialize-all)
public func _binaryLogarithm() -> Self {
- _precondition(self > 0)
- let wordBitWidth = Magnitude.Words.Element.bitWidth
- let reversedWords = magnitude.words.reversed()
+ _precondition(self > (0 as Self))
+ var (quotient, remainder) =
+ (bitWidth &- 1).quotientAndRemainder(dividingBy: UInt.bitWidth)
+ remainder = remainder &+ 1
+ var word = UInt(truncatingIfNeeded: self >> (bitWidth &- remainder))
// If, internally, a variable-width binary integer uses digits of greater
// bit width than that of Magnitude.Words.Element (i.e., UInt), then it is
- // possible that more than one element of Magnitude.Words could be entirely
- // zero.
- let reversedWordsLeadingZeroBitCount =
- zip(0..., reversedWords)
- .first { $0.1 != 0 }
- .map { $0.0 &* wordBitWidth &+ $0.1.leadingZeroBitCount }!
- let logarithm =
- reversedWords.count &* wordBitWidth &-
- (reversedWordsLeadingZeroBitCount &+ 1)
- return Self(logarithm)
+ // possible that `word` could be zero. Additionally, a signed variable-width
+ // binary integer may have a leading word that is zero to store a clear sign
+ // bit.
+ while word == 0 {
+ quotient = quotient &- 1
+ remainder = remainder &+ UInt.bitWidth
+ word = UInt(truncatingIfNeeded: self >> (bitWidth &- remainder))
+ }
+ // Note that the order of operations below is important to guarantee that
+ // we won't overflow.
+ return Self(
+ UInt.bitWidth &* quotient &+
+ (UInt.bitWidth &- (word.leadingZeroBitCount &+ 1)))
}
/// Returns the quotient and remainder of this value divided by the given
@@ -2239,8 +2244,8 @@
@_inlineable // FIXME(sil-serialize-all)
public func _binaryLogarithm() -> Self {
- _precondition(self > 0)
- return Self(Magnitude.bitWidth &- (magnitude.leadingZeroBitCount &+ 1))
+ _precondition(self > (0 as Self))
+ return Self(Self.bitWidth &- (leadingZeroBitCount &+ 1))
}
/// Creates an integer from its little-endian representation, changing the
diff --git a/stdlib/public/core/Map.swift b/stdlib/public/core/Map.swift
index 5cd0ce7..9b936b8 100644
--- a/stdlib/public/core/Map.swift
+++ b/stdlib/public/core/Map.swift
@@ -276,6 +276,28 @@
}
}
+extension LazyMapSequence {
+ @_inlineable
+ public func map<ElementOfResult>(
+ _ transform: @escaping (Element) -> ElementOfResult
+ ) -> LazyMapSequence<Base, ElementOfResult> {
+ return LazyMapSequence<Base, ElementOfResult>(
+ _base: _base,
+ transform: {transform(self._transform($0))})
+ }
+}
+
+extension LazyMapCollection {
+ @_inlineable
+ public func map<ElementOfResult>(
+ _ transform: @escaping (Element) -> ElementOfResult
+ ) -> LazyMapCollection<Base, ElementOfResult> {
+ return LazyMapCollection<Base, ElementOfResult>(
+ _base: _base,
+ transform: {transform(self._transform($0))})
+ }
+}
+
// @available(*, deprecated, renamed: "LazyMapSequence.Iterator")
public typealias LazyMapIterator<T, E> = LazyMapSequence<T, E>.Iterator where T: Sequence
@available(*, deprecated, renamed: "LazyMapCollection")
diff --git a/stdlib/public/core/SwiftNativeNSArray.swift b/stdlib/public/core/SwiftNativeNSArray.swift
index 5189acc..4c181d5 100644
--- a/stdlib/public/core/SwiftNativeNSArray.swift
+++ b/stdlib/public/core/SwiftNativeNSArray.swift
@@ -267,6 +267,10 @@
@_inlineable
@_versioned
internal init() {}
+
+ @_inlineable
+ @_versioned
+ deinit {}
}
#endif
diff --git a/stdlib/public/runtime/Casting.cpp b/stdlib/public/runtime/Casting.cpp
index 9c6ab68..ddc9b94 100644
--- a/stdlib/public/runtime/Casting.cpp
+++ b/stdlib/public/runtime/Casting.cpp
@@ -2682,6 +2682,12 @@
// protocol _ObjectiveCBridgeable {
struct _ObjectiveCBridgeableWitnessTable {
+ /// The protocol conformance descriptor.
+ const void *protocolConformanceDescriptor;
+
+ static_assert(WitnessTableFirstRequirementOffset == 1,
+ "Witness table layout changed");
+
// associatedtype _ObjectiveCType : class
const Metadata * (*ObjectiveCType)(
const Metadata *parentMetadata,
diff --git a/stdlib/public/runtime/Errors.cpp b/stdlib/public/runtime/Errors.cpp
index 5a9bf87..a603936 100644
--- a/stdlib/public/runtime/Errors.cpp
+++ b/stdlib/public/runtime/Errors.cpp
@@ -383,6 +383,13 @@
"Fatal error: Object's unowned reference was retained too many times");
}
+// Crash due to a weak retain count overflow.
+// FIXME: can't pass the object's address from InlineRefCounts without hacks
+void swift::swift_abortWeakRetainOverflow() {
+ swift::fatalError(FatalErrorFlags::ReportBacktrace,
+ "Fatal error: Object's weak reference was retained too many times");
+}
+
// Crash due to retain of a dead unowned reference.
// FIXME: can't pass the object's address from InlineRefCounts without hacks
void swift::swift_abortRetainUnowned(const void *object) {
diff --git a/stdlib/public/runtime/Metadata.cpp b/stdlib/public/runtime/Metadata.cpp
index 1f0c3d8..debd325 100644
--- a/stdlib/public/runtime/Metadata.cpp
+++ b/stdlib/public/runtime/Metadata.cpp
@@ -2749,7 +2749,8 @@
if (genericTable->Instantiator.isNull() &&
genericTable->WitnessTablePrivateSizeInWords == 0 &&
genericTable->WitnessTableSizeInWords ==
- genericTable->Protocol->NumRequirements) {
+ (genericTable->Protocol->NumRequirements +
+ WitnessTableFirstRequirementOffset)) {
return true;
}
@@ -2770,11 +2771,13 @@
// The number of mandatory requirements, i.e. requirements lacking
// default implementations.
- size_t numMandatoryRequirements = protocol->NumMandatoryRequirements;
+ size_t numMandatoryRequirements =
+ protocol->NumMandatoryRequirements + WitnessTableFirstRequirementOffset;
assert(numPatternWitnesses >= numMandatoryRequirements);
// The total number of requirements.
- size_t numRequirements = protocol->NumRequirements;
+ size_t numRequirements =
+ protocol->NumRequirements + WitnessTableFirstRequirementOffset;
assert(numPatternWitnesses <= numRequirements);
// Number of bytes for any private storage used by the conformance itself.
@@ -2807,7 +2810,9 @@
// Fill in any default requirements.
for (size_t i = numPatternWitnesses, e = numRequirements; i < e; ++i) {
- void *defaultImpl = requirements[i].DefaultImplementation.get();
+ size_t requirementIndex = i - WitnessTableFirstRequirementOffset;
+ void *defaultImpl =
+ requirements[requirementIndex].DefaultImplementation.get();
assert(defaultImpl &&
"no default implementation for missing requirement");
table[i] = defaultImpl;
diff --git a/stdlib/public/runtime/MetadataLookup.cpp b/stdlib/public/runtime/MetadataLookup.cpp
index c8348f0..cac6f8c 100644
--- a/stdlib/public/runtime/MetadataLookup.cpp
+++ b/stdlib/public/runtime/MetadataLookup.cpp
@@ -383,7 +383,7 @@
continue;
if (currentAssocTypeIdx == matchingAssocTypeIdx)
- return reqIdx;
+ return reqIdx + WitnessTableFirstRequirementOffset;
++currentAssocTypeIdx;
}
diff --git a/test/ClangImporter/import-as-member-objc.swift b/test/ClangImporter/import-as-member-objc.swift
new file mode 100644
index 0000000..78f93e6
--- /dev/null
+++ b/test/ClangImporter/import-as-member-objc.swift
@@ -0,0 +1,13 @@
+// RUN: %target-swift-frontend(mock-sdk: %clang-importer-sdk) -typecheck -I %S/../IDE/Inputs/custom-modules %s -verify
+// REQUIRES: objc_interop
+
+import ImportAsMember.Class
+
+func doIt(s: SomeClass) {
+ s.doIt()
+}
+
+// Make sure we can't find doIt() via dynamic lookup.
+func doItDynamic(s: AnyObject) {
+ s.doIt() // expected-error {{value of type 'AnyObject' has no member 'doIt'}}
+}
diff --git a/test/ClangImporter/macros.swift b/test/ClangImporter/macros.swift
index 370f8ae..73202a0 100644
--- a/test/ClangImporter/macros.swift
+++ b/test/ClangImporter/macros.swift
@@ -172,3 +172,9 @@
_ = RECURSION_IN_EXPR2 // expected-error {{use of unresolved identifier 'RECURSION_IN_EXPR2'}}
_ = RECURSION_IN_EXPR3 // expected-error {{use of unresolved identifier 'RECURSION_IN_EXPR3'}}
}
+
+func testNulls() {
+ let _: Int = UNAVAILABLE_ONE // expected-error {{use of unresolved identifier 'UNAVAILABLE_ONE'}}
+ let _: Int = DEPRECATED_ONE // expected-error {{use of unresolved identifier 'DEPRECATED_ONE'}}
+ let _: Int = OKAY_TYPED_ONE // expected-error {{cannot convert value of type 'okay_t' (aka 'UInt32') to specified type 'Int'}}
+}
diff --git a/test/IDE/Inputs/custom-modules/ImportAsMemberClass.h b/test/IDE/Inputs/custom-modules/ImportAsMemberClass.h
index b93b166..cc23e81 100644
--- a/test/IDE/Inputs/custom-modules/ImportAsMemberClass.h
+++ b/test/IDE/Inputs/custom-modules/ImportAsMemberClass.h
@@ -19,6 +19,9 @@
void IAMSomeClassApplyOptions(IAMSomeClass * _Nonnull someClass,
IAMSomeClassOptions options);
+__attribute__((swift_name("SomeClass.doIt(self:)")))
+void IAMSomeClassDoIt(IAMSomeClass * _Nonnull someClass);
+
@interface UnavailableDefaultInit : NSObject
-(instancetype)init __attribute__((availability(swift,unavailable)));
@end
diff --git a/test/IDE/import_as_member_objc.swift b/test/IDE/import_as_member_objc.swift
index c254ba6..429c5a9 100644
--- a/test/IDE/import_as_member_objc.swift
+++ b/test/IDE/import_as_member_objc.swift
@@ -8,6 +8,7 @@
// PRINT-CLASS-NEXT: extension SomeClass {
// PRINT-CLASS-NEXT: /*not inherited*/ init(value x: Double)
// PRINT-CLASS-NEXT: func applyOptions(_ options: SomeClass.Options)
+// PRINT-CLASS-NEXT: func doIt()
// PRINT-CLASS-NEXT: struct Options : OptionSet {
// PRINT-CLASS-NEXT: init(rawValue rawValue: Int)
// PRINT-CLASS-NEXT: let rawValue: Int
diff --git a/test/IRGen/Mirror-LoadableByAddress-failure.swift b/test/IRGen/Mirror-LoadableByAddress-failure.swift
new file mode 100644
index 0000000..56eca79
--- /dev/null
+++ b/test/IRGen/Mirror-LoadableByAddress-failure.swift
@@ -0,0 +1,23 @@
+// Check that we don't crash when we verify after every pass.
+// RUN: %empty-directory(%t)
+//
+// RUN: %target-swift-frontend %s -I %S/../stdlib/Inputs/Mirror/ -o %t/Mirror \
+// RUN: -emit-ir -sil-verify-all -o /dev/null
+
+class A : CustomReflectable {
+ var a: Int = 1
+ var customMirror: Mirror {
+ return Mirror(self, children: ["aye": a])
+ }
+}
+class X : A {}
+class Y : X {}
+class B : Y {
+ var b: UInt = 42
+ override var customMirror: Mirror {
+ return Mirror(
+ self,
+ children: ["bee": b],
+ ancestorRepresentation: .customized({ super.customMirror }))
+ }
+}
diff --git a/test/IRGen/associated_type_witness.swift b/test/IRGen/associated_type_witness.swift
index e37c649..2470814 100644
--- a/test/IRGen/associated_type_witness.swift
+++ b/test/IRGen/associated_type_witness.swift
@@ -24,12 +24,13 @@
// Witness table access functions for Universal : P and Universal : Q.
// CHECK-LABEL: define hidden i8** @"$S23associated_type_witness9UniversalVAA1PAAWa"()
-// CHECK: ret i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S23associated_type_witness9UniversalVAA1PAAWP", i32 0, i32 0)
+// CHECK: ret i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S23associated_type_witness9UniversalVAA1PAAWP", i32 0, i32 0)
// CHECK-LABEL: define hidden i8** @"$S23associated_type_witness9UniversalVAA1QAAWa"()
-// CHECK: ret i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S23associated_type_witness9UniversalVAA1QAAWP", i32 0, i32 0)
+// CHECK: ret i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S23associated_type_witness9UniversalVAA1QAAWP", i32 0, i32 0)
// Witness table for WithUniversal : Assocked.
-// GLOBAL-LABEL: @"$S23associated_type_witness13WithUniversalVAA8AssockedAAWP" = hidden constant [3 x i8*] [
+// GLOBAL-LABEL: @"$S23associated_type_witness13WithUniversalVAA8AssockedAAWP" = hidden constant [4 x i8*] [
+// GLOBAL-SAME: @"$S23associated_type_witness13WithUniversalVAA8AssockedAAMc"
// GLOBAL-SAME: i8* bitcast (%swift.type* ()* @"$S23associated_type_witness9UniversalVMa" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** ()* @"$S23associated_type_witness9UniversalVAA1PAAWa" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** ()* @"$S23associated_type_witness9UniversalVAA1QAAWa" to i8*)
@@ -39,7 +40,8 @@
}
// Witness table for GenericWithUniversal : Assocked.
-// GLOBAL-LABEL: @"$S23associated_type_witness20GenericWithUniversalVyxGAA8AssockedAAWP" = hidden constant [3 x i8*] [
+// GLOBAL-LABEL: @"$S23associated_type_witness20GenericWithUniversalVyxGAA8AssockedAAWP" = hidden constant [4 x i8*] [
+// GLOBAL-SAME: @"$S23associated_type_witness20GenericWithUniversalVyxGAA8AssockedAAMc"
// GLOBAL-SAME: i8* bitcast (%swift.type* ()* @"$S23associated_type_witness9UniversalVMa" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** ()* @"$S23associated_type_witness9UniversalVAA1PAAWa" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** ()* @"$S23associated_type_witness9UniversalVAA1QAAWa" to i8*)
@@ -49,7 +51,8 @@
}
// Witness table for Fulfilled : Assocked.
-// GLOBAL-LABEL: @"$S23associated_type_witness9FulfilledVyxGAA8AssockedAAWP" = hidden constant [3 x i8*] [
+// GLOBAL-LABEL: @"$S23associated_type_witness9FulfilledVyxGAA8AssockedAAWP" = hidden constant [4 x i8*] [
+// GLOBAL-SAME: @"$S23associated_type_witness9FulfilledVyxGAA8AssockedAAMc"
// GLOBAL-SAME: i8* bitcast (%swift.type* (%swift.type*, i8**)* @"$S23associated_type_witness9FulfilledVyxGAA8AssockedAA5AssocWt" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** (%swift.type*, %swift.type*, i8**)* @"$S23associated_type_witness9FulfilledVyxGAA8AssockedAA5Assoc_AA1PPWT" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** (%swift.type*, %swift.type*, i8**)* @"$S23associated_type_witness9FulfilledVyxGAA8AssockedAA5Assoc_AA1QPWT" to i8*)
@@ -82,20 +85,22 @@
struct Pair<T, U> : P, Q {}
// Generic witness table pattern for Computed : Assocked.
-// GLOBAL-LABEL: @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWP" = hidden constant [3 x i8*] [
+// GLOBAL-LABEL: @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWP" = hidden constant [4 x i8*] [
+// GLOBAL-SAME: @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAMc"
// GLOBAL-SAME: i8* bitcast (%swift.type* (%swift.type*, i8**)* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAA5AssocWt" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** (%swift.type*, %swift.type*, i8**)* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAA5Assoc_AA1PPWT" to i8*)
// GLOBAL-SAME: i8* bitcast (i8** (%swift.type*, %swift.type*, i8**)* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAA5Assoc_AA1QPWT" to i8*)
// GLOBAL-SAME: ]
// Generic witness table cache for Computed : Assocked.
// GLOBAL-LABEL: @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWG" = internal constant %swift.generic_witness_table_cache {
-// GLOBAL-SAME: i16 3,
+// GLOBAL-SAME: i16 4,
// GLOBAL-SAME: i16 1,
// Relative reference to protocol
// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint (%swift.protocol* @"$S23associated_type_witness8AssockedMp" to i64), i64 ptrtoint (i32* getelementptr inbounds (%swift.generic_witness_table_cache, %swift.generic_witness_table_cache* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWG", i32 0, i32 2) to i64)) to i32
// Relative reference to witness table template
-// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint ([3 x i8*]* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWP" to i64), i64 ptrtoint (i32* getelementptr inbounds (%swift.generic_witness_table_cache, %swift.generic_witness_table_cache* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWG", i32 0, i32 3) to i64)) to i32),
+// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint ([4 x i8*]* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWP" to i64), i64 ptrtoint (i32* getelementptr inbounds (%swift.generic_witness_table_cache, %swift.generic_witness_table_cache* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWG", i32 0, i32 3) to i64)) to i32
+
// No instantiator function
// GLOBAL-SAME: i32 0,
// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint ([16 x i8*]* [[PRIVATE:@.*]] to i64), i64 ptrtoint (i32* getelementptr inbounds (%swift.generic_witness_table_cache, %swift.generic_witness_table_cache* @"$S23associated_type_witness8ComputedVyxq_GAA8AssockedAAWG", i32 0, i32 5) to i64)) to i32)
@@ -149,15 +154,18 @@
// Generic witness table pattern for GenericComputed : DerivedFromSimpleAssoc.
-// GLOBAL-LABEL: @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWP" = hidden constant [1 x i8*] zeroinitializer
+// GLOBAL-LABEL: @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWP" = hidden constant [2 x i8*]
+// GLOBAL-SAME: @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAMc"
+// GLOBAL-SAME: i8* null
+
// Generic witness table cache for GenericComputed : DerivedFromSimpleAssoc.
// GLOBAL-LABEL: @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWG" = internal constant %swift.generic_witness_table_cache {
-// GLOBAL-SAME: i16 1,
+// GLOBAL-SAME: i16 2,
// GLOBAL-SAME: i16 0,
// Relative reference to protocol
-// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint (%swift.protocol* @"$S23associated_type_witness22DerivedFromSimpleAssocMp" to i64),
+// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint (%swift.protocol* @"$S23associated_type_witness22DerivedFromSimpleAssocMp" to i64
// Relative reference to witness table template
-// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint ([1 x i8*]* @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWP" to i64)
+// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint ([2 x i8*]* @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWP" to i64
// Relative reference to instantiator function
// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint (void (i8**, %swift.type*, i8**)* @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWI" to i64), i64 ptrtoint (i32* getelementptr inbounds (%swift.generic_witness_table_cache, %swift.generic_witness_table_cache* @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWG", i32 0, i32 4) to i64)) to i32)
// GLOBAL-SAME: i32 trunc (i64 sub (i64 ptrtoint ([16 x i8*]* @1 to i64), i64 ptrtoint (i32* getelementptr inbounds (%swift.generic_witness_table_cache, %swift.generic_witness_table_cache* @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWG", i32 0, i32 5) to i64)) to i32)
@@ -170,7 +178,7 @@
// CHECK-LABEL: define internal void @"$S23associated_type_witness15GenericComputedVyxGAA22DerivedFromSimpleAssocAAWI"(i8**, %swift.type*, i8**)
// CHECK: [[T0:%.*]] = call i8** @"$S23associated_type_witness15GenericComputedVyxGAA14HasSimpleAssocAAWa"(%swift.type* %1, i8*** undef, i64 0)
// CHECK-NEXT: [[T1:%.*]] = bitcast i8** [[T0]] to i8*
-// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8*, i8** %0, i32 0
+// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8*, i8** %0, i32 1
// CHECK-NEXT: store i8* [[T1]], i8** [[T2]], align 8
// CHECK-NEXT: ret void
diff --git a/test/IRGen/associated_types.swift b/test/IRGen/associated_types.swift
index 365cc57..05148bb 100644
--- a/test/IRGen/associated_types.swift
+++ b/test/IRGen/associated_types.swift
@@ -75,26 +75,29 @@
// 1. Get the type metadata for U.RuncerType.Runcee.
// 1a. Get the type metadata for U.RuncerType.
// Note that we actually look things up in T, which is going to prove unfortunate.
-// CHECK: [[T0:%.*]] = load i8*, i8** %T.Runcible,
+// CHECK: [[T0_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.Runcible, i32 1
+// CHECK: [[T0:%.*]] = load i8*, i8** [[T0_GEP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to %swift.type* (%swift.type*, i8**)*
// CHECK-NEXT: %T.RuncerType = call %swift.type* [[T1]](%swift.type* %T, i8** %T.Runcible)
// 2. Get the witness table for U.RuncerType.Runcee : Speedy
// 2a. Get the protocol witness table for U.RuncerType : FastRuncer.
-// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8*, i8** %U.FastRuncible, i32 1
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8*, i8** %U.FastRuncible, i32 2
// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]],
// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to i8** (%swift.type*, %swift.type*, i8**)*
// CHECK-NEXT: %T.RuncerType.FastRuncer = call i8** [[T2]](%swift.type* %T.RuncerType, %swift.type* %U, i8** %U.FastRuncible)
// 1c. Get the type metadata for U.RuncerType.Runcee.
-// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** %T.RuncerType.FastRuncer
+// CHECK-NEXT: [[T0_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.RuncerType.FastRuncer, i32 1
+// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[T0_GEP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to %swift.type* (%swift.type*, i8**)*
// CHECK-NEXT: %T.RuncerType.Runcee = call %swift.type* [[T1]](%swift.type* %T.RuncerType, i8** %T.RuncerType.FastRuncer)
// 2b. Get the witness table for U.RuncerType.Runcee : Speedy.
-// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8*, i8** %T.RuncerType.FastRuncer, i32 1
+// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i8*, i8** %T.RuncerType.FastRuncer, i32 2
// CHECK-NEXT: [[T1:%.*]] = load i8*, i8** [[T0]],
// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to i8** (%swift.type*, %swift.type*, i8**)*
// CHECK-NEXT: %T.RuncerType.Runcee.Speedy = call i8** [[T2]](%swift.type* %T.RuncerType.Runcee, %swift.type* %T.RuncerType, i8** %T.RuncerType.FastRuncer)
// 3. Perform the actual call.
-// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** %T.RuncerType.Runcee.Speedy,
+// CHECK-NEXT: [[T0_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.RuncerType.Runcee.Speedy, i32 1
+// CHECK-NEXT: [[T0:%.*]] = load i8*, i8** [[T0_GEP]]
// CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to void (%swift.type*, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[T1]](%swift.type* swiftself %T.RuncerType.Runcee, %swift.type* %T.RuncerType.Runcee, i8** %T.RuncerType.Runcee.Speedy)
diff --git a/test/IRGen/class_bounded_generics.swift b/test/IRGen/class_bounded_generics.swift
index 127383b..f69090f 100644
--- a/test/IRGen/class_bounded_generics.swift
+++ b/test/IRGen/class_bounded_generics.swift
@@ -85,14 +85,16 @@
// CHECK-LABEL: define hidden swiftcc void @"$S22class_bounded_generics0a1_B17_archetype_method{{[_0-9a-zA-Z]*}}F"(%objc_object*, %objc_object*, %swift.type* %T, i8** %T.ClassBoundBinary)
func class_bounded_archetype_method<T : ClassBoundBinary>(_ x: T, y: T) {
x.classBoundMethod()
- // CHECK: [[INHERITED:%.*]] = load i8*, i8** %T.ClassBoundBinary, align 8
+ // CHECK: [[INHERITED_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.ClassBoundBinary, i32 1
+ // CHECK: [[INHERITED:%.*]] = load i8*, i8** [[INHERITED_GEP]]
// CHECK: [[INHERITED_WTBL:%.*]] = bitcast i8* [[INHERITED]] to i8**
- // CHECK: [[WITNESS:%.*]] = load i8*, i8** [[INHERITED_WTBL]], align 8
+ // CHECK: [[WITNESS_GEP:%.*]] = getelementptr inbounds i8*, i8** [[INHERITED_WTBL]], i32 1
+ // CHECK: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_GEP]], align 8
// CHECK: [[WITNESS_FUNC:%.*]] = bitcast i8* [[WITNESS]] to void (%objc_object*, %swift.type*, i8**)
// CHECK: call swiftcc void [[WITNESS_FUNC]](%objc_object* swiftself %0, %swift.type* {{.*}}, i8** [[INHERITED_WTBL]])
x.classBoundBinaryMethod(y)
// CHECK: call %objc_object* @swift_unknownRetain(%objc_object* returned [[Y:%.*]])
- // CHECK: [[WITNESS_ENTRY:%.*]] = getelementptr inbounds i8*, i8** %T.ClassBoundBinary, i32 1
+ // CHECK: [[WITNESS_ENTRY:%.*]] = getelementptr inbounds i8*, i8** %T.ClassBoundBinary, i32 2
// CHECK: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ENTRY]], align 8
// CHECK: [[WITNESS_FUNC:%.*]] = bitcast i8* [[WITNESS]] to void (%objc_object*, %objc_object*, %swift.type*, i8**)
// CHECK: call swiftcc void [[WITNESS_FUNC]](%objc_object* [[Y]], %objc_object* swiftself %0, %swift.type* %T, i8** %T.ClassBoundBinary)
@@ -146,7 +148,7 @@
return x
// CHECK: [[INSTANCE_OPAQUE:%.*]] = bitcast %T22class_bounded_generics13ConcreteClassC* [[INSTANCE:%.*]] to %objc_object*
// CHECK: [[T0:%.*]] = insertvalue { %objc_object*, i8** } undef, %objc_object* [[INSTANCE_OPAQUE]], 0
- // CHECK: [[T1:%.*]] = insertvalue { %objc_object*, i8** } [[T0]], i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S22class_bounded_generics13ConcreteClassCAA0E5BoundAAWP", i32 0, i32 0), 1
+ // CHECK: [[T1:%.*]] = insertvalue { %objc_object*, i8** } [[T0]], i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S22class_bounded_generics13ConcreteClassCAA0E5BoundAAWP", i32 0, i32 0), 1
// CHECK: ret { %objc_object*, i8** } [[T1]]
}
@@ -154,7 +156,8 @@
func class_bounded_protocol_method(_ x: ClassBound) {
x.classBoundMethod()
// CHECK: [[METADATA:%.*]] = call %swift.type* @swift_getObjectType(%objc_object* %0)
- // CHECK: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_TABLE:%.*]], align 8
+ // CHECK: [[WITNESS_GEP:%.*]] = getelementptr inbounds i8*, i8** [[WITNESS_TABLE:%.*]], i32 1
+ // CHECK: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_GEP]], align 8
// CHECK: [[WITNESS_FN:%.*]] = bitcast i8* [[WITNESS]] to void (%objc_object*, %swift.type*, i8**)
// CHECK: call swiftcc void [[WITNESS_FN]](%objc_object* swiftself %0, %swift.type* [[METADATA]], i8** [[WITNESS_TABLE]])
}
diff --git a/test/IRGen/conformance_access_path.swift b/test/IRGen/conformance_access_path.swift
index 02b41ff..e3b9eb6 100644
--- a/test/IRGen/conformance_access_path.swift
+++ b/test/IRGen/conformance_access_path.swift
@@ -14,10 +14,11 @@
public func tested() {}
// CHECK-LABEL: define{{.*}}$S23conformance_access_path11ValidatablePAAE6tested2byyqd__m_t9InputTypeQyd__RszAA15ValidationSuiteRd__lF
- public func tested<S: ValidationSuite>(by suite: S.Type) where S.InputType == Self {
- // CHECK: [[S_AS_VALIDATION_SUITE:%[0-9]+]] = load i8*, i8** %S.ValidationSuite
+ public func tested<S: ValidationSuite>(by suite: S.Type) where S.InputType == Self {
+ // CHECK: [[S_AS_VALIDATION_SUITE_GEP:%[0-9]+]] = getelementptr inbounds i8*, i8** %S.ValidationSuite, i32 1
+ // CHECK: [[S_AS_VALIDATION_SUITE:%[0-9]+]] = load i8*, i8** [[S_AS_VALIDATION_SUITE_GEP]]
// CHECK-NEXT: [[S_VALIDATOR_BASE:%.*]] = bitcast i8* [[S_AS_VALIDATION_SUITE]] to i8**
- // CHECK-NEXT: [[S_VALIDATABLE_ADDR:%[0-9]+]] = getelementptr inbounds i8*, i8** [[S_VALIDATOR_BASE]], i32 1
+ // CHECK-NEXT: [[S_VALIDATABLE_ADDR:%[0-9]+]] = getelementptr inbounds i8*, i8** [[S_VALIDATOR_BASE]], i32 2
// CHECK-NEXT: [[S_VALIDATABLE_FN_RAW:%[0-9]+]] = load i8*, i8** [[S_VALIDATABLE_ADDR]]
// CHECK-NEXT: [[S_VALIDATABLE_FN:%[0-9]+]] = bitcast i8* [[S_VALIDATABLE_FN_RAW]] to i8** (%swift.type*, %swift.type*, i8**)*
// CHECK-NEXT: call i8** [[S_VALIDATABLE_FN]](%swift.type* %Self, %swift.type* %S, i8** %S.Validator)
diff --git a/test/IRGen/enum_resilience.swift b/test/IRGen/enum_resilience.swift
index e177cbd..7cef5df 100644
--- a/test/IRGen/enum_resilience.swift
+++ b/test/IRGen/enum_resilience.swift
@@ -263,3 +263,16 @@
// CHECK-LABEL: define{{( protected)?}} private void @initialize_metadata_EnumWithResilientPayload(i8*)
// CHECK: call void @swift_initEnumMetadataMultiPayload(%swift.type* {{.*}}, [[INT]] 256, [[INT]] 2, i8*** {{.*}})
+
+
+
+public protocol Prot {
+}
+
+private enum ProtGenEnumWithSize<T: Prot> {
+ case c1(s1: Size)
+ case c2(s2: Size)
+}
+
+// CHECK-LABEL: define{{( protected)?}} internal %T15enum_resilience19ProtGenEnumWithSize33_59077B69D65A4A3BEE0C93708067D5F0LLO* @"$S15enum_resilienceytWh2_"(%T15enum_resilience19ProtGenEnumWithSize
+// CHECK: ret %T15enum_resilience19ProtGenEnumWithSize33_59077B69D65A4A3BEE0C93708067D5F0LLO* %0
\ No newline at end of file
diff --git a/test/IRGen/generic_metatypes.swift b/test/IRGen/generic_metatypes.swift
index 9319647..5f9800a 100644
--- a/test/IRGen/generic_metatypes.swift
+++ b/test/IRGen/generic_metatypes.swift
@@ -69,7 +69,7 @@
// CHECK-LABEL: define hidden swiftcc { %swift.type*, i8** } @"$S17generic_metatypes15metatypeErasureyAA3Bas_pXpAA4ZangCmF"(%swift.type*) #0
func metatypeErasure(_ z: Zang.Type) -> Bas.Type {
// CHECK: [[RET:%.*]] = insertvalue { %swift.type*, i8** } undef, %swift.type* %0, 0
- // CHECK: [[RET2:%.*]] = insertvalue { %swift.type*, i8** } [[RET]], i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S17generic_metatypes4ZangCAA3BasAAWP", i32 0, i32 0), 1
+ // CHECK: [[RET2:%.*]] = insertvalue { %swift.type*, i8** } [[RET]], i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S17generic_metatypes4ZangCAA3BasAAWP", i32 0, i32 0), 1
// CHECK: ret { %swift.type*, i8** } [[RET2]]
return z
}
diff --git a/test/IRGen/generic_metatypes_arm.swift b/test/IRGen/generic_metatypes_arm.swift
index 6d1ef34..1f2d6d0 100644
--- a/test/IRGen/generic_metatypes_arm.swift
+++ b/test/IRGen/generic_metatypes_arm.swift
@@ -67,7 +67,7 @@
// CHECK-LABEL: define hidden swiftcc { %swift.type*, i8** } @"$S17generic_metatypes15metatypeErasureyAA3Bas_pXpAA4ZangCmF"(%swift.type*) #0
func metatypeErasure(_ z: Zang.Type) -> Bas.Type {
// CHECK: [[RET:%.*]] = insertvalue { %swift.type*, i8** } undef, %swift.type* %0, 0
- // CHECK: [[RET2:%.*]] = insertvalue { %swift.type*, i8** } [[RET]], i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S17generic_metatypes4ZangCAA3BasAAWP", i32 0, i32 0), 1
+ // CHECK: [[RET2:%.*]] = insertvalue { %swift.type*, i8** } [[RET]], i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S17generic_metatypes4ZangCAA3BasAAWP", i32 0, i32 0), 1
// CHECK: ret { %swift.type*, i8** } [[RET2]]
return z
}
diff --git a/test/IRGen/generic_structs.sil b/test/IRGen/generic_structs.sil
index 53dcf10..c83ce1a 100644
--- a/test/IRGen/generic_structs.sil
+++ b/test/IRGen/generic_structs.sil
@@ -245,17 +245,20 @@
// CHECK: [[METADATA:%.*]] = call %swift.type* @swift_allocateGenericValueMetadata
-// CHECK: [[T0:%.*]] = load i8*, i8** %T.ParentHasAssociatedType
+// CHECK: [[T0_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.ParentHasAssociatedType, i32 1
+// CHECK: [[T0:%.*]] = load i8*, i8** [[T0_GEP]]
// CHECK: [[T1:%.*]] = bitcast i8* [[T0]] to i8**
-// CHECK: [[T2:%.*]] = load i8*, i8** [[T1]], align 8, !invariant.load
+// CHECK: [[T2_GEP:%.*]] = getelementptr inbounds i8*, i8** [[T1]], i32 1
+// CHECK: [[T2:%.*]] = load i8*, i8** [[T2_GEP]], align 8, !invariant.load
// CHECK: [[T3:%.*]] = bitcast i8* [[T2]] to %swift.type*
// CHECK: %T.Assoc = call %swift.type* [[T3]](%swift.type* %T, i8** [[T1]])
-// CHECK: [[T0:%.*]] = getelementptr inbounds i8*, i8** %T.ParentHasAssociatedType, i32 2
+// CHECK: [[T0:%.*]] = getelementptr inbounds i8*, i8** %T.ParentHasAssociatedType, i32 3
// CHECK: [[T1:%.*]] = load i8*, i8** [[T0]],
// CHECK: [[T2:%.*]] = bitcast i8* [[T1]] to i8** (%swift.type*, %swift.type*, i8**)*
// CHECK: %T.Assoc.HasAssociatedType = call i8** [[T2]](%swift.type* %T.Assoc, %swift.type* %T, i8** %T.ParentHasAssociatedType)
-// CHECK: [[T0:%.*]] = load i8*, i8** %T.Assoc.HasAssociatedType,
+// CHECK: [[T0_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.Assoc.HasAssociatedType, i32 1
+// CHECK: [[T0:%.*]] = load i8*, i8** [[T0_GEP]]
// CHECK: [[T1:%.*]] = bitcast i8* [[T0]] to %swift.type* (%swift.type*, i8**)*
// CHECK: %T.Assoc.Assoc = call %swift.type* [[T1]](%swift.type* %T.Assoc, i8** %T.Assoc.HasAssociatedType)
diff --git a/test/IRGen/generic_types.swift b/test/IRGen/generic_types.swift
index cd1e8b1..f071062 100644
--- a/test/IRGen/generic_types.swift
+++ b/test/IRGen/generic_types.swift
@@ -24,7 +24,7 @@
// CHECK-objc-SAME: %swift.opaque* @_objc_empty_cache,
// CHECK-SAME: %swift.opaque* null,
// CHECK-SAME: i64 {{1|2}},
-// CHECK-SAME: i32 2,
+// CHECK-SAME: i32 {{3|2}},
// CHECK-SAME: i32 0,
// CHECK-SAME: i32 24,
// CHECK-SAME: i16 7,
@@ -53,7 +53,7 @@
// CHECK-objc-SAME: %swift.opaque* @_objc_empty_cache,
// CHECK-SAME: %swift.opaque* null,
// CHECK-SAME: i64 {{1|2}},
-// CHECK-SAME: i32 2,
+// CHECK-SAME: i32 {{3|2}},
// CHECK-SAME: i32 0,
// CHECK-SAME: i32 24,
// CHECK-SAME: i16 7,
@@ -75,7 +75,7 @@
// CHECK-objc-SAME: %swift.opaque* @_objc_empty_cache,
// CHECK-SAME: %swift.opaque* null,
// CHECK-SAME: i64 {{1|2}},
-// CHECK-SAME: i32 2,
+// CHECK-SAME: i32 {{3|2}},
// CHECK-SAME: i32 0,
// CHECK-SAME: i32 24,
// CHECK-SAME: i16 7,
@@ -97,7 +97,7 @@
// CHECK-objc-SAME: %swift.opaque* @_objc_empty_cache,
// CHECK-SAME: %swift.opaque* null,
// CHECK-SAME: i64 {{1|2}},
-// CHECK-SAME: i32 2,
+// CHECK-SAME: i32 {{3|2}},
// CHECK-SAME: i32 0,
// CHECK-SAME: i32 24,
// CHECK-SAME: i16 7,
diff --git a/test/IRGen/ivar_destroyer.sil b/test/IRGen/ivar_destroyer.sil
index 8b7ef4d..f1ef5c9 100644
--- a/test/IRGen/ivar_destroyer.sil
+++ b/test/IRGen/ivar_destroyer.sil
@@ -15,7 +15,7 @@
// \ CHECK: [[OPAQUE]]* @_objc_empty_cache,
// \ CHECK: [[OPAQUE]]* null,
// \ CHECK: i64 add (i64 ptrtoint ({{.*}}* @_DATA__TtC14ivar_destroyer17NonTrivialDerived to i64), i64 {{1|2}}),
-// \ CHECK: i32 2,
+// \ CHECK: i32 {{3|2}},
// \ CHECK: i32 0,
// \ CHECK: i32 24,
// \ CHECK: i16 7,
diff --git a/test/IRGen/objc_attr_NSManaged.sil b/test/IRGen/objc_attr_NSManaged.sil
index 0523079..5f907a6 100644
--- a/test/IRGen/objc_attr_NSManaged.sil
+++ b/test/IRGen/objc_attr_NSManaged.sil
@@ -27,7 +27,7 @@
// The getter/setter should not show up in the Swift metadata.
/* FIXME: sil_vtable parser picks the wrong 'init' overload. Both vtable entries
ought to be nonnull here. rdar://problem/19572342 */
-// CHECK: @"$S19objc_attr_NSManaged10SwiftGizmoCMf" = internal global <{ {{.*}} }> <{ void (%T19objc_attr_NSManaged10SwiftGizmoC*)* @"$S19objc_attr_NSManaged10SwiftGizmoCfD", i8** @"$SBOWV", i64 ptrtoint (%objc_class* @"OBJC_METACLASS_$__TtC19objc_attr_NSManaged10SwiftGizmo" to i64), %objc_class* @"OBJC_CLASS_$_Gizmo", %swift.opaque* @_objc_empty_cache, %swift.opaque* null, i64 add (i64 ptrtoint ({ i32, i32, i32, i32, i8*, i8*, { i32, i32, [2 x { i8*, i8*, i8* }] }*, i8*, i8*, i8*, { i32, i32, [1 x { i8*, i8* }] }* }* @_DATA__TtC19objc_attr_NSManaged10SwiftGizmo to i64), i64 {{1|2}}), i32 0, i32 0, i32 8, i16 7, i16 0, i32 112, i32 16, {{.*}}* @"$S19objc_attr_NSManaged10SwiftGizmoCMn", i8* null, %T19objc_attr_NSManaged10SwiftGizmoC* (i64, %T19objc_attr_NSManaged10SwiftGizmoC*)* @"$S19objc_attr_NSManaged10SwiftGizmoC7bellsOnACSi_tcfc", i8* bitcast (void ()* @swift_deletedMethodError to i8*) }>
+// CHECK: @"$S19objc_attr_NSManaged10SwiftGizmoCMf" = internal global <{ {{.*}} }> <{ void (%T19objc_attr_NSManaged10SwiftGizmoC*)* @"$S19objc_attr_NSManaged10SwiftGizmoCfD", i8** @"$SBOWV", i64 ptrtoint (%objc_class* @"OBJC_METACLASS_$__TtC19objc_attr_NSManaged10SwiftGizmo" to i64), %objc_class* @"OBJC_CLASS_$_Gizmo", %swift.opaque* @_objc_empty_cache, %swift.opaque* null, i64 add (i64 ptrtoint ({ i32, i32, i32, i32, i8*, i8*, { i32, i32, [2 x { i8*, i8*, i8* }] }*, i8*, i8*, i8*, { i32, i32, [1 x { i8*, i8* }] }* }* @_DATA__TtC19objc_attr_NSManaged10SwiftGizmo to i64), i64 {{1|2}}), i32 {{1|0}}, i32 0, i32 8, i16 7, i16 0, i32 112, i32 16, {{.*}}* @"$S19objc_attr_NSManaged10SwiftGizmoCMn", i8* null, %T19objc_attr_NSManaged10SwiftGizmoC* (i64, %T19objc_attr_NSManaged10SwiftGizmoC*)* @"$S19objc_attr_NSManaged10SwiftGizmoC7bellsOnACSi_tcfc", i8* bitcast (void ()* @swift_deletedMethodError to i8*) }>
@objc class SwiftGizmo : Gizmo {
@objc @NSManaged var x: X
diff --git a/test/IRGen/objc_protocols.swift b/test/IRGen/objc_protocols.swift
index 6676c8b..0636891 100644
--- a/test/IRGen/objc_protocols.swift
+++ b/test/IRGen/objc_protocols.swift
@@ -11,7 +11,7 @@
// -- Protocol "Frungible" inherits only objc protocols and should have no
// out-of-line inherited witnesses in its witness table.
-// CHECK: [[ZIM_FRUNGIBLE_WITNESS:@"\$S14objc_protocols3ZimCAA9FrungibleAAWP"]] = hidden constant [1 x i8*] [
+// CHECK: [[ZIM_FRUNGIBLE_WITNESS:@"\$S14objc_protocols3ZimCAA9FrungibleAAWP"]] = hidden constant [2 x i8*] [
// CHECK: i8* bitcast (void (%T14objc_protocols3ZimC*, %swift.type*, i8**)* @"$S14objc_protocols3ZimCAA9FrungibleA2aDP6frungeyyFTW" to i8*)
// CHECK: ]
@@ -125,7 +125,7 @@
func mixed_heritage_erasure(_ x: Zim) -> Frungible {
return x
// CHECK: [[T0:%.*]] = insertvalue { %objc_object*, i8** } undef, %objc_object* {{%.*}}, 0
- // CHECK: insertvalue { %objc_object*, i8** } [[T0]], i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* [[ZIM_FRUNGIBLE_WITNESS]], i32 0, i32 0), 1
+ // CHECK: insertvalue { %objc_object*, i8** } [[T0]], i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* [[ZIM_FRUNGIBLE_WITNESS]], i32 0, i32 0), 1
}
// CHECK-LABEL: define hidden swiftcc void @"$S14objc_protocols0A8_generic{{[_0-9a-zA-Z]*}}F"(%objc_object*, %swift.type* %T) {{.*}} {
diff --git a/test/IRGen/open_boxed_existential.sil b/test/IRGen/open_boxed_existential.sil
index 9143525..4b4d8e9 100644
--- a/test/IRGen/open_boxed_existential.sil
+++ b/test/IRGen/open_boxed_existential.sil
@@ -15,7 +15,7 @@
// CHECK: [[OUT_WITNESS:%.*]] = getelementptr inbounds {{.*}} [[OUT]], i32 0, i32 2
// CHECK: [[WITNESS:%.*]] = load {{.*}} [[OUT_WITNESS]]
%o = open_existential_box %b : $Error to $*@opened("01234567-89AB-CDEF-0123-000000000000") Error
- // CHECK: [[CODE_ADDR:%.*]] = getelementptr {{.*}} [[WITNESS]], i32 1
+ // CHECK: [[CODE_ADDR:%.*]] = getelementptr {{.*}} [[WITNESS]], i32 2
// CHECK: [[CODE:%.*]] = load {{.*}} [[CODE_ADDR]]
%m = witness_method $@opened("01234567-89AB-CDEF-0123-000000000000") Error, #Error._code!getter.1, %o : $*@opened("01234567-89AB-CDEF-0123-000000000000") Error : $@convention(witness_method: Error) <Self: Error> (@in_guaranteed Self) -> Int
// CHECK: [[CODE_FN:%.*]] = bitcast i8* [[CODE]] to [[INT:i[0-9]+]] (%swift.opaque*, %swift.type*, i8**)*
diff --git a/test/IRGen/partial_apply_forwarder.sil b/test/IRGen/partial_apply_forwarder.sil
index 8d08d70..0a2982e 100644
--- a/test/IRGen/partial_apply_forwarder.sil
+++ b/test/IRGen/partial_apply_forwarder.sil
@@ -27,7 +27,7 @@
// CHECK-LABEL: define internal swiftcc %T23partial_apply_forwarder1DCyAA1CCG* @"$S23unspecialized_uncurriedTA"(%swift.refcounted*
// CHECK: [[TYPE:%.*]] = call %swift.type* @"$S23partial_apply_forwarder1CCMa"()
// CHECK: [[CAST:%.*]] = bitcast %swift.refcounted* %0 to %T23partial_apply_forwarder1EC*
-// CHECK: [[CALL:%.*]] = call swiftcc %T23partial_apply_forwarder1DC* @unspecialized_uncurried(%swift.type* [[TYPE]], i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S23partial_apply_forwarder1CCAA1PAAWP", i32 0, i32 0), %T23partial_apply_forwarder1EC* swiftself [[CAST]])
+// CHECK: [[CALL:%.*]] = call swiftcc %T23partial_apply_forwarder1DC* @unspecialized_uncurried(%swift.type* [[TYPE]], i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S23partial_apply_forwarder1CCAA1PAAWP", i32 0, i32 0), %T23partial_apply_forwarder1EC* swiftself [[CAST]])
sil hidden @specialized_curried : $@convention(thin) (@owned E) -> @owned @callee_owned () -> @owned D<C> {
bb0(%0 : $E):
diff --git a/test/IRGen/protocol_conformance_records.swift b/test/IRGen/protocol_conformance_records.swift
index d9af700..ade7fc6 100644
--- a/test/IRGen/protocol_conformance_records.swift
+++ b/test/IRGen/protocol_conformance_records.swift
@@ -8,13 +8,6 @@
func runce()
}
-// CHECK-LABEL: @"\01l_protocols" = private constant [
-
-// CHECK: %swift.protocolref {
-// CHECK-SAME: @"$S28protocol_conformance_records8RuncibleMp"
-// CHECK-SAME: %swift.protocolref {
-// CHECK-SAME: @"$S28protocol_conformance_records5SpoonMp"
-
// CHECK-LABEL: @"$S28protocol_conformance_records15NativeValueTypeVAA8RuncibleAAMc" ={{ protected | }}constant %swift.protocol_conformance_descriptor {
// -- protocol descriptor
// CHECK-SAME: [[RUNCIBLE:@"\$S28protocol_conformance_records8RuncibleMp"]]
@@ -119,6 +112,13 @@
// CHECK-SAME: }
extension Int : OtherResilientProtocol { }
+// CHECK-LABEL: @"\01l_protocols" = private constant [
+
+// CHECK: %swift.protocolref {
+// CHECK-SAME: @"$S28protocol_conformance_records8RuncibleMp"
+// CHECK-SAME: %swift.protocolref {
+// CHECK-SAME: @"$S28protocol_conformance_records5SpoonMp"
+
// CHECK-LABEL: @"\01l_protocol_conformances" = private constant
// CHECK-SAME: @"$S28protocol_conformance_records15NativeValueTypeVAA8RuncibleAAMc"
// CHECK-SAME: @"$S28protocol_conformance_records15NativeClassTypeCAA8RuncibleAAMc"
@@ -127,3 +127,5 @@
// CHECK-SAME: @"$S28protocol_conformance_records17NativeGenericTypeVyxGAA5SpoonA2aERzlMc"
// CHECK-SAME: @"$SSi18resilient_protocol22OtherResilientProtocol0B20_conformance_recordsMc"
+
+
diff --git a/test/IRGen/protocol_metadata.swift b/test/IRGen/protocol_metadata.swift
index 75cbf87..184db19 100644
--- a/test/IRGen/protocol_metadata.swift
+++ b/test/IRGen/protocol_metadata.swift
@@ -90,6 +90,7 @@
var instance: Assoc { get set }
static var global: Assoc { get set }
}
+
// CHECK: [[COMPREHENSIVE_REQTS:@.*]] = internal unnamed_addr constant [11 x %swift.protocol_requirement]
// CHECK-SAME: [%swift.protocol_requirement { i32 6, i32 0 },
// CHECK-SAME: %swift.protocol_requirement { i32 7, i32 0 },
@@ -103,6 +104,15 @@
// CHECK-SAME: %swift.protocol_requirement { i32 4, i32 0 },
// CHECK-SAME: %swift.protocol_requirement { i32 5, i32 0 }]
+// CHECK: [[COMPREHENSIVE_ASSOC_NAME:@.*]] = private constant [6 x i8] c"Assoc\00"
+
+// CHECK: @"$S17protocol_metadata13ComprehensiveMp" = hidden constant %swift.protocol
+// CHECK-SAME: i32 72, i32 7, i16 11, i16 11,
+// CHECK-SAME: [11 x %swift.protocol_requirement]* [[COMPREHENSIVE_REQTS]]
+// CHECK-SAME: i32 0
+// CHECK-SAME: i32 trunc
+// CHECK-SAME: [6 x i8]* [[COMPREHENSIVE_ASSOC_NAME]]
+
func reify_metadata<T>(_ x: T) {}
// CHECK: define hidden swiftcc void @"$S17protocol_metadata0A6_types{{[_0-9a-zA-Z]*}}F"
diff --git a/test/IRGen/protocol_resilience.sil b/test/IRGen/protocol_resilience.sil
index 1f2b8bf..ab37060 100644
--- a/test/IRGen/protocol_resilience.sil
+++ b/test/IRGen/protocol_resilience.sil
@@ -59,7 +59,7 @@
// Witness table for conformance with resilient associated type
-// CHECK: @"$S19protocol_resilience26ConformsWithResilientAssocVAA03HaseF0AAWP" = {{(protected )?}}hidden constant [2 x i8*] [
+// CHECK: @"$S19protocol_resilience26ConformsWithResilientAssocVAA03HaseF0AAWP" = {{(protected )?}}hidden constant [3 x i8*] [
// CHECK-SAME: i8* bitcast (%swift.type* ()* @"$S19protocol_resilience23ResilientConformingTypeVMa" to i8*),
// CHECK-SAME: i8* bitcast (i8** ()* @"$S19protocol_resilience23ResilientConformingTypeV010resilient_A005OtherC8ProtocolAAWa" to i8*)
// CHECK-SAME: ]
@@ -90,7 +90,7 @@
// Make sure we can do dynamic dispatch to other protocol requirements
// from a default implementation
- // CHECK-NEXT: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %SelfWitnessTable, i32 4
+ // CHECK-NEXT: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %SelfWitnessTable, i32 5
// CHECK-NEXT: [[WITNESS_FN:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[WITNESS:%.*]] = bitcast i8* [[WITNESS_FN]] to void (%swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[WITNESS]](%swift.opaque* noalias nocapture swiftself %0, %swift.type* %Self, i8** %SelfWitnessTable)
@@ -130,7 +130,7 @@
// Make sure we can do dynamic dispatch to other protocol requirements
// from a default implementation
- // CHECK-NEXT: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %SelfWitnessTable, i32 7
+ // CHECK-NEXT: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %SelfWitnessTable, i32 8
// CHECK-NEXT: [[WITNESS_FN:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[WITNESS:%.*]] = bitcast i8* [[WITNESS_FN]] to void (%swift.type*, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[WITNESS]](%swift.type* swiftself %0, %swift.type* %Self, i8** %SelfWitnessTable)
@@ -203,7 +203,7 @@
// concrete Self type.
// CHECK-NEXT: [[SELF:%.*]] = bitcast %T19protocol_resilience16ConformingStructV* %0 to %swift.opaque*
- // CHECK-NEXT: call swiftcc void @defaultC(%swift.opaque* noalias nocapture swiftself [[SELF]], %swift.type* bitcast ({{i32|i64}}* {{.*}}) to %swift.type*), i8** getelementptr inbounds ([8 x i8*], [8 x i8*]* @"$S19protocol_resilience16ConformingStructVAA17ResilientProtocolAAWP", i32 0, i32 0))
+ // CHECK-NEXT: call swiftcc void @defaultC(%swift.opaque* noalias nocapture swiftself [[SELF]], %swift.type* bitcast ({{i32|i64}}* {{.*}}) to %swift.type*), i8** getelementptr inbounds ([9 x i8*], [9 x i8*]* @"$S19protocol_resilience16ConformingStructVAA17ResilientProtocolAAWP", i32 0, i32 0))
%fn1 = function_ref @defaultC : $@convention(witness_method: ResilientProtocol) <Self where Self : ResilientProtocol> (@in_guaranteed Self) -> ()
%ignore1 = apply %fn1<ConformingStruct>(%0) : $@convention(witness_method: ResilientProtocol) <Self where Self : ResilientProtocol> (@in_guaranteed Self) -> ()
@@ -224,7 +224,7 @@
// CHECK-NEXT: [[CONTEXT:%.*]] = call noalias %swift.refcounted* @swift_rt_swift_allocObject({{.*}})
// CHECK-NEXT: [[LAYOUT:%.*]] = bitcast %swift.refcounted* [[CONTEXT]] to <{ %swift.refcounted, i8* }>*
// CHECK-NEXT: [[WTABLE:%.*]] = getelementptr inbounds <{ %swift.refcounted, i8* }>, <{ %swift.refcounted, i8* }>* [[LAYOUT]], i32 0, i32 1
- // CHECK-NEXT: store i8* bitcast ([8 x i8*]* @"$S19protocol_resilience16ConformingStructVAA17ResilientProtocolAAWP" to i8*), i8** [[WTABLE]]
+ // CHECK-NEXT: store i8* bitcast ([9 x i8*]* @"$S19protocol_resilience16ConformingStructVAA17ResilientProtocolAAWP" to i8*), i8** [[WTABLE]]
%fn1 = function_ref @defaultC : $@convention(witness_method: ResilientProtocol) <Self where Self : ResilientProtocol> (@in_guaranteed Self) -> ()
%ignore1 = partial_apply %fn1<ConformingStruct>() : $@convention(witness_method: ResilientProtocol) <Self where Self : ResilientProtocol> (@in_guaranteed Self) -> ()
@@ -377,7 +377,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: [[ARG:%.*]] = bitcast %T19protocol_resilience26ConformsWithResilientAssocV* %0 to %swift.opaque*
- // CHECK-NEXT: call swiftcc void @doSomethingAssoc(%swift.opaque* noalias nocapture [[ARG]], %swift.type* bitcast ({{i32|i64}}* getelementptr inbounds ({{.*}} @"$S19protocol_resilience26ConformsWithResilientAssocVMf", i32 0, i32 1) to %swift.type*), i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S19protocol_resilience26ConformsWithResilientAssocVAA03HaseF0AAWP", i32 0, i32 0))
+ // CHECK-NEXT: call swiftcc void @doSomethingAssoc(%swift.opaque* noalias nocapture [[ARG]], %swift.type* bitcast ({{i32|i64}}* getelementptr inbounds ({{.*}} @"$S19protocol_resilience26ConformsWithResilientAssocVMf", i32 0, i32 1) to %swift.type*), i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @"$S19protocol_resilience26ConformsWithResilientAssocVAA03HaseF0AAWP", i32 0, i32 0))
%fn = function_ref @doSomethingAssoc : $@convention(thin) <T : HasResilientAssoc> (@in T) -> ()
%ignore = apply %fn<ConformsWithResilientAssoc>(%0) : $@convention(thin) <T : HasResilientAssoc> (@in T) -> ()
@@ -396,4 +396,4 @@
// CHECK-LABEL: define{{( protected)?}} hidden i8** @"$S19protocol_resilience16ConformingStructVAA17ResilientProtocolAAWa"()
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i8** getelementptr inbounds ([8 x i8*], [8 x i8*]* @"$S19protocol_resilience16ConformingStructVAA17ResilientProtocolAAWP", i32 0, i32 0)
+// CHECK-NEXT: ret i8** getelementptr inbounds ([9 x i8*], [9 x i8*]* @"$S19protocol_resilience16ConformingStructVAA17ResilientProtocolAAWP", i32 0, i32 0)
diff --git a/test/IRGen/protocol_resilience_thunks.swift b/test/IRGen/protocol_resilience_thunks.swift
index 0d6c87d..d4417c5 100644
--- a/test/IRGen/protocol_resilience_thunks.swift
+++ b/test/IRGen/protocol_resilience_thunks.swift
@@ -11,7 +11,7 @@
// CHECK: call swiftcc [[INT]] @"$S18resilient_protocol21ResilientBaseProtocolP11requirementSiyFTj"(%swift.opaque* noalias nocapture swiftself %0, %swift.type* %T, i8** %T.ResilientBaseProtocol)
// CHECK: ret void
public func callResilientWitnessMethod<T : ResilientBaseProtocol>(_ value: T) {
- value.requirement()
+ _ = value.requirement()
}
public protocol MyResilientProtocol {
@@ -28,55 +28,56 @@
}
// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$S26protocol_resilience_thunks19MyResilientProtocolP11returnsVoid1xySb_tFTj"(i1, %swift.opaque* noalias nocapture swiftself, %swift.type*, i8**)
-// CHECK: [[WITNESS:%.*]] = load i8*, i8** %3
+// CHECK: [[WITNESS_GEP:%.*]] = getelementptr inbounds i8*, i8** %3, i32 1
+// CHECK: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_GEP]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to void (i1, %swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[FN]](i1 %0, %swift.opaque* noalias nocapture swiftself %1, %swift.type* %2, i8** %3)
// CHECK-NEXT: ret void
// CHECK-LABEL: define{{( protected)?}} swiftcc i1 @"$S26protocol_resilience_thunks19MyResilientProtocolP11returnsBoolSbyFTj"(%swift.opaque* noalias nocapture swiftself, %swift.type*, i8**)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %2, i32 1
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %2, i32 2
// CHECK-NEXT: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to i1 (%swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: [[RESULT:%.*]] = call swiftcc i1 [[FN]](%swift.opaque* noalias nocapture swiftself %0, %swift.type* %1, i8** %2)
// CHECK-NEXT: ret i1 [[RESULT]]
// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$S26protocol_resilience_thunks19MyResilientProtocolP10returnsAnyypyFTj"(%Any* noalias nocapture sret, %swift.opaque* noalias nocapture swiftself, %swift.type*, i8**)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %3, i32 2
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %3, i32 3
// CHECK-NEXT: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to void (%Any*, %swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[FN]](%Any* noalias nocapture sret %0, %swift.opaque* noalias nocapture swiftself %1, %swift.type* %2, i8** %3)
// CHECK-NEXT: ret void
// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$S26protocol_resilience_thunks19MyResilientProtocolP12throwingFuncyyKFTj"(%swift.opaque* noalias nocapture swiftself, %swift.error**{{( swifterror)?}}, %swift.type*, i8**)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %3, i32 3
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %3, i32 4
// CHECK-NEXT: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to void (%swift.opaque*, %swift.error**, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[FN]](%swift.opaque* noalias nocapture swiftself %0, %swift.error**{{( swifterror)?}} %1, %swift.type* %2, i8** %3)
// CHECK-NEXT: ret void
// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$S26protocol_resilience_thunks19MyResilientProtocolP11genericFuncyqd__qd__lFTj"(%swift.opaque* noalias nocapture sret, %swift.opaque* noalias nocapture, %swift.type*, %swift.opaque* noalias nocapture swiftself, %swift.type*, i8**)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %5, i32 4
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %5, i32 5
// CHECK-NEXT: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to void (%swift.opaque*, %swift.opaque*, %swift.type*, %swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[FN]](%swift.opaque* noalias nocapture sret %0, %swift.opaque* noalias nocapture %1, %swift.type* %2, %swift.opaque* noalias nocapture swiftself %3, %swift.type* %4, i8** %5)
// CHECK-NEXT: ret void
// CHECK-LABEL: define{{( protected)?}} swiftcc i1 @"$S26protocol_resilience_thunks19MyResilientProtocolP8propertySbvgTj"(%swift.opaque* noalias nocapture swiftself, %swift.type*, i8**)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %2, i32 5
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %2, i32 6
// CHECK-NEXT: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to i1 (%swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: [[RESULT:%.*]] = call swiftcc i1 %5(%swift.opaque* noalias nocapture swiftself %0, %swift.type* %1, i8** %2)
// CHECK-NEXT: ret i1 [[RESULT]]
// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$S26protocol_resilience_thunks19MyResilientProtocolP8propertySbvsTj"(i1, %swift.opaque* nocapture swiftself, %swift.type*, i8**)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %3, i32 6
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %3, i32 7
// CHECK-NEXT: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to void (i1, %swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: call swiftcc void [[FN]](i1 %0, %swift.opaque* nocapture swiftself %1, %swift.type* %2, i8** %3)
// CHECK-NEXT: ret void
// CHECK-LABEL: define{{( protected)?}} swiftcc { i8*, {{i32|i64}} } @"$S26protocol_resilience_thunks19MyResilientProtocolP8propertySbvmTj"(i8*, [{{12|24}} x i8]* nocapture dereferenceable({{12|24}}), %swift.opaque* nocapture swiftself, %swift.type*, i8**)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %4, i32 7
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %4, i32 8
// CHECK-NEXT: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK-NEXT: [[FN:%.*]] = bitcast i8* [[WITNESS]] to { i8*, [[INT]] } (i8*, [{{12|24}} x i8]*, %swift.opaque*, %swift.type*, i8**)*
// CHECK-NEXT: [[RESULT:%.*]] = call swiftcc { i8*, [[INT]] } [[FN]](i8* %0, [{{12|24}} x i8]* nocapture dereferenceable({{12|24}}) %1, %swift.opaque* nocapture swiftself %2, %swift.type* %3, i8** %4)
diff --git a/test/IRGen/sil_generic_witness_methods.swift b/test/IRGen/sil_generic_witness_methods.swift
index 320104b..d8127b5 100644
--- a/test/IRGen/sil_generic_witness_methods.swift
+++ b/test/IRGen/sil_generic_witness_methods.swift
@@ -15,22 +15,23 @@
// CHECK-LABEL: define hidden swiftcc void @"$S27sil_generic_witness_methods05call_D0{{[_0-9a-zA-Z]*}}F"(%swift.opaque* noalias nocapture, %swift.opaque* noalias nocapture, %swift.type* %T, %swift.type* %U, i8** %T.P)
func call_methods<T: P, U>(_ x: T, y: S, z: U) {
- // CHECK: [[STATIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 1
+ // CHECK: [[STATIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 2
// CHECK: [[STATIC_METHOD_PTR:%.*]] = load i8*, i8** [[STATIC_METHOD_ADDR]], align 8
// CHECK: [[STATIC_METHOD:%.*]] = bitcast i8* [[STATIC_METHOD_PTR]] to void (%swift.type*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[STATIC_METHOD]](%swift.type* swiftself %T, %swift.type* %T, i8** %T.P)
T.concrete_static_method()
- // CHECK: [[CONCRETE_METHOD_PTR:%.*]] = load i8*, i8** %T.P, align 8
+ // CHECK: [[CONCRETE_METHOD_PTR_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 1
+ // CHECK: [[CONCRETE_METHOD_PTR:%.*]] = load i8*, i8** [[CONCRETE_METHOD_PTR_GEP]]
// CHECK: [[CONCRETE_METHOD:%.*]] = bitcast i8* [[CONCRETE_METHOD_PTR]] to void (%swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[CONCRETE_METHOD]](%swift.opaque* noalias nocapture swiftself {{%.*}}, %swift.type* %T, i8** %T.P)
x.concrete_method()
- // CHECK: [[GENERIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 2
+ // CHECK: [[GENERIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 3
// CHECK: [[GENERIC_METHOD_PTR:%.*]] = load i8*, i8** [[GENERIC_METHOD_ADDR]], align 8
// CHECK: [[GENERIC_METHOD:%.*]] = bitcast i8* [[GENERIC_METHOD_PTR]] to void (%swift.opaque*, %swift.type*, %swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[GENERIC_METHOD]](%swift.opaque* noalias nocapture {{.*}}, %swift.type* {{.*}} @"$S27sil_generic_witness_methods1SVMf", {{.*}} %swift.opaque* noalias nocapture swiftself {{.*}}, %swift.type* %T, i8** %T.P)
x.generic_method(y)
- // CHECK: [[GENERIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 2
+ // CHECK: [[GENERIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 3
// CHECK: [[GENERIC_METHOD_PTR:%.*]] = load i8*, i8** [[GENERIC_METHOD_ADDR]], align 8
// CHECK: [[GENERIC_METHOD:%.*]] = bitcast i8* [[GENERIC_METHOD_PTR]] to void (%swift.opaque*, %swift.type*, %swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[GENERIC_METHOD]](%swift.opaque* noalias nocapture {{.*}}, %swift.type* %U, %swift.opaque* noalias nocapture swiftself {{.*}}, %swift.type* %T, i8** %T.P)
@@ -43,7 +44,8 @@
// CHECK: [[METADATA:%.*]] = load %swift.type*, %swift.type** [[METADATA_ADDR]], align 8
// CHECK: [[WTABLE_ADDR:%.*]] = getelementptr inbounds %T27sil_generic_witness_methods1PP, %T27sil_generic_witness_methods1PP* [[X]], i32 0, i32 2
// CHECK: [[WTABLE:%.*]] = load i8**, i8*** [[WTABLE_ADDR]], align 8
- // CHECK: [[CONCRETE_METHOD_PTR:%.*]] = load i8*, i8** [[WTABLE]], align 8
+ // CHECK: [[CONCRETE_METHOD_PTR_GEP:%.*]] = getelementptr inbounds i8*, i8** [[WTABLE]], i32 1
+ // CHECK: [[CONCRETE_METHOD_PTR:%.*]] = load i8*, i8** [[CONCRETE_METHOD_PTR_GEP]], align 8
// CHECK: [[CONCRETE_METHOD:%.*]] = bitcast i8* [[CONCRETE_METHOD_PTR]] to void (%swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[CONCRETE_METHOD]](%swift.opaque* noalias nocapture swiftself {{%.*}}, %swift.type* [[METADATA]], i8** [[WTABLE]])
x.concrete_method()
@@ -52,7 +54,7 @@
// CHECK: [[METADATA:%.*]] = load %swift.type*, %swift.type** [[METADATA_ADDR]], align 8
// CHECK: [[WTABLE_ADDR:%.*]] = getelementptr inbounds %T27sil_generic_witness_methods1PP, %T27sil_generic_witness_methods1PP* [[X:%.*]], i32 0, i32 2
// CHECK: [[WTABLE:%.*]] = load i8**, i8*** [[WTABLE_ADDR]], align 8
- // CHECK: [[GENERIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** [[WTABLE]], i32 2
+ // CHECK: [[GENERIC_METHOD_ADDR:%.*]] = getelementptr inbounds i8*, i8** [[WTABLE]], i32 3
// CHECK: [[GENERIC_METHOD_PTR:%.*]] = load i8*, i8** [[GENERIC_METHOD_ADDR]], align 8
// CHECK: [[GENERIC_METHOD:%.*]] = bitcast i8* [[GENERIC_METHOD_PTR]] to void (%swift.opaque*, %swift.type*, %swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[GENERIC_METHOD]](%swift.opaque* noalias nocapture {{.*}}, %swift.type* {{.*}} @"$S27sil_generic_witness_methods1SVMf", {{.*}} %swift.opaque* noalias nocapture swiftself {{%.*}}, %swift.type* [[METADATA]], i8** [[WTABLE]])
diff --git a/test/IRGen/sil_witness_tables.swift b/test/IRGen/sil_witness_tables.swift
index 89a12c4..c83060b 100644
--- a/test/IRGen/sil_witness_tables.swift
+++ b/test/IRGen/sil_witness_tables.swift
@@ -37,17 +37,17 @@
}
// CHECK: [[EXTERNAL_CONFORMER_EXTERNAL_P_WITNESS_TABLE:@"\$S39sil_witness_tables_external_conformance17ExternalConformerVAA0F1PAAWP"]] = external global i8*, align 8
-// CHECK: [[CONFORMER_Q_WITNESS_TABLE:@"\$S18sil_witness_tables9ConformerVAA1QAAWP"]] = hidden constant [2 x i8*] [
-// CHECK: i8* bitcast ([4 x i8*]* [[CONFORMER_P_WITNESS_TABLE:@"\$S18sil_witness_tables9ConformerVAA1PAAWP"]] to i8*),
+// CHECK: [[CONFORMER_Q_WITNESS_TABLE:@"\$S18sil_witness_tables9ConformerVAA1QAAWP"]] = hidden constant [3 x i8*] [
+// CHECK: i8* bitcast ([5 x i8*]* [[CONFORMER_P_WITNESS_TABLE:@"\$S18sil_witness_tables9ConformerVAA1PAAWP"]] to i8*),
// CHECK: i8* bitcast (void (%T18sil_witness_tables9ConformerV*, %swift.type*, i8**)* @"$S18sil_witness_tables9ConformerVAA1QA2aDP7qMethod{{[_0-9a-zA-Z]*}}FTW" to i8*)
// CHECK: ]
-// CHECK: [[CONFORMER_P_WITNESS_TABLE]] = hidden constant [4 x i8*] [
+// CHECK: [[CONFORMER_P_WITNESS_TABLE]] = hidden constant [5 x i8*] [
// CHECK: i8* bitcast (%swift.type* ()* @"$S18sil_witness_tables14AssocConformerVMa" to i8*),
// CHECK: i8* bitcast (i8** ()* @"$S18sil_witness_tables14AssocConformerVAA1AAAWa" to i8*)
// CHECK: i8* bitcast (void (%swift.type*, %swift.type*, i8**)* @"$S18sil_witness_tables9ConformerVAA1PA2aDP12staticMethod{{[_0-9a-zA-Z]*}}FZTW" to i8*),
// CHECK: i8* bitcast (void (%T18sil_witness_tables9ConformerV*, %swift.type*, i8**)* @"$S18sil_witness_tables9ConformerVAA1PA2aDP14instanceMethod{{[_0-9a-zA-Z]*}}FTW" to i8*)
// CHECK: ]
-// CHECK: [[CONFORMER2_P_WITNESS_TABLE:@"\$S18sil_witness_tables10Conformer2VAA1PAAWP"]] = hidden constant [4 x i8*]
+// CHECK: [[CONFORMER2_P_WITNESS_TABLE:@"\$S18sil_witness_tables10Conformer2VAA1PAAWP"]] = hidden constant [5 x i8*]
struct Conformer2: Q {
typealias Assoc = AssocConformer
@@ -59,15 +59,15 @@
// CHECK-LABEL: define hidden swiftcc void @"$S18sil_witness_tables7erasure1cAA2QQ_pAA9ConformerV_tF"(%T18sil_witness_tables2QQP* noalias nocapture sret)
// CHECK: [[WITNESS_TABLE_ADDR:%.*]] = getelementptr inbounds %T18sil_witness_tables2QQP, %T18sil_witness_tables2QQP* %0, i32 0, i32 2
-// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* [[CONFORMER_QQ_WITNESS_TABLE:@"\$S.*WP"]], i32 0, i32 0), i8*** [[WITNESS_TABLE_ADDR]], align 8
-func erasure(c c: Conformer) -> QQ {
+// CHECK-NEXT: store i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* [[CONFORMER_QQ_WITNESS_TABLE:@"\$S.*WP"]], i32 0, i32 0), i8*** [[WITNESS_TABLE_ADDR]], align 8
+func erasure(c: Conformer) -> QQ {
return c
}
// CHECK-LABEL: define hidden swiftcc void @"$S18sil_witness_tables15externalErasure1c0a1_b1_c1_D12_conformance9ExternalP_pAD0G9ConformerV_tF"(%T39sil_witness_tables_external_conformance9ExternalPP* noalias nocapture sret)
// CHECK: [[WITNESS_TABLE_ADDR:%.*]] = getelementptr inbounds %T39sil_witness_tables_external_conformance9ExternalPP, %T39sil_witness_tables_external_conformance9ExternalPP* %0, i32 0, i32 2
// CHECK-NEXT: store i8** [[EXTERNAL_CONFORMER_EXTERNAL_P_WITNESS_TABLE]], i8*** %2, align 8
-func externalErasure(c c: ExternalConformer) -> ExternalP {
+func externalErasure(c: ExternalConformer) -> ExternalP {
return c
}
@@ -77,4 +77,4 @@
// CHECK: ret %swift.type* bitcast (i64* getelementptr inbounds {{.*}} @"$S18sil_witness_tables14AssocConformerVMf", i32 0, i32 1) to %swift.type*)
// CHECK-LABEL: define hidden i8** @"$S18sil_witness_tables9ConformerVAA1PAAWa"()
-// CHECK: ret i8** getelementptr inbounds ([4 x i8*], [4 x i8*]* @"$S18sil_witness_tables9ConformerVAA1PAAWP", i32 0, i32 0)
+// CHECK: ret i8** getelementptr inbounds ([5 x i8*], [5 x i8*]* @"$S18sil_witness_tables9ConformerVAA1PAAWP", i32 0, i32 0)
diff --git a/test/IRGen/vtable.sil b/test/IRGen/vtable.sil
index 6728376..905d8a0 100644
--- a/test/IRGen/vtable.sil
+++ b/test/IRGen/vtable.sil
@@ -27,7 +27,7 @@
// CHECK-objc: %swift.opaque* @_objc_empty_cache,
// CHECK-objc: %swift.opaque* null,
// CHECK-objc: i64 add (i64 ptrtoint ({ i32, i32, i32, i32, i8*, i8*, i8*, i8*, i8*, i8*, i8* }* @_DATA__TtC6vtable1C to i64), i64 {{1|2}}),
-// CHECK-objc: i32 2, i32 0, i32 16, i16 7, i16 0,
+// CHECK-objc: i32 {{3|2}}, i32 0, i32 16, i16 7, i16 0,
// CHECK-objc: i32 112, i32 16,
// CHECK-objc: @"$S6vtable1CCMn"
// CHECK-objc: [[C]]* (%swift.type*)* @"$S6vtable1CCACycACmcfC",
@@ -42,7 +42,7 @@
// CHECK-native: %swift.opaque* null,
// CHECK-native: %swift.opaque* null,
// CHECK-native: i64 1,
-// CHECK-native: i32 2, i32 0, i32 16, i16 7, i16 0,
+// CHECK-native: i32 {{3|2}}, i32 0, i32 16, i16 7, i16 0,
// CHECK-native: i32 112, i32 16,
// CHECK-native: @"$S6vtable1CCMn"
// CHECK-native: [[C]]* (%swift.type*)* @"$S6vtable1CCACycACmcfC",
diff --git a/test/IRGen/witness_method.sil b/test/IRGen/witness_method.sil
index 5740978..5120876 100644
--- a/test/IRGen/witness_method.sil
+++ b/test/IRGen/witness_method.sil
@@ -13,8 +13,10 @@
sil @defcon : $@convention(thin) <T: DefCon> (@thick T.Type) -> @out T {
entry(%0: $*T, %1: $@thick T.Type):
- // CHECK-i386: [[WITNESS:%.*]] = load i8*, i8** %T.DefCon, align 4
- // CHECK-x86_64: [[WITNESS:%.*]] = load i8*, i8** %T.DefCon, align 8
+ // CHECK-x86_64: [[WITNESS_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.DefCon, i32 1
+ // CHECK-i386: [[WITNESS_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.DefCon, i32 1
+ // CHECK-i386: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_GEP]], align 4
+ // CHECK-x86_64: [[WITNESS:%.*]] = load i8*, i8** [[WITNESS_GEP]], align 8
// CHECK: [[METHOD:%.*]] = bitcast i8* [[WITNESS]] to void (%swift.opaque*, %swift.type*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[METHOD]]
%m = witness_method $T, #DefCon.init!allocator.1 : $@convention(witness_method: DefCon) <U: DefCon> (@thick U.Type) -> @out U
@@ -34,7 +36,7 @@
// CHECK-LABEL: define{{( protected)?}} swiftcc void @testInheritedConformance
sil @testInheritedConformance : $@convention(thin) (@in ImplementsDerived) -> () {
entry(%0: $*ImplementsDerived):
- // CHECK: [[WITNESS:%.*]] = load i8*, i8** @"$S14witness_method17ImplementsDerivedVAA4BaseAAWP"
+ // CHECK: [[WITNESS:%.*]] = load i8*, i8** getelementptr inbounds (i8*, i8** @"$S14witness_method17ImplementsDerivedVAA4BaseAAWP", i32 1)
// CHECK: [[METHOD:%.*]] = bitcast i8* [[WITNESS]] to void (%swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[METHOD]]
%m = witness_method $ImplementsDerived, #Base.foo!1 : $@convention(witness_method: Base) <U: Base> (@in_guaranteed U) -> ()
@@ -64,7 +66,7 @@
// CHECK: entry:
// CHECK: [[METADATA:%.*]] = call %swift.type* @"$S14witness_method6SyncUpVMa"(%swift.type* %T)
// CHECK: [[WTABLE:%.*]] = call i8** @"$S14witness_method6SyncUpVyxGAA7SynergyAAWa"(%swift.type* [[METADATA]], i8*** undef, {{i32|i64}} 0)
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** [[WTABLE]], i32 1
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** [[WTABLE]], i32 2
// CHECK: [[WITNESS_FN:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK: [[WITNESS:%.*]] = bitcast i8* [[WITNESS_FN]] to void (%swift.opaque*, %swift.opaque*, %swift.type*, i8**)*
// CHECK: [[ARG:%.*]] = bitcast %T14witness_method6SyncUpV* %1 to %swift.opaque*
@@ -88,7 +90,7 @@
// CHECK-LABEL: define{{( protected)?}} swiftcc void @testArchetypeWitnessMethod(%swift.opaque* noalias nocapture sret, %swift.opaque* noalias nocapture, %swift.type* %T, i8** %T.Strategy)
// CHECK: entry:
-// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.Strategy, i32 2
+// CHECK: [[WITNESS_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.Strategy, i32 3
// CHECK: [[WITNESS_FN:%.*]] = load i8*, i8** [[WITNESS_ADDR]]
// CHECK: [[WITNESS:%.*]] = bitcast i8* [[WITNESS_FN]] to void (%swift.opaque*, %swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[WITNESS]](%swift.opaque* noalias nocapture sret %0, %swift.opaque* noalias nocapture swiftself %1, %swift.type* %T, i8** %T.Strategy)
@@ -120,7 +122,7 @@
// CHECK-LABEL: define{{( protected)?}} swiftcc void @testClassArchetypeWitnessMethod(%T14witness_method7ToVideoV* noalias nocapture sret, %T14witness_method9TPSReportC** noalias nocapture dereferenceable({{4|8}}), %swift.type* %T, %swift.type* %CoverSheet)
// CHECK: entry:
-// CHECK: [[WITNESS_FN:%.*]] = load i8*, i8** getelementptr inbounds (i8*, i8** @"$S14witness_method9TPSReportCyxGAA8StrategyAAWP", i32 2)
+// CHECK: [[WITNESS_FN:%.*]] = load i8*, i8** getelementptr inbounds (i8*, i8** @"$S14witness_method9TPSReportCyxGAA8StrategyAAWP", i32 3)
// CHECK: [[WITNESS:%.*]] = bitcast i8* [[WITNESS_FN]] to void (%swift.opaque*, %swift.opaque*, %swift.type*, i8**)*
// CHECK: call swiftcc void [[WITNESS]](%swift.opaque* noalias nocapture sret %4, %swift.opaque* noalias nocapture swiftself %5, %swift.type* %T, i8** @"$S14witness_method9TPSReportCyxGAA8StrategyAAWP")
// CHECK: ret void
diff --git a/test/IRGen/witness_method_phi.sil b/test/IRGen/witness_method_phi.sil
index fefa311..6e6cdab 100644
--- a/test/IRGen/witness_method_phi.sil
+++ b/test/IRGen/witness_method_phi.sil
@@ -6,7 +6,8 @@
// CHECK-LABEL: define{{( protected)?}} swiftcc void @phi_witness_method(%swift.type* %T, i8** %T.P) #0 {
sil @phi_witness_method : $@convention(thin) <T: P> () -> () {
entry:
- // CHECK: [[LOAD:%.*]] = load i8*, i8** %T.P,
+ // CHECK: [[T0_GEP:%.*]] = getelementptr inbounds i8*, i8** %T.P, i32 1
+ // CHECK: [[LOAD:%.*]] = load i8*, i8** [[T0_GEP]],
// CHECK: [[T0:%.*]] = bitcast i8* [[LOAD]] to void (%swift.opaque*, %swift.type*, i8**)*
// CHECK: [[FUNC:%.*]] = bitcast void (%swift.opaque*, %swift.type*, i8**)* [[T0]] to i8*
%1 = witness_method $T, #P.foo!1 : $@convention(witness_method: P) <T: P> (@in T) -> ()
diff --git a/test/IRGen/witness_table_indirect_conformances.swift b/test/IRGen/witness_table_indirect_conformances.swift
index 27019ba..26669d3 100644
--- a/test/IRGen/witness_table_indirect_conformances.swift
+++ b/test/IRGen/witness_table_indirect_conformances.swift
@@ -30,7 +30,11 @@
func getAssocP2() -> Y { return Y() }
}
-// CHECK: @"$S35witness_table_indirect_conformances1WVAA2P3AAWP" = hidden constant [4 x i8*] [i8* bitcast (%swift.type* ()* @"$S35witness_table_indirect_conformances1ZVMa" to i8*), i8* bitcast (i8** ()* @"$S35witness_table_indirect_conformances1ZVAA2P2AAWa" to i8*), i8* bitcast (i8** ()* @"$S35witness_table_indirect_conformances1YVAA1QAAWa" to i8*), i8* bitcast (void (%T35witness_table_indirect_conformances1ZV*, %T35witness_table_indirect_conformances1WV*, %swift.type*, i8**)* @"$S35witness_table_indirect_conformances1WVAA2P3A2aDP08getAssocE00gE0QzyFTW" to i8*)]
+// CHECK: @"$S35witness_table_indirect_conformances1WVAA2P3AAWP" = hidden constant [5 x i8*] [
+// CHECK-SAME: @"$S35witness_table_indirect_conformances1WVAA2P3AAMc"
+// CHECK-SAME: i8* bitcast (%swift.type* ()* @"$S35witness_table_indirect_conformances1ZVMa" to i8*), i8* bitcast (i8** ()* @"$S35witness_table_indirect_conformances1ZVAA2P2AAWa" to i8*),
+// CHECK-SAME: i8* bitcast (i8** ()* @"$S35witness_table_indirect_conformances1YVAA1QAAWa" to i8*),
+// CHECK-SAME: i8* bitcast (void (%T35witness_table_indirect_conformances1ZV*, %T35witness_table_indirect_conformances1WV*, %swift.type*, i8**)* @"$S35witness_table_indirect_conformances1WVAA2P3A2aDP08getAssocE00gE0QzyFTW" to i8*)]
struct W: P3 {
typealias AssocP3 = Z
@@ -39,11 +43,11 @@
// CHECK-LABEL: define hidden i8** @"$S35witness_table_indirect_conformances1YVAA1QAAWa"()
// CHECK-NEXT: entry:
-// CHECK-NEXT: ret i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S35witness_table_indirect_conformances1YVAA1QAAWP", i32 0, i32 0)
+// CHECK-NEXT: ret i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S35witness_table_indirect_conformances1YVAA1QAAWP", i32 0, i32 0)
// CHECK-LABEL: define hidden i8** @"$S35witness_table_indirect_conformances1ZVAA2P2AAWa"()
// CHECK-NEXT: entry:
-// CHECK: ret i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @"$S35witness_table_indirect_conformances1ZVAA2P2AAWP", i32 0, i32 0)
+// CHECK: ret i8** getelementptr inbounds ([4 x i8*], [4 x i8*]* @"$S35witness_table_indirect_conformances1ZVAA2P2AAWP", i32 0, i32 0)
// CHECK-LABEL: define hidden %swift.type* @"$S35witness_table_indirect_conformances1ZVMa"()
// CHECK-NEXT: entry:
diff --git a/test/IRGen/witness_table_multifile.swift b/test/IRGen/witness_table_multifile.swift
index 3442b0d..e8535db 100644
--- a/test/IRGen/witness_table_multifile.swift
+++ b/test/IRGen/witness_table_multifile.swift
@@ -8,7 +8,7 @@
// CHECK: [[WITNESS_TABLE_ADDR:%[0-9]+]] = getelementptr inbounds [[P_WITNESS_TABLE]], [[P_WITNESS_TABLE]]* %0, i32 0, i32 2
// CHECK: [[WITNESS_TABLE:%[A-Za-z0-9_-]+]] = load i8**, i8*** [[WITNESS_TABLE_ADDR]]
// CHECK: [[BUFFER:%[0-9]+]] = call %swift.opaque* @__swift_project_boxed_opaque_existential_1
- // CHECK-NEXT: getelementptr inbounds i8*, i8** [[WITNESS_TABLE]], i32 3
+ // CHECK-NEXT: getelementptr inbounds i8*, i8** [[WITNESS_TABLE]], i32 4
go().foo()
}
diff --git a/test/IRGen/witness_table_objc_associated_type.swift b/test/IRGen/witness_table_objc_associated_type.swift
index 61b67ee..6a13148 100644
--- a/test/IRGen/witness_table_objc_associated_type.swift
+++ b/test/IRGen/witness_table_objc_associated_type.swift
@@ -19,7 +19,7 @@
typealias AA = SA
func foo() {}
}
-// CHECK-LABEL: @"$S34witness_table_objc_associated_type2SBVAA1BAAWP" = hidden constant [3 x i8*] [
+// CHECK-LABEL: @"$S34witness_table_objc_associated_type2SBVAA1BAAWP" = hidden constant [4 x i8*] [
// CHECK: i8* bitcast (%swift.type* ()* @"$S34witness_table_objc_associated_type2SAVMa" to i8*)
// CHECK: i8* bitcast (i8** ()* @"$S34witness_table_objc_associated_type2SAVAA1AAAWa" to i8*)
// CHECK: i8* bitcast {{.*}} @"$S34witness_table_objc_associated_type2SBVAA1BA2aDP3fooyyFTW"
@@ -30,14 +30,14 @@
typealias OO = CO
func foo() {}
}
-// CHECK-LABEL: @"$S34witness_table_objc_associated_type2SOVAA1CAAWP" = hidden constant [2 x i8*] [
+// CHECK-LABEL: @"$S34witness_table_objc_associated_type2SOVAA1CAAWP" = hidden constant [3 x i8*] [
// CHECK: i8* bitcast (%swift.type* ()* @"$S34witness_table_objc_associated_type2COCMa" to i8*)
// CHECK: i8* bitcast {{.*}} @"$S34witness_table_objc_associated_type2SOVAA1CA2aDP3fooyyFTW"
// CHECK: ]
// CHECK-LABEL: define hidden swiftcc void @"$S34witness_table_objc_associated_type0A25OffsetAfterAssociatedTypeyyxAA1BRzlF"(%swift.opaque* noalias nocapture, %swift.type* %T, i8** %T.B)
func witnessOffsetAfterAssociatedType<T: B>(_ x: T) {
- // CHECK: [[FOO_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.B, i32 2
+ // CHECK: [[FOO_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.B, i32 3
// CHECK: [[FOO_OPAQUE:%.*]] = load {{.*}} [[FOO_ADDR]]
// CHECK: [[FOO:%.*]] = bitcast {{.*}} [[FOO_OPAQUE]]
// CHECK: call swiftcc void [[FOO]]
@@ -46,7 +46,7 @@
// CHECK-LABEL: define hidden swiftcc void @"$S34witness_table_objc_associated_type0A29OffsetAfterAssociatedTypeObjCyyxAA1CRzlF"(%swift.opaque* noalias nocapture, %swift.type* %T, i8** %T.C) {{.*}} {
func witnessOffsetAfterAssociatedTypeObjC<T: C>(_ x: T) {
- // CHECK: [[FOO_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.C, i32 1
+ // CHECK: [[FOO_ADDR:%.*]] = getelementptr inbounds i8*, i8** %T.C, i32 2
// CHECK: [[FOO_OPAQUE:%.*]] = load {{.*}} [[FOO_ADDR]]
// CHECK: [[FOO:%.*]] = bitcast {{.*}} [[FOO_OPAQUE]]
// CHECK: call swiftcc void [[FOO]]
diff --git a/test/Inputs/clang-importer-sdk/usr/include/macros.h b/test/Inputs/clang-importer-sdk/usr/include/macros.h
index 234e592..8f360bc 100644
--- a/test/Inputs/clang-importer-sdk/usr/include/macros.h
+++ b/test/Inputs/clang-importer-sdk/usr/include/macros.h
@@ -144,3 +144,12 @@
#define RECURSION_WITH_EXPR3 RECURSION_WITH_EXPR3_HELPER + 1
#define RECURSION_WITH_EXPR3_HELPER RECURSION_WITH_EXPR3 + 1
+
+
+// Casts with problematic types
+#define UNAVAILABLE_ONE ((unavailable_t)1)
+typedef unsigned unavailable_t __attribute__((unavailable));
+#define DEPRECATED_ONE ((deprecated_t)1)
+typedef unsigned deprecated_t __attribute__((deprecated));
+#define OKAY_TYPED_ONE ((okay_t)1)
+typedef unsigned okay_t;
diff --git a/test/Inputs/conditional_conformance_basic_conformances.swift b/test/Inputs/conditional_conformance_basic_conformances.swift
index 3e1ad077..10fb901 100644
--- a/test/Inputs/conditional_conformance_basic_conformances.swift
+++ b/test/Inputs/conditional_conformance_basic_conformances.swift
@@ -100,7 +100,7 @@
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [1 x i8**], [1 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[A_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
-// CHECK-NEXT: store i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP2VAA0F0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP2VAA0F0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
// CHECK-NEXT: [[Single_P1:%.*]] = call i8** @"$S42conditional_conformance_basic_conformances6SingleVyxGAA2P1A2A2P2RzlWa"(%swift.type* [[Single_TYPE]], i8*** [[CONDITIONAL_REQUIREMENTS]], i64 1)
// CHECK-NEXT: store atomic i8** [[Single_P1]], i8*** @"$S42conditional_conformance_basic_conformances6SingleVyAA4IsP2VGACyxGAA2P1A2A0G0RzlWL" release, align 8
@@ -214,7 +214,7 @@
// CHECK-NEXT: [[B_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
// CHECK-NEXT: store i8** %X.P2, i8*** [[B_P2_PTR]], align 8
// CHECK-NEXT: [[C_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 1
-// CHECK-NEXT: store i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
// CHECK-NEXT: [[Double_P1:%.*]] = call i8** @"$S42conditional_conformance_basic_conformances6DoubleVyxq_GAA2P1A2A2P2RzAA2P3R_rlWa"(%swift.type* %1, i8*** [[CONDITIONAL_REQUIREMENTS]], i64 2)
// CHECK-NEXT: call swiftcc void @"$S42conditional_conformance_basic_conformances8takes_p1yyxmAA2P1RzlF"(%swift.type* [[Double_TYPE]], %swift.type* [[Double_TYPE]], i8** [[Double_P1]])
@@ -246,9 +246,9 @@
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [2 x i8**], [2 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[B_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
-// CHECK-NEXT: store i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP2VAA0F0AAWP", i32 0, i32 0), i8*** [[B_P2_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP2VAA0F0AAWP", i32 0, i32 0), i8*** [[B_P2_PTR]], align 8
// CHECK-NEXT: [[C_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 1
-// CHECK-NEXT: store i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S42conditional_conformance_basic_conformances4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
// CHECK-NEXT: [[Double_P1:%.*]] = call i8** @"$S42conditional_conformance_basic_conformances6DoubleVyxq_GAA2P1A2A2P2RzAA2P3R_rlWa"(%swift.type* [[Double_TYPE]], i8*** [[CONDITIONAL_REQUIREMENTS]], i64 2)
// CHECK-NEXT: store atomic i8** [[Double_P1]], i8*** @"$S42conditional_conformance_basic_conformances6DoubleVyAA4IsP2VAA0F2P3VGACyxq_GAA2P1A2A0G0RzAA0H0R_rlWL" release, align 8
diff --git a/test/Inputs/conditional_conformance_recursive.swift b/test/Inputs/conditional_conformance_recursive.swift
index 6d5cd45..147ea89 100644
--- a/test/Inputs/conditional_conformance_recursive.swift
+++ b/test/Inputs/conditional_conformance_recursive.swift
@@ -28,12 +28,14 @@
// CHECK: [[T_TO_P2_PTR:%.*]] = getelementptr inbounds i8*, i8** [[WRAPPER_T_TO_P2:%.*]], i32 -1
// CHECK: [[T_TO_P2_VAL:%.*]] = load i8*, i8** [[T_TO_P2_PTR]]
// CHECK: [[T_TO_P2:%.*]] = bitcast i8* [[T_TO_P2_VAL]] to i8**
-// CHECK: [[T_TO_P1_VAL:%.*]] = load i8*, i8** [[T_TO_P2]]
+// CHECK: [[T_TO_P1_VAL_GEP:%.*]] = getelementptr inbounds i8*, i8** [[T_TO_P2]], i32 1
+// CHECK: [[T_TO_P1_VAL:%.*]] = load i8*, i8** [[T_TO_P1_VAL_GEP]]
// CHECK: [[T_TO_P1:%.*]] = bitcast i8* [[T_TO_P1_VAL]] to i8**
// CHECK: [[WRAPPER_T_TYPE:%.*]] = bitcast %swift.type* [[WRAPPER_T:%.*]] to %swift.type**
// CHECK: [[T_TYPE_PTR:%.*]] = getelementptr inbounds %swift.type*, %swift.type** [[WRAPPER_T_TYPE]], i64 2
// CHECK: [[T_TYPE:%.*]] = load %swift.type*, %swift.type** [[T_TYPE_PTR]]
-// CHECK: [[T_B_TYPE_ACCESSOR_PTR:%.*]] = load i8*, i8** [[T_TO_P1]], align 8
+// CHECK: [[T_B_TYPE_ACCESSOR_PTR_GEP:%.*]] = getelementptr inbounds i8*, i8** [[T_TO_P1]], i32 1
+// CHECK: [[T_B_TYPE_ACCESSOR_PTR:%.*]] = load i8*, i8** [[T_B_TYPE_ACCESSOR_PTR_GEP]], align 8
// CHECK: [[T_B_TYPE_ACCESSOR:%.*]] = bitcast i8* [[T_B_TYPE_ACCESSOR_PTR]] to %swift.type* (%swift.type*, i8**)*
// CHECK: [[T_A_TYPE:%.*]] = call %swift.type* [[T_B_TYPE_ACCESSOR]](%swift.type* [[T_TYPE]], i8** [[T_TO_P1]])
// CHECK: ret %swift.type* [[T_A_TYPE]]
diff --git a/test/Inputs/conditional_conformance_subclass.swift b/test/Inputs/conditional_conformance_subclass.swift
index 55c464c..878f55e 100644
--- a/test/Inputs/conditional_conformance_subclass.swift
+++ b/test/Inputs/conditional_conformance_subclass.swift
@@ -98,7 +98,7 @@
// CHECK-NEXT: [[SubclassGeneric_TYPE:%.*]] = call %swift.type* @"$S32conditional_conformance_subclass15SubclassGenericCyAA4IsP2VGMa"()
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [1 x i8**], [1 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[A_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
-// CHECK-NEXT: store i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S32conditional_conformance_subclass4IsP2VAA0E0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S32conditional_conformance_subclass4IsP2VAA0E0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
// CHECK-NEXT: [[Base_P1:%.*]] = call i8** @"$S32conditional_conformance_subclass4BaseCyxGAA2P1A2A2P2RzlWa"(%swift.type* [[SubclassGeneric_TYPE]], i8*** [[CONDITIONAL_REQUIREMENTS]], i64 1)
// CHECK-NEXT: store atomic i8** [[Base_P1]], i8*** @"$S32conditional_conformance_subclass15SubclassGenericCyAA4IsP2VGAA4BaseCyxGAA2P1A2A0G0RzlWL" release, align 8
// CHECK-NEXT: br label %cont
@@ -131,7 +131,7 @@
// CHECK-NEXT: [[SubclassConcrete_TYPE:%.*]] = call %swift.type* @"$S32conditional_conformance_subclass16SubclassConcreteCMa"()
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [1 x i8**], [1 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[A_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
-// CHECK-NEXT: store i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S32conditional_conformance_subclass4IsP2VAA0E0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S32conditional_conformance_subclass4IsP2VAA0E0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
// CHECK-NEXT: [[Base_P1:%.*]] = call i8** @"$S32conditional_conformance_subclass4BaseCyxGAA2P1A2A2P2RzlWa"(%swift.type* [[SubclassGeneric_TYPE]], i8*** [[CONDITIONAL_REQUIREMENTS]], i64 1)
// CHECK-NEXT: store atomic i8** [[Base_P1]], i8*** @"$S32conditional_conformance_subclass16SubclassConcreteCAA4BaseCyxGAA2P1A2A2P2RzlWL" release, align 8
// CHECK-NEXT: br label %cont
@@ -164,7 +164,7 @@
// CHECK-NEXT: [[SubclassGenericConcrete_TYPE:%.*]] = call %swift.type* @"$S32conditional_conformance_subclass23SubclassGenericConcreteCMa"()
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [1 x i8**], [1 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[A_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
-// CHECK-NEXT: store i8** getelementptr inbounds ([0 x i8*], [0 x i8*]* @"$S32conditional_conformance_subclass4IsP2VAA0E0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S32conditional_conformance_subclass4IsP2VAA0E0AAWP", i32 0, i32 0), i8*** [[A_P2_PTR]], align 8
// CHECK-NEXT: [[Base_P1:%.*]] = call i8** @"$S32conditional_conformance_subclass4BaseCyxGAA2P1A2A2P2RzlWa"(%swift.type* [[SubclassGeneric_TYPE]], i8*** [[CONDITIONAL_REQUIREMENTS]], i64 1)
// CHECK-NEXT: store atomic i8** [[Base_P1]], i8*** @"$S32conditional_conformance_subclass23SubclassGenericConcreteCAA4BaseCyxGAA2P1A2A2P2RzlWL" release, align 8
// CHECK-NEXT: br label %cont
diff --git a/test/Inputs/conditional_conformance_with_assoc.swift b/test/Inputs/conditional_conformance_with_assoc.swift
index 7df9a02..3d35d1b 100644
--- a/test/Inputs/conditional_conformance_with_assoc.swift
+++ b/test/Inputs/conditional_conformance_with_assoc.swift
@@ -139,7 +139,7 @@
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [3 x i8**], [3 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[C_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
-// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
// CHECK-NEXT: [[B_AT2_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 1
// CHECK-NEXT: store i8** %T.AT2.P2, i8*** [[B_AT2_P2_PTR]], align 8
// CHECK-NEXT: [[B_AT2_AT2_AT3_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 2
@@ -160,14 +160,14 @@
// CHECK-LABEL: define{{( protected)?}} swiftcc void @"$S34conditional_conformance_with_assoc16concrete_genericyyxmAA2P3RzlF"(%swift.type*, %swift.type* %U, i8** %U.P3) #0 {
// CHECK-NEXT: entry:
// CHECK-NEXT: %conditional.requirement.buffer = alloca [3 x i8**], align 8
-// CHECK-NEXT: [[Double_TYPE:%.*]] = call %swift.type* @"$S34conditional_conformance_with_assoc6DoubleVMa"(%swift.type* bitcast (i64* getelementptr inbounds (<{ i8**, i64, <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i32 }>* }>, <{ i8**, i64, <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i32 }>* }>* @"$S34conditional_conformance_with_assoc8IsAlsoP2VMf", i32 0, i32 1) to %swift.type*), %swift.type* %U, i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S34conditional_conformance_with_assoc8IsAlsoP2VAA0G0AAWP", i32 0, i32 0))
+// CHECK-NEXT: [[Double_TYPE:%.*]] = call %swift.type* @"$S34conditional_conformance_with_assoc6DoubleVMa"(%swift.type* bitcast (i64* getelementptr inbounds (<{ i8**, i64, <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i32 }>* }>, <{ i8**, i64, <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i16, i16, i32 }>* }>* @"$S34conditional_conformance_with_assoc8IsAlsoP2VMf", i32 0, i32 1) to %swift.type*), %swift.type* %U, i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @"$S34conditional_conformance_with_assoc8IsAlsoP2VAA0G0AAWP", i32 0, i32 0))
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [3 x i8**], [3 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[C_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
// CHECK-NEXT: store i8** %U.P3, i8*** [[C_P3_PTR]], align 8
// CHECK-NEXT: [[B_AT2_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 1
-// CHECK-NEXT: store i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S34conditional_conformance_with_assoc6IsBothVAA2P2AAWP", i32 0, i32 0), i8*** [[B_AT2_P2_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @"$S34conditional_conformance_with_assoc6IsBothVAA2P2AAWP", i32 0, i32 0), i8*** [[B_AT2_P2_PTR]], align 8
// CHECK-NEXT: [[B_AT2_AT2_AT3_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 2
-// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[B_AT2_AT2_AT3_P3_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[B_AT2_AT2_AT3_P3_PTR]], align 8
// CHECK-NEXT: [[Double_P1:%.*]] = call i8** @"$S34conditional_conformance_with_assoc6DoubleVyxq_GAA2P1A2A2P3R_AA2P23AT2RpzAafH_AhaGP3AT3RPzrlWa"(%swift.type* [[Double_TYPE]], i8*** [[CONDITIONAL_REQUIREMENTS]], i64 3)
// CHECK-NEXT: call swiftcc void @"$S34conditional_conformance_with_assoc8takes_p1yyxmAA2P1RzlF"(%swift.type* [[Double_TYPE]], %swift.type* [[Double_TYPE]], i8** [[Double_P1]])
// CHECK-NEXT: ret void
@@ -199,11 +199,11 @@
// CHECK-NEXT: [[Double_TYPE:%.*]] = call %swift.type* @"$S34conditional_conformance_with_assoc6DoubleVyAA8IsAlsoP2VAA0F2P3VGMa"() #10
// CHECK-NEXT: [[CONDITIONAL_REQUIREMENTS:%.*]] = getelementptr inbounds [3 x i8**], [3 x i8**]* %conditional.requirement.buffer, i32 0, i32 0
// CHECK-NEXT: [[C_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 0
-// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[C_P3_PTR]], align 8
// CHECK-NEXT: [[B_AT2_P2_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 1
-// CHECK-NEXT: store i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S34conditional_conformance_with_assoc6IsBothVAA2P2AAWP", i32 0, i32 0), i8*** [[B_AT2_P2_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @"$S34conditional_conformance_with_assoc6IsBothVAA2P2AAWP", i32 0, i32 0), i8*** [[B_AT2_P2_PTR]], align 8
// CHECK-NEXT: [[B_AT2_AT2_AT3_P3_PTR:%.*]] = getelementptr inbounds i8**, i8*** [[CONDITIONAL_REQUIREMENTS]], i32 2
-// CHECK-NEXT: store i8** getelementptr inbounds ([1 x i8*], [1 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[B_AT2_AT2_AT3_P3_PTR]], align 8
+// CHECK-NEXT: store i8** getelementptr inbounds ([2 x i8*], [2 x i8*]* @"$S34conditional_conformance_with_assoc4IsP3VAA0F0AAWP", i32 0, i32 0), i8*** [[B_AT2_AT2_AT3_P3_PTR]], align 8
// CHECK-NEXT: [[Double_P1:%.*]] = call i8** @"$S34conditional_conformance_with_assoc6DoubleVyxq_GAA2P1A2A2P3R_AA2P23AT2RpzAafH_AhaGP3AT3RPzrlWa"(%swift.type* [[Double_TYPE]], i8*** [[CONDITIONAL_REQUIREMENTS]], i64 3)
// CHECK-NEXT: store atomic i8** [[Double_P1]], i8*** @"$S34conditional_conformance_with_assoc6DoubleVyAA8IsAlsoP2VAA0F2P3VGACyxq_GAA2P1A2A0I0R_AA0H03AT2RpzAakM_AmaLP3AT3RPzrlWL" release, align 8
// CHECK-NEXT: br label %cont
diff --git a/test/SILOptimizer/dead_alloc_elim.sil b/test/SILOptimizer/dead_alloc_elim.sil
index bb2667b..1c1700a 100644
--- a/test/SILOptimizer/dead_alloc_elim.sil
+++ b/test/SILOptimizer/dead_alloc_elim.sil
@@ -14,6 +14,10 @@
deinit { }
}
+class NontrivialDestructor {
+ init()
+}
+
sil @$S4main17TrivialDestructorCfD : $@convention(method) (@owned TrivialDestructor) -> () {
bb0(%0 : $TrivialDestructor):
// Alloc/Dealloc stack should not disrupt elimination of the
@@ -60,6 +64,21 @@
return %1 : $()
}
+// If the destructor is not called, we don't care about it.
+//
+// CHECK-LABEL: sil @devirtualized_destructor : $@convention(thin) () -> () {
+// CHECK: bb0
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+sil @devirtualized_destructor : $@convention(thin) () -> () {
+ %0 = alloc_ref $NontrivialDestructor
+ set_deallocating %0 : $NontrivialDestructor
+ fix_lifetime %0 : $NontrivialDestructor
+ dealloc_ref %0 : $NontrivialDestructor
+ %1 = tuple()
+ return %1 : $()
+}
+
// We load/use a pointer from the alloc_ref, do nothing.
//
// CHECK-LABEL: sil @trivial_destructor_load : $@convention(thin) () -> () {
diff --git a/test/SILOptimizer/devirt_covariant_return.swift b/test/SILOptimizer/devirt_covariant_return.swift
index f3526c0..a1f838d 100644
--- a/test/SILOptimizer/devirt_covariant_return.swift
+++ b/test/SILOptimizer/devirt_covariant_return.swift
@@ -1,4 +1,4 @@
-// RUN: %target-swift-frontend -Xllvm -sil-full-demangle -O -Xllvm -disable-sil-cm-rr-cm=0 -Xllvm -sil-inline-generics=false -primary-file %s -emit-sil -sil-inline-threshold 1000 -Xllvm -sil-disable-pass=GlobalOpt -sil-verify-all | %FileCheck %s
+// RUN: %target-swift-frontend -Xllvm -sil-full-demangle -O -Xllvm -disable-sil-cm-rr-cm=0 -Xllvm -sil-inline-generics=false -primary-file %s -emit-sil -sil-inline-threshold 1000 -Xllvm -sil-disable-pass=ObjectOutliner -sil-verify-all | %FileCheck %s
// Make sure that we can dig all the way through the class hierarchy and
// protocol conformances with covariant return types correctly. The verifier
@@ -11,7 +11,6 @@
// CHECK: bb0
// CHECK: alloc_ref
// CHECK: alloc_ref
-// CHECK: alloc_ref
// CHECK: function_ref @unknown1a : $@convention(thin) () -> ()
// CHECK: apply
// CHECK: function_ref @defenestrate : $@convention(thin) () -> ()
diff --git a/test/SILOptimizer/devirt_specialized_conformance.swift b/test/SILOptimizer/devirt_specialized_conformance.swift
index b24a5e3..62cb05d 100644
--- a/test/SILOptimizer/devirt_specialized_conformance.swift
+++ b/test/SILOptimizer/devirt_specialized_conformance.swift
@@ -1,11 +1,10 @@
-// RUN: %target-swift-frontend -O -Xllvm -sil-inline-generics=false -Xllvm -sil-disable-pass=GlobalOpt %s -emit-sil -sil-verify-all | %FileCheck %s
+// RUN: %target-swift-frontend -O -Xllvm -sil-inline-generics=false -Xllvm -sil-disable-pass=ObjectOutliner %s -emit-sil -sil-verify-all | %FileCheck %s
// Make sure that we completely inline/devirtualize/substitute all the way down
// to unknown1.
// CHECK-LABEL: sil @main
// CHECK: bb0({{.*}}):
-// CHECK: alloc_ref
// CHECK: function_ref @unknown1
// CHECK: apply
// CHECK: apply
diff --git a/test/SILOptimizer/globalopt-iter.sil b/test/SILOptimizer/globalopt-iter.sil
index 1be24b1..d048cd5 100644
--- a/test/SILOptimizer/globalopt-iter.sil
+++ b/test/SILOptimizer/globalopt-iter.sil
@@ -1,4 +1,4 @@
-// RUN: %target-sil-opt -assume-parsing-unqualified-ownership-sil -enable-sil-verify-all %s -global-opt | %FileCheck %s
+// RUN: %target-sil-opt -assume-parsing-unqualified-ownership-sil -enable-sil-verify-all %s -object-outliner | %FileCheck %s
import Builtin
diff --git a/test/SILOptimizer/globalopt.sil b/test/SILOptimizer/globalopt.sil
index a6c22a3..594c265 100644
--- a/test/SILOptimizer/globalopt.sil
+++ b/test/SILOptimizer/globalopt.sil
@@ -9,205 +9,6 @@
import Builtin
import Swift
-class Obj {
- @sil_stored var value: Int64
- init()
-}
-
-// CHECK-LABEL: sil_global private @outline_global_simpleTv_ : $Obj = {
-// CHECK-NEXT: %0 = integer_literal $Builtin.Int64, 1
-// CHECK-NEXT: %1 = struct $Int64 (%0 : $Builtin.Int64)
-// CHECK-NEXT: %initval = object $Obj (%1 : $Int64)
-// CHECK-NEXT: }
-
-// CHECK-LABEL: sil_global private @outline_global_tailelemsTv_ : $Obj = {
-// CHECK-NEXT: %0 = integer_literal $Builtin.Int64, 3
-// CHECK-NEXT: %1 = struct $Int64 (%0 : $Builtin.Int64)
-// CHECK-NEXT: %2 = integer_literal $Builtin.Int64, 2
-// CHECK-NEXT: %3 = struct $Int64 (%2 : $Builtin.Int64)
-// CHECK-NEXT: %4 = integer_literal $Builtin.Int64, 1
-// CHECK-NEXT: %5 = struct $Int64 (%4 : $Builtin.Int64)
-// CHECK-NEXT: %initval = object $Obj (%5 : $Int64, [tail_elems] %3 : $Int64, %1 : $Int64)
-// CHECK-NEXT: }
-
-// CHECK-LABEL: sil @outline_global_simple
-// CHECK: [[G:%[0-9]+]] = global_value @outline_global_simpleTv_ : $Obj
-// CHECK: strong_retain [[G]] : $Obj
-// CHECK-NOT: store
-// CHECK: strong_release [[G]] : $Obj
-// CHECK: return
-sil @outline_global_simple : $@convention(thin) () -> () {
-bb0:
- %1 = integer_literal $Builtin.Int64, 1
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %7 = alloc_ref $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-
-// CHECK-LABEL: sil @outline_global_tailelems
-// CHECK: [[G:%[0-9]+]] = global_value @outline_global_tailelemsTv_ : $Obj
-// CHECK: strong_retain [[G]] : $Obj
-// CHECK-NOT: store
-// CHECK: strong_release [[G]] : $Obj
-// CHECK: return
-sil @outline_global_tailelems : $@convention(thin) () -> () {
-bb0:
- %0 = integer_literal $Builtin.Word, 2
- %1 = integer_literal $Builtin.Int64, 1
- %2 = integer_literal $Builtin.Int64, 2
- %3 = integer_literal $Builtin.Int64, 3
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %5 = struct $Int64 (%2 : $Builtin.Int64)
- %6 = struct $Int64 (%3 : $Builtin.Int64)
- %7 = alloc_ref [tail_elems $Int64 * %0 : $Builtin.Word] $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- %15 = ref_tail_addr %7 : $Obj, $Int64
- store %5 to %15 : $*Int64
- %19 = integer_literal $Builtin.Word, 1
- %20 = index_addr %15 : $*Int64, %19 : $Builtin.Word
- store %6 to %20 : $*Int64
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-// CHECK-LABEL: sil @dont_outline_global_double_store
-// CHECK: alloc_ref
-// CHECK: store
-// CHECK: return
-sil @dont_outline_global_double_store : $@convention(thin) () -> () {
-bb0:
- %1 = integer_literal $Builtin.Int64, 1
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %7 = alloc_ref $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- store %4 to %9 : $*Int64
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-// CHECK-LABEL: sil @dont_outline_global_missing_store
-// CHECK: alloc_ref
-// CHECK: return
-sil @dont_outline_global_missing_store : $@convention(thin) () -> () {
-bb0:
- %1 = integer_literal $Builtin.Int64, 1
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %7 = alloc_ref $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-// CHECK-LABEL: sil @dont_outline_objc_allocation
-// CHECK: alloc_ref
-// CHECK: return
-sil @dont_outline_objc_allocation : $@convention(thin) () -> () {
-bb0:
- %1 = integer_literal $Builtin.Int64, 1
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- // A hack, because Obj is not really an ObjC class. But for the test it should be ok.
- %7 = alloc_ref [objc] $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-sil @take_pointer : $@convention(thin) (Builtin.RawPointer) -> ()
-
-// CHECK-LABEL: sil @dont_outline_global_unknown_addr_use
-// CHECK: alloc_ref
-// CHECK: return
-sil @dont_outline_global_unknown_addr_use : $@convention(thin) () -> () {
-bb0:
- %1 = integer_literal $Builtin.Int64, 1
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %7 = alloc_ref $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- %10 = address_to_pointer %9 : $*Int64 to $Builtin.RawPointer
- %f = function_ref @take_pointer : $@convention(thin) (Builtin.RawPointer) -> ()
- %a = apply %f(%10) : $@convention(thin) (Builtin.RawPointer) -> ()
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-// CHECK-LABEL: sil @dont_outline_global_escaping_obj
-// CHECK: alloc_ref
-// CHECK: return
-sil @dont_outline_global_escaping_obj : $@convention(thin) (@inout Obj) -> () {
-bb0(%0: $*Obj):
- %1 = integer_literal $Builtin.Int64, 1
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %7 = alloc_ref $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- store %7 to %0 : $*Obj
- %r = tuple ()
- return %r : $()
-}
-
-// CHECK-LABEL: sil @dont_outline_global_missing_tailelem_store
-// CHECK: alloc_ref
-// CHECK: return
-sil @dont_outline_global_missing_tailelem_store : $@convention(thin) () -> () {
-bb0:
- %0 = integer_literal $Builtin.Word, 2
- %1 = integer_literal $Builtin.Int64, 1
- %2 = integer_literal $Builtin.Int64, 2
- %3 = integer_literal $Builtin.Int64, 3
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %5 = struct $Int64 (%2 : $Builtin.Int64)
- %6 = struct $Int64 (%3 : $Builtin.Int64)
- %7 = alloc_ref [tail_elems $Int64 * %0 : $Builtin.Word] $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- %15 = ref_tail_addr %7 : $Obj, $Int64
- store %5 to %15 : $*Int64
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-// CHECK-LABEL: sil @dont_outline_global_double_tailelem_store
-// CHECK: alloc_ref
-// CHECK: return
-sil @dont_outline_global_double_tailelem_store : $@convention(thin) () -> () {
-bb0:
- %0 = integer_literal $Builtin.Word, 2
- %1 = integer_literal $Builtin.Int64, 1
- %2 = integer_literal $Builtin.Int64, 2
- %3 = integer_literal $Builtin.Int64, 3
- %4 = struct $Int64 (%1 : $Builtin.Int64)
- %5 = struct $Int64 (%2 : $Builtin.Int64)
- %6 = struct $Int64 (%3 : $Builtin.Int64)
- %7 = alloc_ref [tail_elems $Int64 * %0 : $Builtin.Word] $Obj
- %9 = ref_element_addr %7 : $Obj, #Obj.value
- store %4 to %9 : $*Int64
- %15 = ref_tail_addr %7 : $Obj, $Int64
- store %5 to %15 : $*Int64
- store %5 to %15 : $*Int64
- %19 = integer_literal $Builtin.Word, 1
- %20 = index_addr %15 : $*Int64, %19 : $Builtin.Word
- store %6 to %20 : $*Int64
- strong_release %7 : $Obj
- %r = tuple ()
- return %r : $()
-}
-
-
// globalinit_token0
sil_global private @globalinit_token0 : $Builtin.Word
sil_global @MyConst : $Int32
diff --git a/test/SILOptimizer/inline_heuristics.sil b/test/SILOptimizer/inline_heuristics.sil
index 2e613fb..7059216 100644
--- a/test/SILOptimizer/inline_heuristics.sil
+++ b/test/SILOptimizer/inline_heuristics.sil
@@ -304,16 +304,35 @@
return %9 : $Int32
}
-// CHECK-LABEL: sil @testSwitchEnum
+// CHECK-LABEL: sil @testSwitchEnumArg
// CHECK-NOT: apply
// CHECK: builtin "assert_configuration"()
// CHECK-NOT: apply
// CHECK: return %{{.*}} : $()
-// CHECK-LOG-LABEL: Inline into caller: testSwitchEnum
+// CHECK-LOG-LABEL: Inline into caller: testSwitchEnumArg
// CHECK-LOG-NEXT: decision {{.*}}, b=50,
-sil @testSwitchEnum : $@convention(thin) () -> () {
+sil @testSwitchEnumArg : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%1 : $Builtin.Int32):
+ %0 = function_ref @switchEnumCallee : $@convention(thin) (Optional<Int32>) -> Int32
+ %2 = struct $Int32 (%1 : $Builtin.Int32)
+ %3 = enum $Optional<Int32>, #Optional.some!enumelt.1, %2 : $Int32
+ %4 = apply %0(%3) : $@convention(thin) (Optional<Int32>) -> Int32
+ %5 = tuple ()
+ return %5 : $()
+}
+
+// CHECK-LABEL: sil @testSwitchEnumConst
+// CHECK-NOT: apply
+// CHECK: builtin "assert_configuration"()
+// CHECK-NOT: apply
+// CHECK: return %{{.*}} : $()
+
+// CHECK-LOG-LABEL: Inline into caller: testSwitchEnumConst
+// CHECK-LOG-NEXT: pure-call decision
+
+sil @testSwitchEnumConst : $@convention(thin) () -> () {
bb0:
%0 = function_ref @switchEnumCallee : $@convention(thin) (Optional<Int32>) -> Int32
%1 = integer_literal $Builtin.Int32, 27
diff --git a/test/SILOptimizer/latecodemotion.sil b/test/SILOptimizer/latecodemotion.sil
index a9e0d16..0393902 100644
--- a/test/SILOptimizer/latecodemotion.sil
+++ b/test/SILOptimizer/latecodemotion.sil
@@ -513,8 +513,6 @@
// CHECK-NEXT: function_ref @blocker
// CHECK-NEXT: alloc_stack
// CHECK-NEXT: dealloc_stack
-// CHECK-NEXT: retain_value
-// CHECK-NEXT: release_value
// CHECK-NEXT: switch_enum
// CHECK: bb2:
// CHECK-NOT: retain_value
@@ -722,8 +720,6 @@
// CHECK-NEXT: alloc_stack
// CHECK-NEXT: dealloc_stack
// CHECK-NEXT: select_enum
-// CHECK-NEXT: retain_value
-// CHECK-NEXT: release_value
// CHECK-NEXT: cond_br
// CHECK: bb2:
// CHECK-NOT: retain_value
@@ -843,8 +839,8 @@
// CHECK-LABEL: sil @enum_simplification_test1 : $@convention(thin) (FakeOptional<Builtin.NativeObject>) -> () {
// CHECK: bb0
-// CHECK-NEXT: retain_value
-// CHECK-NEXT: release_value
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
sil @enum_simplification_test1 : $@convention(thin) (FakeOptional<Builtin.NativeObject>) -> () {
bb0(%0 : $FakeOptional<Builtin.NativeObject>):
retain_value %0 : $FakeOptional<Builtin.NativeObject>
@@ -855,8 +851,9 @@
// CHECK-LABEL: sil @enum_simplification_test2 : $@convention(thin) (FakeOptional<Builtin.NativeObject>) -> () {
// CHECK: bb0
-// CHECK: strong_retain
-// CHECK: release_value
+// CHECK-NEXT: unchecked_enum_data
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
sil @enum_simplification_test2 : $@convention(thin) (FakeOptional<Builtin.NativeObject>) -> () {
bb0(%0 : $FakeOptional<Builtin.NativeObject>):
%1 = unchecked_enum_data %0 : $FakeOptional<Builtin.NativeObject>, #FakeOptional.some!enumelt.1
@@ -887,8 +884,8 @@
// CHECK-NEXT: get_object
// CHECK-NEXT: function_ref @get_object
// CHECK-NEXT: apply
-// CHECK: strong_retain
-// CHECK: strong_release
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
sil @enum_simplification_test4 : $@convention(thin) () -> () {
bb0:
%0 = function_ref @get_object : $@convention(thin) () -> Builtin.NativeObject
@@ -1288,9 +1285,7 @@
// CHECK: bb0
// CHECK: cond_br undef, bb1, bb2
// CHECK: bb1:
-// CHECK: strong_retain
-// CHECK: strong_release
-// CHECK: br bb3
+// CHECK-NEXT: br bb3
// CHECK: bb2:
// CHECK: strong_retain
// CHECK: apply
@@ -1322,8 +1317,7 @@
// Make sure release can be hoisted across memory that do not escape.
// CHECK-LABEL: sil @hoist_release_across_local_memory_use
// CHECK: bb1:
-// CHECK: strong_release
-// CHECK: br bb3
+// CHECK-NEXT: br bb3
// CHECK: bb2:
// CHECK: strong_release
// CHECK: br bb3
@@ -1426,11 +1420,12 @@
// CHECK-LABEL: sil @hoist_silargument_release
// CHECK: bb1
-// CHECK: release
+// CHECK: strong_retain
+// CHECK: apply
+// CHECK: strong_release
// CHECK: br
// CHECK: bb2
-// CHECK: release
-// CHECK: br
+// CHECK-NEXT: br
// CHECK: bb3
sil @hoist_silargument_release : $@convention(thin) (@owned X, @owned X) -> () {
bb0(%0 : $X, %1 : $X):
diff --git a/test/SILOptimizer/licm.sil b/test/SILOptimizer/licm.sil
index 705ff1d..08cde42 100644
--- a/test/SILOptimizer/licm.sil
+++ b/test/SILOptimizer/licm.sil
@@ -268,3 +268,51 @@
%52 = tuple ()
return %52 : $()
}
+
+
+sil @get_unknown_value : $@convention(thin) () -> Builtin.Int32
+sil @get_unknown_value2 : $@convention(thin) () -> Builtin.Int32
+
+sil @callee : $@convention(thin) (@inout Builtin.Int32) -> () {
+bb0(%0 : $*Builtin.Int32):
+ %1 = function_ref @get_unknown_value : $@convention(thin) () -> Builtin.Int32
+ %2 = apply %1() : $@convention(thin) () -> Builtin.Int32
+ store %2 to %0 : $*Builtin.Int32
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+sil @use_value : $@convention(thin) (Builtin.Int32) -> ()
+
+// Check if escape analysis figures out that the alloc_stack escapes to callee.
+//
+// CHECK-LABEL: sil @dont_hoist_aliased_load
+// CHECK: bb2:
+// CHECK-NEXT: apply
+// CHECK-NEXT: load
+// CHECK-NEXT: apply
+sil @dont_hoist_aliased_load : $@convention(thin) () -> () {
+bb0:
+ %0 = alloc_stack $Builtin.Int32
+ %1 = integer_literal $Builtin.Int32, 0
+ %3 = function_ref @callee : $@convention(thin) (@inout Builtin.Int32) -> ()
+ %5 = function_ref @use_value : $@convention(thin) (Builtin.Int32) -> ()
+ %unknown_value_fn = function_ref @get_unknown_value2 : $@convention(thin) () -> Builtin.Int32
+ store %1 to %0 : $*Builtin.Int32
+ br bb1(%0 : $*Builtin.Int32)
+
+bb1(%phi1 : $*Builtin.Int32):
+ br bb2
+
+bb2:
+ apply %3(%0) : $@convention(thin) (@inout Builtin.Int32) -> ()
+ %4 = load %phi1 : $*Builtin.Int32
+ %6 = apply %unknown_value_fn() : $@convention(thin) () -> Builtin.Int32
+ %33 = builtin "cmp_eq_Int32"(%4 : $Builtin.Int32, %6 : $Builtin.Int32) : $Builtin.Int1
+ cond_br %33, bb2, bb3
+
+bb3:
+ %9999 = tuple()
+ dealloc_stack %0 : $*Builtin.Int32
+ return %9999 : $()
+}
diff --git a/test/SILOptimizer/objectoutliner.sil b/test/SILOptimizer/objectoutliner.sil
new file mode 100644
index 0000000..5e788b0
--- /dev/null
+++ b/test/SILOptimizer/objectoutliner.sil
@@ -0,0 +1,207 @@
+// RUN: %target-sil-opt -assume-parsing-unqualified-ownership-sil -enable-sil-verify-all %s -object-outliner | %FileCheck %s
+//
+
+sil_stage canonical
+
+import Builtin
+import Swift
+
+class Obj {
+ @sil_stored var value: Int64
+ init()
+}
+
+// CHECK-LABEL: sil_global private @outline_global_simpleTv_ : $Obj = {
+// CHECK-NEXT: %0 = integer_literal $Builtin.Int64, 1
+// CHECK-NEXT: %1 = struct $Int64 (%0 : $Builtin.Int64)
+// CHECK-NEXT: %initval = object $Obj (%1 : $Int64)
+// CHECK-NEXT: }
+
+// CHECK-LABEL: sil_global private @outline_global_tailelemsTv_ : $Obj = {
+// CHECK-NEXT: %0 = integer_literal $Builtin.Int64, 3
+// CHECK-NEXT: %1 = struct $Int64 (%0 : $Builtin.Int64)
+// CHECK-NEXT: %2 = integer_literal $Builtin.Int64, 2
+// CHECK-NEXT: %3 = struct $Int64 (%2 : $Builtin.Int64)
+// CHECK-NEXT: %4 = integer_literal $Builtin.Int64, 1
+// CHECK-NEXT: %5 = struct $Int64 (%4 : $Builtin.Int64)
+// CHECK-NEXT: %initval = object $Obj (%5 : $Int64, [tail_elems] %3 : $Int64, %1 : $Int64)
+// CHECK-NEXT: }
+
+// CHECK-LABEL: sil @outline_global_simple
+// CHECK: [[G:%[0-9]+]] = global_value @outline_global_simpleTv_ : $Obj
+// CHECK: strong_retain [[G]] : $Obj
+// CHECK-NOT: store
+// CHECK: strong_release [[G]] : $Obj
+// CHECK: return
+sil @outline_global_simple : $@convention(thin) () -> () {
+bb0:
+ %1 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %7 = alloc_ref $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+
+// CHECK-LABEL: sil @outline_global_tailelems
+// CHECK: [[G:%[0-9]+]] = global_value @outline_global_tailelemsTv_ : $Obj
+// CHECK: strong_retain [[G]] : $Obj
+// CHECK-NOT: store
+// CHECK: strong_release [[G]] : $Obj
+// CHECK: return
+sil @outline_global_tailelems : $@convention(thin) () -> () {
+bb0:
+ %0 = integer_literal $Builtin.Word, 2
+ %1 = integer_literal $Builtin.Int64, 1
+ %2 = integer_literal $Builtin.Int64, 2
+ %3 = integer_literal $Builtin.Int64, 3
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %5 = struct $Int64 (%2 : $Builtin.Int64)
+ %6 = struct $Int64 (%3 : $Builtin.Int64)
+ %7 = alloc_ref [tail_elems $Int64 * %0 : $Builtin.Word] $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ %15 = ref_tail_addr %7 : $Obj, $Int64
+ store %5 to %15 : $*Int64
+ %19 = integer_literal $Builtin.Word, 1
+ %20 = index_addr %15 : $*Int64, %19 : $Builtin.Word
+ store %6 to %20 : $*Int64
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+// CHECK-LABEL: sil @dont_outline_global_double_store
+// CHECK: alloc_ref
+// CHECK: store
+// CHECK: return
+sil @dont_outline_global_double_store : $@convention(thin) () -> () {
+bb0:
+ %1 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %7 = alloc_ref $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ store %4 to %9 : $*Int64
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+// CHECK-LABEL: sil @dont_outline_global_missing_store
+// CHECK: alloc_ref
+// CHECK: return
+sil @dont_outline_global_missing_store : $@convention(thin) () -> () {
+bb0:
+ %1 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %7 = alloc_ref $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+// CHECK-LABEL: sil @dont_outline_objc_allocation
+// CHECK: alloc_ref
+// CHECK: return
+sil @dont_outline_objc_allocation : $@convention(thin) () -> () {
+bb0:
+ %1 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ // A hack, because Obj is not really an ObjC class. But for the test it should be ok.
+ %7 = alloc_ref [objc] $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+sil @take_pointer : $@convention(thin) (Builtin.RawPointer) -> ()
+
+// CHECK-LABEL: sil @dont_outline_global_unknown_addr_use
+// CHECK: alloc_ref
+// CHECK: return
+sil @dont_outline_global_unknown_addr_use : $@convention(thin) () -> () {
+bb0:
+ %1 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %7 = alloc_ref $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ %10 = address_to_pointer %9 : $*Int64 to $Builtin.RawPointer
+ %f = function_ref @take_pointer : $@convention(thin) (Builtin.RawPointer) -> ()
+ %a = apply %f(%10) : $@convention(thin) (Builtin.RawPointer) -> ()
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+// CHECK-LABEL: sil @dont_outline_global_escaping_obj
+// CHECK: alloc_ref
+// CHECK: return
+sil @dont_outline_global_escaping_obj : $@convention(thin) (@inout Obj) -> () {
+bb0(%0: $*Obj):
+ %1 = integer_literal $Builtin.Int64, 1
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %7 = alloc_ref $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ store %7 to %0 : $*Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+// CHECK-LABEL: sil @dont_outline_global_missing_tailelem_store
+// CHECK: alloc_ref
+// CHECK: return
+sil @dont_outline_global_missing_tailelem_store : $@convention(thin) () -> () {
+bb0:
+ %0 = integer_literal $Builtin.Word, 2
+ %1 = integer_literal $Builtin.Int64, 1
+ %2 = integer_literal $Builtin.Int64, 2
+ %3 = integer_literal $Builtin.Int64, 3
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %5 = struct $Int64 (%2 : $Builtin.Int64)
+ %6 = struct $Int64 (%3 : $Builtin.Int64)
+ %7 = alloc_ref [tail_elems $Int64 * %0 : $Builtin.Word] $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ %15 = ref_tail_addr %7 : $Obj, $Int64
+ store %5 to %15 : $*Int64
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+// CHECK-LABEL: sil @dont_outline_global_double_tailelem_store
+// CHECK: alloc_ref
+// CHECK: return
+sil @dont_outline_global_double_tailelem_store : $@convention(thin) () -> () {
+bb0:
+ %0 = integer_literal $Builtin.Word, 2
+ %1 = integer_literal $Builtin.Int64, 1
+ %2 = integer_literal $Builtin.Int64, 2
+ %3 = integer_literal $Builtin.Int64, 3
+ %4 = struct $Int64 (%1 : $Builtin.Int64)
+ %5 = struct $Int64 (%2 : $Builtin.Int64)
+ %6 = struct $Int64 (%3 : $Builtin.Int64)
+ %7 = alloc_ref [tail_elems $Int64 * %0 : $Builtin.Word] $Obj
+ %9 = ref_element_addr %7 : $Obj, #Obj.value
+ store %4 to %9 : $*Int64
+ %15 = ref_tail_addr %7 : $Obj, $Int64
+ store %5 to %15 : $*Int64
+ store %5 to %15 : $*Int64
+ %19 = integer_literal $Builtin.Word, 1
+ %20 = index_addr %15 : $*Int64, %19 : $Builtin.Word
+ store %6 to %20 : $*Int64
+ strong_release %7 : $Obj
+ %r = tuple ()
+ return %r : $()
+}
+
+
diff --git a/test/SILOptimizer/optionset.swift b/test/SILOptimizer/optionset.swift
new file mode 100644
index 0000000..ba74ab1
--- /dev/null
+++ b/test/SILOptimizer/optionset.swift
@@ -0,0 +1,34 @@
+// RUN: %target-swift-frontend -parse-as-library -primary-file %s -O -sil-verify-all -module-name=test -emit-sil | %FileCheck %s
+// RUN: %target-swift-frontend -parse-as-library -primary-file %s -Osize -sil-verify-all -module-name=test -emit-sil | %FileCheck %s
+// REQUIRES: swift_stdlib_no_asserts,optimized_stdlib
+
+public struct TestOptions: OptionSet {
+ public let rawValue: Int
+ public init(rawValue: Int) { self.rawValue = rawValue }
+
+ static let first = TestOptions(rawValue: 1 << 0)
+ static let second = TestOptions(rawValue: 1 << 1)
+ static let third = TestOptions(rawValue: 1 << 2)
+ static let fourth = TestOptions(rawValue: 1 << 3)
+}
+
+// CHECK: sil @{{.*}}returnTestOptions{{.*}}
+// CHECK-NEXT: bb0:
+// CHECK-NEXT: integer_literal {{.*}}, 15
+// CHECK-NEXT: struct $Int
+// CHECK-NEXT: struct $TestOptions
+// CHECK-NEXT: return
+public func returnTestOptions() -> TestOptions {
+ return [.first, .second, .third, .fourth]
+}
+
+// CHECK: alloc_global @{{.*}}globalTestOptions{{.*}}
+// CHECK-NEXT: global_addr
+// CHECK-NEXT: integer_literal {{.*}}, 15
+// CHECK-NEXT: struct $Int
+// CHECK-NEXT: struct $TestOptions
+// CHECK-NEXT: store
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+let globalTestOptions: TestOptions = [.first, .second, .third, .fourth]
+
diff --git a/test/SILOptimizer/retain_release_code_motion.sil b/test/SILOptimizer/retain_release_code_motion.sil
index cfa2047..7b5a06b 100644
--- a/test/SILOptimizer/retain_release_code_motion.sil
+++ b/test/SILOptimizer/retain_release_code_motion.sil
@@ -266,9 +266,7 @@
// CHECK-LABEL: sil @builtin_does_not_block_locally_allocated_ref
// CHECK: builtin
-// CHECK-NEXT: strong_retain
-// CHECK-NEXT: strong_release
-// CHECK: return
+// CHECK-NEXT: return
sil @builtin_does_not_block_locally_allocated_ref : $@convention(thin) () -> @owned MyArrayBuffer {
bb0:
%3 = integer_literal $Builtin.Word, 3
@@ -285,9 +283,7 @@
// CHECK: bb0
// CHECK: cond_br undef, bb1, bb2
// CHECK: bb1:
-// CHECK: strong_retain
-// CHECK: strong_release
-// CHECK: br bb3
+// CHECK-NEXT: br bb3
// CHECK: bb2:
// CHECK: strong_retain
// CHECK: apply
@@ -345,8 +341,7 @@
// Make sure release can be hoisted across memory that do not escape.
// CHECK-LABEL: sil @hoist_release_across_local_memory_use
// CHECK: bb1:
-// CHECK: strong_release
-// CHECK: br bb3
+// CHECK-NEXT: br bb3
// CHECK: bb2:
// CHECK: strong_release
// CHECK: br bb3
@@ -512,9 +507,8 @@
// CHECK: bb2:
// CHECK-NEXT: br bb2
// CHECK: bb3:
-// CHECK: strong_retain
-// CHECK: strong_release
-// CHECK: return
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
sil @handle_infinite_loop : $@convention(thin) (@inout Builtin.NativeObject) -> () {
bb0(%a : $*Builtin.NativeObject):
cond_br undef, bb1, bb3
@@ -540,8 +534,6 @@
/// One round of retain-sinking can sink only one of retains.
/// CHECK-LABEL: sil @checkRetainSinkingMultipleRounds
/// CHECK: bb9:
-/// CHECK-NEXT: retain_value %2 : $S
-/// CHECK-NEXT: release_value %2 : $S
/// CHECK-NEXT: release_value %2 : $S
/// In the ideal world, we should see a third retain_value here.
/// But it would require another round of retain sinking.
@@ -552,8 +544,6 @@
/// CHECK-MULTIPLE-RS-ROUNDS-LABEL: sil @checkRetainSinkingMultipleRounds
/// CHECK-MULTIPLE-RS-ROUNDS: bb9:
/// CHECK-MULTIPLE-RS-ROUNDS-NEXT: retain_value %2 : $S
-/// CHECK-MULTIPLE-RS-ROUNDS-NEXT: retain_value %2 : $S
-/// CHECK-MULTIPLE-RS-ROUNDS-NEXT: release_value %2 : $S
/// CHECK-MULTIPLE-RS-ROUNDS-NEXT: release_value %2 : $S
/// CHECK-MULTIPLE-RS-ROUNDS-NEXT: br bb5
diff --git a/test/SILOptimizer/side-effect.sil b/test/SILOptimizer/side-effect.sil
index 56e4482..e24a53f 100644
--- a/test/SILOptimizer/side-effect.sil
+++ b/test/SILOptimizer/side-effect.sil
@@ -42,7 +42,7 @@
struct SP {
- var value: Builtin.BridgeObject
+ var value: X
}
enum EP {
@@ -147,7 +147,7 @@
}
// CHECK-LABEL: sil @release_owned
-// CHECK: <func=rw+-,param0=-;alloc;trap;readrc>
+// CHECK: <func=,param0=->
sil @release_owned : $@convention(thin) (@owned X) -> () {
bb0(%0 : $X):
strong_release %0 : $X
@@ -291,7 +291,8 @@
%ee = unchecked_enum_data %0 : $EP, #EP.data!enumelt.1
%te = tuple_extract %ee : $(SP, Builtin.Int1), 0
%se = struct_extract %te : $SP, #SP.value
- %utbc = unchecked_trivial_bit_cast %se : $Builtin.BridgeObject to $Builtin.RawPointer
+ %urc = unchecked_ref_cast %se : $X to $Builtin.BridgeObject
+ %utbc = unchecked_trivial_bit_cast %urc : $Builtin.BridgeObject to $Builtin.RawPointer
%idx = integer_literal $Builtin.Int32, 0
%irp = index_raw_pointer %utbc : $Builtin.RawPointer, %idx : $Builtin.Int32
%pta = pointer_to_address %irp : $Builtin.RawPointer to [strict] $*Int32
diff --git a/test/SILOptimizer/simplify_cfg.sil b/test/SILOptimizer/simplify_cfg.sil
index 01dd1aa..41422c0 100644
--- a/test/SILOptimizer/simplify_cfg.sil
+++ b/test/SILOptimizer/simplify_cfg.sil
@@ -2982,3 +2982,38 @@
br bb5
}
+// CHECK-LABEL: sil @test_constant_folding
+// CHECK: [[R:%[0-9]+]] = integer_literal $Builtin.Int32, 30
+// CHECK: return [[R]] : $Builtin.Int32
+// CHECK-NEXT: }
+sil @test_constant_folding : $@convention(thin) () -> Builtin.Int32 {
+bb0:
+ %0 = integer_literal $Builtin.Int1, 0
+ %20 = integer_literal $Builtin.Int32, 20
+ %30 = integer_literal $Builtin.Int32, 30
+ cond_br %0, bb1, bb2
+bb1:
+ br bb3(%20 : $Builtin.Int32)
+bb2:
+ br bb3(%30 : $Builtin.Int32)
+
+bb3(%2 : $Builtin.Int32):
+ %3 = builtin "cmp_slt_Int32"(%2 : $Builtin.Int32, %30 : $Builtin.Int32) : $Builtin.Int1
+ cond_br %3, bb4, bb5
+bb4:
+ br bb6(%20 : $Builtin.Int32)
+bb5:
+ br bb6(%30 : $Builtin.Int32)
+
+bb6(%4 : $Builtin.Int32):
+ %5 = builtin "cmp_slt_Int32"(%4 : $Builtin.Int32, %30 : $Builtin.Int32) : $Builtin.Int1
+ cond_br %5, bb7, bb8
+bb7:
+ br bb9(%20 : $Builtin.Int32)
+bb8:
+ br bb9(%30 : $Builtin.Int32)
+
+bb9(%6 : $Builtin.Int32):
+ return %6 : $Builtin.Int32
+}
+
diff --git a/test/SILOptimizer/stack_promotion_array_literal.swift b/test/SILOptimizer/stack_promotion_array_literal.swift
index 1960fc6..1f6b4e2 100644
--- a/test/SILOptimizer/stack_promotion_array_literal.swift
+++ b/test/SILOptimizer/stack_promotion_array_literal.swift
@@ -1,8 +1,5 @@
// RUN: %target-swift-frontend -parse-as-library -O -module-name=test %s -emit-sil | %FileCheck %s
// REQUIRES: swift_stdlib_no_asserts,optimized_stdlib
-// XFAIL: linux
-// rdar://problem/34758773
-
// This is an end-to-end test to check if the array literal in the loop is
// stack promoted.
diff --git a/test/SILOptimizer/static_arrays.swift b/test/SILOptimizer/static_arrays.swift
index dbc62d0..adfce54 100644
--- a/test/SILOptimizer/static_arrays.swift
+++ b/test/SILOptimizer/static_arrays.swift
@@ -8,13 +8,6 @@
// Check if the optimizer is able to convert array literals to statically initialized arrays.
-// CHECK-LABEL: sil_global private @{{.*}}main{{.*}} = {
-// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 100
-// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 101
-// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 102
-// CHECK: object {{.*}} ({{[^,]*}}, [tail_elems] {{[^,]*}}, {{[^,]*}}, {{[^,]*}})
-// CHECK-NEXT: }
-
// CHECK-LABEL: outlined variable #0 of arrayLookup(_:)
// CHECK-NEXT: sil_global private @{{.*}}arrayLookup{{.*}} = {
// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 10
@@ -57,11 +50,10 @@
// CHECK: object {{.*}} ({{[^,]*}}, [tail_elems] {{[^,]*}}, {{[^,]*}})
// CHECK-NEXT: }
-// CHECK-LABEL: outlined variable #0 of overwriteLiteral(_:)
-// CHECK-NEXT: sil_global private @{{.*}}overwriteLiteral{{.*}} = {
-// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 1
-// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 2
-// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 3
+// CHECK-LABEL: sil_global private @{{.*}}main{{.*}} = {
+// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 100
+// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 101
+// CHECK-DAG: integer_literal $Builtin.Int{{[0-9]+}}, 102
// CHECK: object {{.*}} ({{[^,]*}}, [tail_elems] {{[^,]*}}, {{[^,]*}}, {{[^,]*}})
// CHECK-NEXT: }
@@ -120,18 +112,6 @@
gg = [227, 228]
}
-// CHECK-LABEL: sil {{.*}}overwriteLiteral{{.*}} : $@convention(thin) (Int) -> @owned Array<Int> {
-// CHECK: global_value @{{.*}}overwriteLiteral{{.*}}
-// CHECK: is_unique
-// CHECK: store
-// CHECK: return
-@inline(never)
-func overwriteLiteral(_ x: Int) -> [Int] {
- var a = [ 1, 2, 3 ]
- a[x] = 0
- return a
-}
-
struct Empty { }
// CHECK-LABEL: sil {{.*}}arrayWithEmptyElements{{.*}} : $@convention(thin) () -> @owned Array<Empty> {
@@ -155,10 +135,6 @@
storeArray()
// CHECK-OUTPUT-NEXT: [227, 228]
print(gg!)
-// CHECK-OUTPUT-NEXT: [0, 2, 3]
-print(overwriteLiteral(0))
-// CHECK-OUTPUT-NEXT: [1, 0, 3]
-print(overwriteLiteral(1))
diff --git a/test/Serialization/Recovery/typedefs-in-protocols.swift b/test/Serialization/Recovery/typedefs-in-protocols.swift
index 559d74b..0a26746 100644
--- a/test/Serialization/Recovery/typedefs-in-protocols.swift
+++ b/test/Serialization/Recovery/typedefs-in-protocols.swift
@@ -21,7 +21,7 @@
// for the witness table slot for 'lastMethod()'. If the layout here
// changes, please check that offset 11 is still correct.
// CHECK-IR-NOT: ret
- // CHECK-IR: [[SLOT:%.+]] = getelementptr inbounds i8*, i8** {{%.+}}, i32 11
+ // CHECK-IR: [[SLOT:%.+]] = getelementptr inbounds i8*, i8** {{%.+}}, i32 12
// CHECK-IR-NOT: ret
// CHECK-IR: [[RAW_METHOD:%.+]] = load i8*, i8** [[SLOT]]
// CHECK-IR-NOT: ret
@@ -37,7 +37,7 @@
// for the witness table slot for 'lastMethod()'. If the layout here
// changes, please check that offset 11 is still correct.
// CHECK-IR-NOT: ret
- // CHECK-IR: [[SLOT:%.+]] = getelementptr inbounds i8*, i8** %T.Proto, i32 11
+ // CHECK-IR: [[SLOT:%.+]] = getelementptr inbounds i8*, i8** %T.Proto, i32 12
// CHECK-IR-NOT: ret
// CHECK-IR: [[RAW_METHOD:%.+]] = load i8*, i8** [[SLOT]]
// CHECK-IR-NOT: ret
diff --git a/test/stdlib/Filter.swift b/test/stdlib/Filter.swift
index ee9d631..cb894a5 100644
--- a/test/stdlib/Filter.swift
+++ b/test/stdlib/Filter.swift
@@ -73,4 +73,36 @@
expectEqual(30, count)
}
+FilterTests.test("Double filter type/Sequence") {
+ func foldingLevels<S : Sequence>(_ xs: S) {
+ var result = xs.lazy.filter { _ in true }.filter { _ in true }
+ expectType(LazyFilterSequence<S>.self, &result)
+ }
+ foldingLevels(Array(0..<10))
+
+ func backwardCompatible<S : Sequence>(_ xs: S) {
+ typealias ExpectedType = LazyFilterSequence<LazyFilterSequence<S>>
+ var result: ExpectedType = xs.lazy
+ .filter { _ in true }.filter { _ in true }
+ expectType(ExpectedType.self, &result)
+ }
+ backwardCompatible(Array(0..<10))
+}
+
+FilterTests.test("Double filter type/Collection") {
+ func foldingLevels<C : Collection>(_ xs: C) {
+ var result = xs.lazy.filter { _ in true }.filter { _ in true }
+ expectType(LazyFilterCollection<C>.self, &result)
+ }
+ foldingLevels(Array(0..<10))
+
+ func backwardCompatible<C : Collection>(_ xs: C) {
+ typealias ExpectedType = LazyFilterCollection<LazyFilterCollection<C>>
+ var result: ExpectedType = xs.lazy
+ .filter { _ in true }.filter { _ in true }
+ expectType(ExpectedType.self, &result)
+ }
+ backwardCompatible(Array(0..<10))
+}
+
runAllTests()
diff --git a/test/stdlib/Integers.swift.gyb b/test/stdlib/Integers.swift.gyb
index c3addef..0bdc526 100644
--- a/test/stdlib/Integers.swift.gyb
+++ b/test/stdlib/Integers.swift.gyb
@@ -63,6 +63,170 @@
typealias DWord = Int${word_bits*2}
typealias UDWord = UInt${word_bits*2}
+struct MockBinaryInteger<T : BinaryInteger> {
+ var _value: T
+
+ init(_ value: T) {
+ _value = value
+ }
+}
+
+extension MockBinaryInteger : CustomStringConvertible {
+ var description: String {
+ return _value.description
+ }
+}
+
+extension MockBinaryInteger : ExpressibleByIntegerLiteral {
+ init(integerLiteral value: T.IntegerLiteralType) {
+ _value = T(integerLiteral: value)
+ }
+}
+
+extension MockBinaryInteger : Comparable {
+ static func < (lhs: MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) -> Bool {
+ return lhs._value < rhs._value
+ }
+
+ static func == (
+ lhs: MockBinaryInteger<T>, rhs: MockBinaryInteger<T>
+ ) -> Bool {
+ return lhs._value == rhs._value
+ }
+}
+
+extension MockBinaryInteger : Hashable {
+ var hashValue: Int {
+ return _value.hashValue
+ }
+}
+
+extension MockBinaryInteger : BinaryInteger {
+ static var isSigned: Bool {
+ return T.isSigned
+ }
+
+ init<Source>(_ source: Source) where Source : BinaryFloatingPoint {
+ _value = T(source)
+ }
+
+ init?<Source>(exactly source: Source) where Source : BinaryFloatingPoint {
+ guard let result = T(exactly: source) else { return nil }
+ _value = result
+ }
+
+ init<Source>(_ source: Source) where Source : BinaryInteger {
+ _value = T(source)
+ }
+
+ init?<Source>(exactly source: Source) where Source : BinaryInteger {
+ guard let result = T(exactly: source) else { return nil }
+ _value = result
+ }
+
+ init<Source>(truncatingIfNeeded source: Source) where Source : BinaryInteger {
+ _value = T(truncatingIfNeeded: source)
+ }
+
+ init<Source>(clamping source: Source) where Source : BinaryInteger {
+ _value = T(clamping: source)
+ }
+
+ var magnitude: MockBinaryInteger<T.Magnitude> {
+ return MockBinaryInteger<T.Magnitude>(_value.magnitude)
+ }
+
+ var words: T.Words {
+ return _value.words
+ }
+
+ var bitWidth: Int {
+ return _value.bitWidth
+ }
+
+ var trailingZeroBitCount: Int {
+ return _value.trailingZeroBitCount
+ }
+
+ static func + (
+ lhs: MockBinaryInteger<T>, rhs: MockBinaryInteger<T>
+ ) -> MockBinaryInteger<T> {
+ return MockBinaryInteger(lhs._value + rhs._value)
+ }
+
+ static func += (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value += rhs._value
+ }
+
+ static func - (
+ lhs: MockBinaryInteger<T>, rhs: MockBinaryInteger<T>
+ ) -> MockBinaryInteger<T> {
+ return MockBinaryInteger(lhs._value - rhs._value)
+ }
+
+ static func -= (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value -= rhs._value
+ }
+
+ static func * (
+ lhs: MockBinaryInteger<T>, rhs: MockBinaryInteger<T>
+ ) -> MockBinaryInteger<T> {
+ return MockBinaryInteger(lhs._value * rhs._value)
+ }
+
+ static func *= (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value *= rhs._value
+ }
+
+ static func / (
+ lhs: MockBinaryInteger<T>, rhs: MockBinaryInteger<T>
+ ) -> MockBinaryInteger<T> {
+ return MockBinaryInteger(lhs._value / rhs._value)
+ }
+
+ static func /= (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value /= rhs._value
+ }
+
+ static func % (
+ lhs: MockBinaryInteger<T>, rhs: MockBinaryInteger<T>
+ ) -> MockBinaryInteger<T> {
+ return MockBinaryInteger(lhs._value % rhs._value)
+ }
+
+ static func %= (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value %= rhs._value
+ }
+
+ static func &= (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value &= rhs._value
+ }
+
+ static func |= (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value |= rhs._value
+ }
+
+ static func ^= (lhs: inout MockBinaryInteger<T>, rhs: MockBinaryInteger<T>) {
+ lhs._value ^= rhs._value
+ }
+
+ static prefix func ~ (x: MockBinaryInteger<T>) -> MockBinaryInteger<T> {
+ return MockBinaryInteger(~x._value)
+ }
+
+ static func >>= <RHS>(
+ lhs: inout MockBinaryInteger<T>, rhs: RHS
+ ) where RHS : BinaryInteger {
+ lhs._value >>= rhs
+ }
+
+ static func <<= <RHS>(
+ lhs: inout MockBinaryInteger<T>, rhs: RHS
+ ) where RHS : BinaryInteger {
+ lhs._value <<= rhs
+ }
+}
+
import StdlibUnittest
@@ -652,4 +816,48 @@
% end
% end
}
+
+tests.test("binaryLogarithm/generic") {
+ expectEqual(
+ Int((42 as MockBinaryInteger<Int8>)._binaryLogarithm()),
+ Int((42 as Int8)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<UInt8>)._binaryLogarithm()),
+ Int((42 as UInt8)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<Int16>)._binaryLogarithm()),
+ Int((42 as Int16)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<UInt16>)._binaryLogarithm()),
+ Int((42 as UInt16)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<Int32>)._binaryLogarithm()),
+ Int((42 as Int32)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<UInt32>)._binaryLogarithm()),
+ Int((42 as UInt32)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<Int64>)._binaryLogarithm()),
+ Int((42 as Int64)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<UInt64>)._binaryLogarithm()),
+ Int((42 as UInt64)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<Int>)._binaryLogarithm()),
+ (42 as Int)._binaryLogarithm())
+ expectEqual(
+ Int((42 as MockBinaryInteger<UInt>)._binaryLogarithm()),
+ Int((42 as UInt)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<DoubleWidth<Int>>)._binaryLogarithm()),
+ Int((42 as DoubleWidth<Int>)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<DoubleWidth<UInt>>)._binaryLogarithm()),
+ Int((42 as DoubleWidth<UInt>)._binaryLogarithm()))
+ expectEqual(
+ Int((42 as MockBinaryInteger<DoubleWidth<DoubleWidth<Int>>>)
+ ._binaryLogarithm()),
+ Int((42 as DoubleWidth<DoubleWidth<Int>>)._binaryLogarithm()))
+}
+
runAllTests()
diff --git a/test/stdlib/Map.swift b/test/stdlib/Map.swift
index 0946f18..1d43894 100644
--- a/test/stdlib/Map.swift
+++ b/test/stdlib/Map.swift
@@ -100,5 +100,40 @@
// CHECK-NEXT: [2, 4, 6, 8]
print(Array(m1))
+// lazy.map.map chain should fold two layers of LazyMapSequence
+func foldingLevelsSequence<S : Sequence>(_ xs: S) {
+ let result = xs.lazy.map { $0 }.map { $0 }
+ print(type(of: result))
+}
+// CHECK-NEXT: LazyMapSequence<Array<Int>, Int>
+foldingLevelsSequence(Array(0..<10))
+
+// ... but the old way should also be available given explicit type context
+func backwardCompatibleSequence<S : Sequence>(_ xs: S) {
+ typealias ExpectedType = LazyMapSequence<LazyMapSequence<S, S.Element>, S.Element>
+ let result: ExpectedType = xs.lazy.map { $0 }.map { $0 }
+ print(type(of: result))
+}
+// CHECK-NEXT: LazyMapSequence<LazyMapSequence<Array<Int>, Int>, Int>
+backwardCompatibleSequence(Array(0..<10))
+
+// lazy.map.map chain should fold two layers of LazyMapCollection
+func foldingLevelsCollection<C : Collection>(_ xs: C) {
+ let result = xs.lazy.map { $0 }.map { $0 }
+ print(type(of: result))
+}
+// CHECK-NEXT: LazyMapCollection<Array<Int>, Int>
+foldingLevelsCollection(Array(0..<10))
+
+// ... but the old way should also be available given explicit type context
+func backwardCompatibleCollection<C : Collection>(_ xs: C) {
+ typealias ExpectedType =
+ LazyMapCollection<LazyMapCollection<C, C.Element>, C.Element>
+ let result: ExpectedType = xs.lazy.map { $0 }.map { $0 }
+ print(type(of: result))
+}
+// CHECK-NEXT: LazyMapCollection<LazyMapCollection<Array<Int>, Int>, Int>
+backwardCompatibleCollection(Array(0..<10))
+
// CHECK-NEXT: all done.
print("all done.")
diff --git a/unittests/runtime/LongTests/LongRefcounting.cpp b/unittests/runtime/LongTests/LongRefcounting.cpp
index ba5da09..d954a89 100644
--- a/unittests/runtime/LongTests/LongRefcounting.cpp
+++ b/unittests/runtime/LongTests/LongRefcounting.cpp
@@ -330,6 +330,112 @@
}
+/////////////////////////////////////////////////
+// Max weak retain count and overflow checking //
+/////////////////////////////////////////////////
+
+static HeapObjectSideTableEntry *weakRetainALot(TestObject *object, uint64_t count) {
+ if (count == 0) return nullptr;
+
+ auto side = object->refCounts.formWeakReference();
+ for (uint64_t i = 1; i < count; i++) {
+ side = side->incrementWeak();
+ EXPECT_ALLOCATED(object);
+ }
+ return side;
+}
+
+template <bool atomic>
+static void weakReleaseALot(HeapObjectSideTableEntry *side, uint64_t count) {
+ for (uint64_t i = 0; i < count; i++) {
+ if (atomic) side->decrementWeak();
+ else side->decrementWeakNonAtomic();
+ }
+}
+
+// Maximum legal weak retain count. 32 bits with no implicit +1.
+const uint64_t maxWRC = (1ULL << 32) - 1;
+
+TEST(LongRefcountingTest, weak_retain_max) {
+ // Don't generate millions of failures if something goes wrong.
+ ::testing::FLAGS_gtest_break_on_failure = true;
+
+ size_t deinited = 0;
+ auto object = allocTestObject(&deinited, 1);
+
+ // RC is 1. WRC is 1.
+ // Weak-retain to maxWRC.
+ // Release and verify deallocated object and live side table.
+ // Weak-release back to 1, then weak-release and verify deallocated.
+ EXPECT_EQ(swift_retainCount(object), 1u);
+ EXPECT_EQ(object->refCounts.getWeakCount(), 1u);
+ auto side = weakRetainALot(object, maxWRC - 1);
+ EXPECT_EQ(side->getWeakCount(), maxWRC);
+
+ EXPECT_EQ(0u, deinited);
+ EXPECT_ALLOCATED(object);
+ EXPECT_ALLOCATED(side);
+ swift_release(object);
+ EXPECT_EQ(1u, deinited);
+ EXPECT_UNALLOCATED(object);
+ EXPECT_ALLOCATED(side);
+
+ weakReleaseALot<true>(side, maxWRC - 2);
+ EXPECT_EQ(side->getWeakCount(), 1u);
+
+ EXPECT_ALLOCATED(side);
+ side->decrementWeak();
+ EXPECT_UNALLOCATED(side);
+}
+
+TEST(LongRefcountingTest, weak_retain_overflow_DeathTest) {
+ // Don't generate millions of failures if something goes wrong.
+ ::testing::FLAGS_gtest_break_on_failure = true;
+
+ size_t deinited = 0;
+ auto object = allocTestObject(&deinited, 1);
+
+  // WRC is 1. Weak-retain to maxWRC, then weak-retain again and verify overflow error.
+ weakRetainALot(object, maxWRC - 1);
+ EXPECT_EQ(0u, deinited);
+ EXPECT_ALLOCATED(object);
+ ASSERT_DEATH(weakRetainALot(object, 1),
+ "Object's weak reference was retained too many times");
+}
+
+TEST(LongRefcountingTest, nonatomic_weak_retain_max) {
+ // Don't generate millions of failures if something goes wrong.
+ ::testing::FLAGS_gtest_break_on_failure = true;
+
+ size_t deinited = 0;
+ auto object = allocTestObject(&deinited, 1);
+
+ // RC is 1. WRC is 1.
+ // Weak-retain to maxWRC.
+ // Release and verify deallocated object and live side table.
+ // Weak-release back to 1, then weak-release and verify deallocated.
+ EXPECT_EQ(swift_retainCount(object), 1u);
+ EXPECT_EQ(object->refCounts.getWeakCount(), 1u);
+ auto side = weakRetainALot(object, maxWRC - 1);
+ EXPECT_EQ(side->getWeakCount(), maxWRC);
+
+ EXPECT_EQ(0u, deinited);
+ EXPECT_ALLOCATED(object);
+ EXPECT_ALLOCATED(side);
+ swift_release(object);
+ EXPECT_EQ(1u, deinited);
+ EXPECT_UNALLOCATED(object);
+ EXPECT_ALLOCATED(side);
+
+ weakReleaseALot<false>(side, maxWRC - 2);
+ EXPECT_EQ(side->getWeakCount(), 1u);
+
+ EXPECT_ALLOCATED(side);
+ side->decrementWeak();
+ EXPECT_UNALLOCATED(side);
+}
+
+
//////////////////////
// Object lifecycle //
//////////////////////
diff --git a/unittests/runtime/Metadata.cpp b/unittests/runtime/Metadata.cpp
index af9493e..97b2981 100644
--- a/unittests/runtime/Metadata.cpp
+++ b/unittests/runtime/Metadata.cpp
@@ -906,11 +906,11 @@
void * const *instantiationArgs) {
EXPECT_EQ(type, nullptr);
- EXPECT_EQ(((void **) instantiatedTable)[0], (void*) 123);
- EXPECT_EQ(((void **) instantiatedTable)[1], (void*) 234);
+ EXPECT_EQ(((void **) instantiatedTable)[1], (void*) 123);
+ EXPECT_EQ(((void **) instantiatedTable)[2], (void*) 234);
// The last witness is computed dynamically at instantiation time.
- ((void **) instantiatedTable)[2] = (void *) 345;
+ ((void **) instantiatedTable)[3] = (void *) 345;
auto conditionalTables =
reinterpret_cast<const WitnessTableSlice *>(instantiationArgs);
@@ -975,6 +975,7 @@
GenericWitnessTable::PrivateDataType tablePrivateData4;
const void *witnesses[] = {
+ (void *) 0, // protocol descriptor
(void *) 123,
(void *) 234,
(void *) 0, // filled in by instantiator function
@@ -996,7 +997,7 @@
// Conformance provides all requirements, and we don't have an
// instantiator, so we can just return the pattern.
{
- tableStorage1.WitnessTableSizeInWords = 5;
+ tableStorage1.WitnessTableSizeInWords = 6;
tableStorage1.WitnessTablePrivateSizeInWords = 0;
initializeRelativePointer(&tableStorage1.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage1.Pattern, witnesses);
@@ -1019,7 +1020,7 @@
// Conformance provides all requirements, but we have private storage
// and an initializer, so we must instantiate.
{
- tableStorage2.WitnessTableSizeInWords = 5;
+ tableStorage2.WitnessTableSizeInWords = 6;
tableStorage2.WitnessTablePrivateSizeInWords = 1 + 1;
initializeRelativePointer(&tableStorage2.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage2.Pattern, witnesses);
@@ -1042,15 +1043,15 @@
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[-1],
reinterpret_cast<void *>(678));
- EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[0],
- reinterpret_cast<void *>(123));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[1],
- reinterpret_cast<void *>(234));
+ reinterpret_cast<void *>(123));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[2],
- reinterpret_cast<void *>(345));
+ reinterpret_cast<void *>(234));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[3],
- reinterpret_cast<void *>(456));
+ reinterpret_cast<void *>(345));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[4],
+ reinterpret_cast<void *>(456));
+ EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[5],
reinterpret_cast<void *>(567));
return instantiatedTable;
@@ -1059,7 +1060,7 @@
// Conformance needs one default requirement to be filled in
{
- tableStorage3.WitnessTableSizeInWords = 4;
+ tableStorage3.WitnessTableSizeInWords = 5;
tableStorage3.WitnessTablePrivateSizeInWords = 1 + 1;
initializeRelativePointer(&tableStorage3.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage3.Pattern, witnesses);
@@ -1081,15 +1082,15 @@
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[-1],
reinterpret_cast<void *>(678));
- EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[0],
- reinterpret_cast<void *>(123));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[1],
- reinterpret_cast<void *>(234));
+ reinterpret_cast<void *>(123));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[2],
- reinterpret_cast<void *>(345));
+ reinterpret_cast<void *>(234));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[3],
- reinterpret_cast<void *>(456));
+ reinterpret_cast<void *>(345));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[4],
+ reinterpret_cast<void *>(456));
+ EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[5],
reinterpret_cast<void *>(fakeDefaultWitness2));
return instantiatedTable;
@@ -1099,7 +1100,7 @@
// Third case: conformance needs both default requirements
// to be filled in
{
- tableStorage4.WitnessTableSizeInWords = 3;
+ tableStorage4.WitnessTableSizeInWords = 4;
tableStorage4.WitnessTablePrivateSizeInWords = 1 + 1;
initializeRelativePointer(&tableStorage4.Protocol, &testProtocol.descriptor);
initializeRelativePointer(&tableStorage4.Pattern, witnesses);
@@ -1121,15 +1122,15 @@
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[-1],
reinterpret_cast<void *>(678));
- EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[0],
- reinterpret_cast<void *>(123));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[1],
- reinterpret_cast<void *>(234));
+ reinterpret_cast<void *>(123));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[2],
- reinterpret_cast<void *>(345));
+ reinterpret_cast<void *>(234));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[3],
- reinterpret_cast<void *>(fakeDefaultWitness1));
+ reinterpret_cast<void *>(345));
EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[4],
+ reinterpret_cast<void *>(fakeDefaultWitness1));
+ EXPECT_EQ(reinterpret_cast<void * const *>(instantiatedTable)[5],
reinterpret_cast<void *>(fakeDefaultWitness2));
return instantiatedTable;
diff --git a/utils/build-script b/utils/build-script
index 22d0091..34a3e78 100755
--- a/utils/build-script
+++ b/utils/build-script
@@ -553,6 +553,8 @@
# have a separate bot that checks for leaks.
if platform.system() == 'Linux':
os.environ['ASAN_OPTIONS'] = 'detect_leaks=0'
+ if args.enable_ubsan:
+ impl_args += ["--enable-ubsan"]
# If we have lsan, we need to export our suppression list. The actual
# passing in of the LSAN flag is done via the normal cmake method. We
diff --git a/utils/build-script-impl b/utils/build-script-impl
index 8e70b8f..e8fbeba 100755
--- a/utils/build-script-impl
+++ b/utils/build-script-impl
@@ -83,6 +83,7 @@
swiftpm-build-type "Debug" "the build variant for swiftpm"
llbuild-enable-assertions "1" "enable assertions in llbuild"
enable-asan "" "enable Address Sanitizer"
+ enable-ubsan "" "enable Undefined Behavior Sanitizer"
cmake "" "path to the cmake binary"
distcc "" "use distcc in pump mode"
distcc-pump "" "the path to distcc pump executable. This argument is required if distcc is set."
@@ -1840,6 +1841,13 @@
-enableAddressSanitizer=YES
)
fi
+ if [[ "${ENABLE_UBSAN}" ]] ; then
+ lldb_xcodebuild_options=(
+ "${lldb_xcodebuild_options[@]}"
+ ENABLE_UNDEFINED_BEHAVIOR_SANITIZER="YES"
+ -enableUndefinedBehaviorSanitizer=YES
+ )
+ fi
}
#