Merge pull request #22016 from BalestraPatrick/create-benchmark-script
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 00a8fab..556ac56 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -180,6 +180,14 @@
"Generate .swiftinterface files alongside .swiftmodule files"
TRUE)
+# Allow building Swift with Clang's Profile Guided Optimization
+if(SWIFT_PROFDATA_FILE AND EXISTS ${SWIFT_PROFDATA_FILE})
+ if(NOT CMAKE_C_COMPILER_ID MATCHES Clang)
+ message(FATAL_ERROR "SWIFT_PROFDATA_FILE can only be specified when compiling with clang")
+ endif()
+ add_definitions("-fprofile-instr-use=${SWIFT_PROFDATA_FILE}")
+endif()
+
#
# User-configurable Android specific options.
#
@@ -977,12 +985,24 @@
add_dependencies(dispatch libdispatch-install)
add_dependencies(BlocksRuntime libdispatch-install)
+ if(SWIFT_HOST_VARIANT_SDK STREQUAL WINDOWS)
+ set(SOURCEKIT_RUNTIME_DIR bin)
+ else()
+ set(SOURCEKIT_RUNTIME_DIR lib)
+ endif()
swift_install_in_component(sourcekit-inproc
FILES
$<TARGET_FILE:dispatch>
$<TARGET_FILE:BlocksRuntime>
- DESTINATION
- lib${LLVM_LIBDIR_SUFFIX})
+ DESTINATION ${SOURCEKIT_RUNTIME_DIR})
+ if(SWIFT_HOST_VARIANT_SDK STREQUAL WINDOWS)
+ swift_install_in_component(sourcekit-inproc
+ FILES
+ $<TARGET_LINKER_FILE:dispatch>
+ $<TARGET_LINKER_FILE:BlocksRuntime>
+ DESTINATION lib)
+ endif()
+
# FIXME(compnerd) this should be taken care of by the
# INTERFACE_INCLUDE_DIRECTORIES above
diff --git a/benchmark/multi-source/PrimsSplit/Prims_main.swift b/benchmark/multi-source/PrimsSplit/Prims_main.swift
index 380eca5..25d2877 100644
--- a/benchmark/multi-source/PrimsSplit/Prims_main.swift
+++ b/benchmark/multi-source/PrimsSplit/Prims_main.swift
@@ -15,11 +15,12 @@
public let PrimsSplit = BenchmarkInfo(
name: "PrimsSplit",
runFunction: run_PrimsSplit,
- tags: [.validation, .algorithm])
+ tags: [.validation, .algorithm],
+ legacyFactor: 5)
@inline(never)
public func run_PrimsSplit(_ N: Int) {
- for _ in 1...5*N {
+ for _ in 1...N {
let nodes : [Int] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
diff --git a/benchmark/single-source/ErrorHandling.swift b/benchmark/single-source/ErrorHandling.swift
index 8e3ca66..d072907 100644
--- a/benchmark/single-source/ErrorHandling.swift
+++ b/benchmark/single-source/ErrorHandling.swift
@@ -15,7 +15,8 @@
public let ErrorHandling = BenchmarkInfo(
name: "ErrorHandling",
runFunction: run_ErrorHandling,
- tags: [.validation, .exceptions])
+ tags: [.validation, .exceptions],
+ legacyFactor: 10)
enum PizzaError : Error {
case Pepperoni, Olives, Anchovy
@@ -35,7 +36,7 @@
@inline(never)
public func run_ErrorHandling(_ N: Int) {
- for _ in 1...5000*N {
+ for _ in 1...500*N {
do {
_ = try doSomething()
} catch _ {
@@ -43,4 +44,3 @@
}
}
}
-
diff --git a/benchmark/single-source/Hanoi.swift b/benchmark/single-source/Hanoi.swift
index 9f21acd..152c3d9 100644
--- a/benchmark/single-source/Hanoi.swift
+++ b/benchmark/single-source/Hanoi.swift
@@ -18,7 +18,8 @@
public let Hanoi = BenchmarkInfo(
name: "Hanoi",
runFunction: run_Hanoi,
- tags: [.validation, .algorithm])
+ tags: [.validation, .algorithm],
+ legacyFactor: 10)
struct Move {
var from: String
@@ -46,7 +47,7 @@
@inline(never)
public func run_Hanoi(_ N: Int) {
- for _ in 1...100*N {
+ for _ in 1...10*N {
let hanoi: TowersOfHanoi = TowersOfHanoi()
hanoi.solve(10, start: "A", auxiliary: "B", end: "C")
}
diff --git a/benchmark/single-source/Hash.swift b/benchmark/single-source/Hash.swift
index 366a8e0..1d576ed 100644
--- a/benchmark/single-source/Hash.swift
+++ b/benchmark/single-source/Hash.swift
@@ -18,7 +18,8 @@
public let HashTest = BenchmarkInfo(
name: "HashTest",
runFunction: run_HashTest,
- tags: [.validation, .algorithm])
+ tags: [.validation, .algorithm],
+ legacyFactor: 10)
class Hash {
/// C'tor.
@@ -581,7 +582,7 @@
"The quick brown fox jumps over the lazy dog." : "ef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c"]
let size = 50
- for _ in 1...10*N {
+ for _ in 1...N {
// Check for precomputed values.
let MD = MD5()
for (K, V) in TestMD5 {
diff --git a/benchmark/single-source/LazyFilter.swift b/benchmark/single-source/LazyFilter.swift
index d762b03..f9348b3 100644
--- a/benchmark/single-source/LazyFilter.swift
+++ b/benchmark/single-source/LazyFilter.swift
@@ -15,21 +15,29 @@
import TestsUtils
public let LazyFilter = [
- BenchmarkInfo(name: "LazilyFilteredArrays2", runFunction: run_LazilyFilteredArrays, tags: [.validation, .api, .Array],
- setUpFunction: { blackHole(filteredRange) }),
- BenchmarkInfo(name: "LazilyFilteredRange", runFunction: run_LazilyFilteredRange, tags: [.validation, .api, .Array]),
+ BenchmarkInfo(name: "LazilyFilteredArrays2",
+ runFunction: run_LazilyFilteredArrays,
+ tags: [.validation, .api, .Array],
+ setUpFunction: { blackHole(filteredRange) },
+ legacyFactor: 100),
+ BenchmarkInfo(name: "LazilyFilteredRange",
+ runFunction: run_LazilyFilteredRange,
+ tags: [.validation, .api, .Array],
+ legacyFactor: 10),
BenchmarkInfo(
name: "LazilyFilteredArrayContains",
runFunction: run_LazilyFilteredArrayContains,
tags: [.validation, .api, .Array],
- setUpFunction: setup_LazilyFilteredArrayContains,
- tearDownFunction: teardown_LazilyFilteredArrayContains),
+ setUpFunction: {
+ multiplesOfThree = Array(1..<500).lazy.filter { $0 % 3 == 0 } },
+ tearDownFunction: { multiplesOfThree = nil },
+ legacyFactor: 100),
]
@inline(never)
public func run_LazilyFilteredRange(_ N: Int) {
var res = 123
- let c = (1..<1_000_000).lazy.filter { $0 % 7 == 0 }
+ let c = (1..<100_000).lazy.filter { $0 % 7 == 0 }
for _ in 1...N {
res += Array(c).count
res -= Array(c).count
@@ -37,7 +45,7 @@
CheckResults(res == 123)
}
-let filteredRange = (1..<100_000).map({[$0]}).lazy.filter { $0.first! % 7 == 0 }
+let filteredRange = (1..<1_000).map({[$0]}).lazy.filter { $0.first! % 7 == 0 }
@inline(never)
public func run_LazilyFilteredArrays(_ N: Int) {
@@ -52,22 +60,14 @@
fileprivate var multiplesOfThree: LazyFilterCollection<Array<Int>>?
-fileprivate func setup_LazilyFilteredArrayContains() {
- multiplesOfThree = Array(1..<5_000).lazy.filter { $0 % 3 == 0 }
-}
-
-fileprivate func teardown_LazilyFilteredArrayContains() {
- multiplesOfThree = nil
-}
-
@inline(never)
fileprivate func run_LazilyFilteredArrayContains(_ N: Int) {
let xs = multiplesOfThree!
for _ in 1...N {
var filteredCount = 0
- for candidate in 1..<5_000 {
+ for candidate in 1..<500 {
filteredCount += xs.contains(candidate) ? 1 : 0
}
- CheckResults(filteredCount == 1666)
+ CheckResults(filteredCount == 166)
}
}
diff --git a/benchmark/single-source/LinkedList.swift b/benchmark/single-source/LinkedList.swift
index 2cfbfa6..343b7b7 100644
--- a/benchmark/single-source/LinkedList.swift
+++ b/benchmark/single-source/LinkedList.swift
@@ -19,8 +19,13 @@
public var LinkedList = BenchmarkInfo(
name: "LinkedList",
runFunction: run_LinkedList,
- tags: [.runtime, .cpubench, .refcount]
-)
+ tags: [.runtime, .cpubench, .refcount],
+ setUpFunction: { for i in 0..<size { head = Node(n:head, d:i) } },
+ tearDownFunction: { head = Node(n:nil, d:0) },
+ legacyFactor: 40)
+
+let size = 100
+var head = Node(n:nil, d:0)
final class Node {
var next: Node?
@@ -34,16 +39,10 @@
@inline(never)
public func run_LinkedList(_ N: Int) {
- let size = 100
- var head = Node(n:nil, d:0)
- for i in 0..<size {
- head = Node(n:head, d:i)
- }
-
var sum = 0
let ref_result = size*(size-1)/2
var ptr = head
- for _ in 1...5000*N {
+ for _ in 1...125*N {
ptr = head
sum = 0
while let nxt = ptr.next {
diff --git a/benchmark/single-source/MapReduce.swift b/benchmark/single-source/MapReduce.swift
index d232fc0..f5269d1 100644
--- a/benchmark/single-source/MapReduce.swift
+++ b/benchmark/single-source/MapReduce.swift
@@ -13,25 +13,41 @@
import TestsUtils
import Foundation
+let t: [BenchmarkCategory] = [.validation, .algorithm]
+let ts: [BenchmarkCategory] = [.validation, .algorithm, .String]
+
public let MapReduce = [
- BenchmarkInfo(name: "MapReduce", runFunction: run_MapReduce, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceAnyCollection", runFunction: run_MapReduceAnyCollection, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceAnyCollectionShort", runFunction: run_MapReduceAnyCollectionShort, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceClass2", runFunction: run_MapReduceClass, tags: [.validation, .algorithm],
+ BenchmarkInfo(name: "MapReduce", runFunction: run_MapReduce, tags: t),
+ BenchmarkInfo(name: "MapReduceAnyCollection",
+ runFunction: run_MapReduceAnyCollection, tags: t),
+ BenchmarkInfo(name: "MapReduceAnyCollectionShort",
+ runFunction: run_MapReduceAnyCollectionShort, tags: t, legacyFactor: 10),
+ BenchmarkInfo(name: "MapReduceClass2",
+ runFunction: run_MapReduceClass, tags: t,
setUpFunction: { boxedNumbers(1000) }, tearDownFunction: releaseDecimals),
- BenchmarkInfo(name: "MapReduceClassShort2", runFunction: run_MapReduceClassShort, tags: [.validation, .algorithm],
+ BenchmarkInfo(name: "MapReduceClassShort2",
+ runFunction: run_MapReduceClassShort, tags: t,
setUpFunction: { boxedNumbers(10) }, tearDownFunction: releaseDecimals),
- BenchmarkInfo(name: "MapReduceNSDecimalNumber", runFunction: run_MapReduceNSDecimalNumber, tags: [.validation, .algorithm],
+ BenchmarkInfo(name: "MapReduceNSDecimalNumber",
+ runFunction: run_MapReduceNSDecimalNumber, tags: t,
setUpFunction: { decimals(1000) }, tearDownFunction: releaseDecimals),
- BenchmarkInfo(name: "MapReduceNSDecimalNumberShort", runFunction: run_MapReduceNSDecimalNumberShort, tags: [.validation, .algorithm],
+ BenchmarkInfo(name: "MapReduceNSDecimalNumberShort",
+ runFunction: run_MapReduceNSDecimalNumberShort, tags: t,
setUpFunction: { decimals(10) }, tearDownFunction: releaseDecimals),
- BenchmarkInfo(name: "MapReduceLazyCollection", runFunction: run_MapReduceLazyCollection, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceLazyCollectionShort", runFunction: run_MapReduceLazyCollectionShort, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceLazySequence", runFunction: run_MapReduceLazySequence, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceSequence", runFunction: run_MapReduceSequence, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceShort", runFunction: run_MapReduceShort, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "MapReduceShortString", runFunction: run_MapReduceShortString, tags: [.validation, .algorithm, .String]),
- BenchmarkInfo(name: "MapReduceString", runFunction: run_MapReduceString, tags: [.validation, .algorithm, .String]),
+ BenchmarkInfo(name: "MapReduceLazyCollection",
+ runFunction: run_MapReduceLazyCollection, tags: t),
+ BenchmarkInfo(name: "MapReduceLazyCollectionShort",
+ runFunction: run_MapReduceLazyCollectionShort, tags: t),
+ BenchmarkInfo(name: "MapReduceLazySequence",
+ runFunction: run_MapReduceLazySequence, tags: t),
+ BenchmarkInfo(name: "MapReduceSequence",
+ runFunction: run_MapReduceSequence, tags: t),
+ BenchmarkInfo(name: "MapReduceShort",
+ runFunction: run_MapReduceShort, tags: t, legacyFactor: 10),
+ BenchmarkInfo(name: "MapReduceShortString",
+ runFunction: run_MapReduceShortString, tags: ts),
+ BenchmarkInfo(name: "MapReduceString",
+ runFunction: run_MapReduceString, tags: ts),
]
#if _runtime(_ObjC)
@@ -83,7 +99,7 @@
let numbers = AnyCollection([Int](0..<10))
var c = 0
- for _ in 1...N*10000 {
+ for _ in 1...N*1_000 {
let mapped = numbers.map { $0 &+ 5 }
c += mapped.reduce(0, &+)
}
@@ -95,7 +111,7 @@
var numbers = [Int](0..<10)
var c = 0
- for _ in 1...N*10000 {
+ for _ in 1...N*1_000 {
numbers = numbers.map { $0 &+ 5 }
c += numbers.reduce(0, &+)
}
diff --git a/benchmark/single-source/MonteCarloE.swift b/benchmark/single-source/MonteCarloE.swift
index 20e1233..b38090a 100644
--- a/benchmark/single-source/MonteCarloE.swift
+++ b/benchmark/single-source/MonteCarloE.swift
@@ -22,10 +22,11 @@
public let MonteCarloE = BenchmarkInfo(
name: "MonteCarloE",
runFunction: run_MonteCarloE,
- tags: [.validation, .algorithm])
+ tags: [.validation, .algorithm],
+ legacyFactor: 20)
public func run_MonteCarloE(scale: Int) {
- let N = 200000*scale
+ let N = 10_000*scale
var intervals = [Bool](repeating: false, count: N)
for _ in 1...N {
let pos = Int(UInt(truncatingIfNeeded: Random())%UInt(N))
@@ -37,5 +38,5 @@
CheckResults(numEmptyIntervals != N)
let e_estimate = Double(N)/Double(numEmptyIntervals)
let e = 2.71828
- CheckResults(abs(e_estimate - e) < 0.1)
+ CheckResults(abs(e_estimate - e) < 0.2)
}
diff --git a/benchmark/single-source/MonteCarloPi.swift b/benchmark/single-source/MonteCarloPi.swift
index 74c5ba3..94a48fb 100644
--- a/benchmark/single-source/MonteCarloPi.swift
+++ b/benchmark/single-source/MonteCarloPi.swift
@@ -15,12 +15,13 @@
public let MonteCarloPi = BenchmarkInfo(
name: "MonteCarloPi",
runFunction: run_MonteCarloPi,
- tags: [.validation, .algorithm])
+ tags: [.validation, .algorithm],
+ legacyFactor: 125)
public func run_MonteCarloPi(scale: Int) {
var pointsInside = 0
let r = 10000
- let N = 500000*scale
+ let N = 4_000*scale
for _ in 1...N {
let x = Int(truncatingIfNeeded: Random())%r
let y = Int(truncatingIfNeeded: Random())%r
@@ -30,5 +31,5 @@
}
let pi_estimate: Double = Double(pointsInside)*4.0/Double(N)
let pi = 3.1415
- CheckResults(abs(pi_estimate - pi) < 0.1)
+ CheckResults(abs(pi_estimate - pi) < 0.2)
}
diff --git a/benchmark/single-source/NSDictionaryCastToSwift.swift b/benchmark/single-source/NSDictionaryCastToSwift.swift
index c9d0550..a6a6f20 100644
--- a/benchmark/single-source/NSDictionaryCastToSwift.swift
+++ b/benchmark/single-source/NSDictionaryCastToSwift.swift
@@ -21,14 +21,15 @@
public let NSDictionaryCastToSwift = BenchmarkInfo(
name: "NSDictionaryCastToSwift",
runFunction: run_NSDictionaryCastToSwift,
- tags: [.validation, .api, .Dictionary, .bridging])
+ tags: [.validation, .api, .Dictionary, .bridging],
+ legacyFactor: 10)
@inline(never)
public func run_NSDictionaryCastToSwift(_ N: Int) {
#if _runtime(_ObjC)
let NSDict = NSDictionary()
var swiftDict = [String: NSObject]()
- for _ in 1...10000*N {
+ for _ in 1...1_000*N {
swiftDict = NSDict as! [String: NSObject]
if !swiftDict.isEmpty {
break
diff --git a/benchmark/single-source/NibbleSort.swift b/benchmark/single-source/NibbleSort.swift
index 8d1cc91..bba6441 100644
--- a/benchmark/single-source/NibbleSort.swift
+++ b/benchmark/single-source/NibbleSort.swift
@@ -7,7 +7,8 @@
public var NibbleSort = BenchmarkInfo(
name: "NibbleSort",
runFunction: run_NibbleSort,
- tags: [.validation]
+ tags: [.validation],
+ legacyFactor: 10
)
@inline(never)
@@ -16,7 +17,7 @@
let v: UInt64 = 0xbadbeef
var c = NibbleCollection(v)
- for _ in 1...10000*N {
+ for _ in 1...1_000*N {
c.val = v
c.sort()
diff --git a/benchmark/single-source/NopDeinit.swift b/benchmark/single-source/NopDeinit.swift
index eb1192f..df79d63 100644
--- a/benchmark/single-source/NopDeinit.swift
+++ b/benchmark/single-source/NopDeinit.swift
@@ -16,7 +16,8 @@
public let NopDeinit = BenchmarkInfo(
name: "NopDeinit",
runFunction: run_NopDeinit,
- tags: [.regression])
+ tags: [.regression],
+ legacyFactor: 100)
class X<T : Comparable> {
let deinitIters = 10000
@@ -32,7 +33,7 @@
public func run_NopDeinit(_ N: Int) {
for _ in 1...N {
var arr :[X<Int>] = []
- let size = 500
+ let size = 5
for i in 1...size { arr.append(X(i)) }
arr.removeAll()
CheckResults(arr.count == 0)
diff --git a/benchmark/single-source/ObserverClosure.swift b/benchmark/single-source/ObserverClosure.swift
index debbf03..8bd1c5d 100644
--- a/benchmark/single-source/ObserverClosure.swift
+++ b/benchmark/single-source/ObserverClosure.swift
@@ -16,7 +16,8 @@
public let ObserverClosure = BenchmarkInfo(
name: "ObserverClosure",
runFunction: run_ObserverClosure,
- tags: [.validation])
+ tags: [.validation],
+ legacyFactor: 10)
class Observer {
@inline(never)
@@ -41,7 +42,7 @@
public func run_ObserverClosure(_ iterations: Int) {
let signal = Signal()
let observer = Observer()
- for _ in 0 ..< 10_000 * iterations {
+ for _ in 0 ..< 1_000 * iterations {
signal.subscribe { i in observer.receive(i) }
}
signal.send(1)
diff --git a/benchmark/single-source/ObserverForwarderStruct.swift b/benchmark/single-source/ObserverForwarderStruct.swift
index 54334e4..4faed86 100644
--- a/benchmark/single-source/ObserverForwarderStruct.swift
+++ b/benchmark/single-source/ObserverForwarderStruct.swift
@@ -15,7 +15,8 @@
public let ObserverForwarderStruct = BenchmarkInfo(
name: "ObserverForwarderStruct",
runFunction: run_ObserverForwarderStruct,
- tags: [.validation])
+ tags: [.validation],
+ legacyFactor: 5)
class Observer {
@inline(never)
@@ -52,7 +53,7 @@
public func run_ObserverForwarderStruct(_ iterations: Int) {
let signal = Signal()
let observer = Observer()
- for _ in 0 ..< 10_000 * iterations {
+ for _ in 0 ..< 2_000 * iterations {
signal.subscribe(Forwarder(object: observer))
}
signal.send(1)
diff --git a/benchmark/single-source/ObserverPartiallyAppliedMethod.swift b/benchmark/single-source/ObserverPartiallyAppliedMethod.swift
index 6aea587..78ae7fe 100644
--- a/benchmark/single-source/ObserverPartiallyAppliedMethod.swift
+++ b/benchmark/single-source/ObserverPartiallyAppliedMethod.swift
@@ -15,7 +15,8 @@
public let ObserverPartiallyAppliedMethod = BenchmarkInfo(
name: "ObserverPartiallyAppliedMethod",
runFunction: run_ObserverPartiallyAppliedMethod,
- tags: [.validation])
+ tags: [.validation],
+ legacyFactor: 20)
class Observer {
@inline(never)
@@ -40,7 +41,7 @@
public func run_ObserverPartiallyAppliedMethod(_ iterations: Int) {
let signal = Signal()
let observer = Observer()
- for _ in 0 ..< 10_000 * iterations {
+ for _ in 0 ..< 500 * iterations {
signal.subscribe(observer.receive)
}
signal.send(1)
diff --git a/benchmark/single-source/ObserverUnappliedMethod.swift b/benchmark/single-source/ObserverUnappliedMethod.swift
index 52a4b3e..0f39bfd 100644
--- a/benchmark/single-source/ObserverUnappliedMethod.swift
+++ b/benchmark/single-source/ObserverUnappliedMethod.swift
@@ -15,7 +15,8 @@
public let ObserverUnappliedMethod = BenchmarkInfo(
name: "ObserverUnappliedMethod",
runFunction: run_ObserverUnappliedMethod,
- tags: [.validation])
+ tags: [.validation],
+ legacyFactor: 10)
class Observer {
@inline(never)
@@ -53,7 +54,7 @@
public func run_ObserverUnappliedMethod(_ iterations: Int) {
let signal = Signal()
let observer = Observer()
- for _ in 0 ..< 10_000 * iterations {
+ for _ in 0 ..< 1_000 * iterations {
let forwarder = Forwarder(object: observer, method: Observer.receive)
signal.subscribe(forwarder)
}
diff --git a/benchmark/single-source/OpaqueConsumingUsers.swift b/benchmark/single-source/OpaqueConsumingUsers.swift
index d92d921..fbd505f 100644
--- a/benchmark/single-source/OpaqueConsumingUsers.swift
+++ b/benchmark/single-source/OpaqueConsumingUsers.swift
@@ -16,7 +16,8 @@
name: "OpaqueConsumingUsers",
runFunction: run_OpaqueConsumingUsers,
tags: [.regression, .abstraction, .refcount],
- setUpFunction: setup_OpaqueConsumingUsers)
+ setUpFunction: setup_OpaqueConsumingUsers,
+ legacyFactor: 20)
// This test exercises the ability of the optimizer to propagate the +1 from a
// consuming argument of a non-inlineable through multiple non-inlinable call
@@ -82,7 +83,7 @@
public func run_OpaqueConsumingUsers(_ N: Int) {
let d = data.unsafelyUnwrapped
let u = user.unsafelyUnwrapped
- for _ in 0..<N*200000 {
+ for _ in 0..<N*10_000 {
callFrame4(d, u)
}
}
diff --git a/benchmark/single-source/Phonebook.swift b/benchmark/single-source/Phonebook.swift
index 5c76ba9..8618fbd 100644
--- a/benchmark/single-source/Phonebook.swift
+++ b/benchmark/single-source/Phonebook.swift
@@ -18,7 +18,8 @@
name: "Phonebook",
runFunction: run_Phonebook,
tags: [.validation, .api, .String],
- setUpFunction: { blackHole(names) }
+ setUpFunction: { blackHole(names) },
+ legacyFactor: 7
)
let words = [
@@ -27,14 +28,6 @@
"Paul", "Mark", "George", "Steven", "Kenneth", "Andrew", "Edward", "Brian",
"Joshua", "Kevin", "Ronald", "Timothy", "Jason", "Jeffrey", "Gary", "Ryan",
"Nicholas", "Eric", "Stephen", "Jacob", "Larry", "Frank", "Jonathan", "Scott",
- "Justin", "Raymond", "Brandon", "Gregory", "Samuel", "Patrick", "Benjamin",
- "Jack", "Dennis", "Jerry", "Alexander", "Tyler", "Douglas", "Henry", "Peter",
- "Walter", "Aaron", "Jose", "Adam", "Harold", "Zachary", "Nathan", "Carl",
- "Kyle", "Arthur", "Gerald", "Lawrence", "Roger", "Albert", "Keith", "Jeremy",
- "Terry", "Joe", "Sean", "Willie", "Jesse", "Ralph", "Billy", "Austin", "Bruce",
- "Christian", "Roy", "Bryan", "Eugene", "Louis", "Harry", "Wayne", "Ethan",
- "Jordan", "Russell", "Alan", "Philip", "Randy", "Juan", "Howard", "Vincent",
- "Bobby", "Dylan", "Johnny", "Phillip", "Craig"
]
let names: [Record] = {
// The list of names in the phonebook.
diff --git a/benchmark/single-source/PointerArithmetics.swift b/benchmark/single-source/PointerArithmetics.swift
index 37a33ba..8ca1a4d 100644
--- a/benchmark/single-source/PointerArithmetics.swift
+++ b/benchmark/single-source/PointerArithmetics.swift
@@ -13,7 +13,10 @@
import TestsUtils
public let PointerArithmetics = [
- BenchmarkInfo(name: "PointerArithmetics", runFunction: run_PointerArithmetics, tags: [.validation, .api]),
+ BenchmarkInfo(name: "PointerArithmetics",
+ runFunction: run_PointerArithmetics,
+ tags: [.validation, .api],
+ legacyFactor: 100),
]
@inline(never)
@@ -23,7 +26,7 @@
var c = 0
withUnsafePointer(to: &numbers) {
$0.withMemoryRebound(to: Int.self, capacity: 10) { ptr in
- for _ in 1...N*10_000_000 {
+ for _ in 1...N*100_000 {
c += (ptr + getInt(10) - getInt(5)).pointee
}
}
diff --git a/benchmark/single-source/PopFront.swift b/benchmark/single-source/PopFront.swift
index bb76da0..e9e2b4a 100644
--- a/benchmark/single-source/PopFront.swift
+++ b/benchmark/single-source/PopFront.swift
@@ -13,19 +13,23 @@
import TestsUtils
public let PopFront = [
- BenchmarkInfo(name: "PopFrontArray", runFunction: run_PopFrontArray, tags: [.validation, .api, .Array]),
- BenchmarkInfo(name: "PopFrontUnsafePointer", runFunction: run_PopFrontUnsafePointer, tags: [.validation, .api]),
+ BenchmarkInfo(name: "PopFrontArray",
+ runFunction: run_PopFrontArray,
+ tags: [.validation, .api, .Array],
+ legacyFactor: 20),
+ BenchmarkInfo(name: "PopFrontUnsafePointer",
+ runFunction: run_PopFrontUnsafePointer,
+ tags: [.validation, .api],
+ legacyFactor: 100),
]
-let reps = 1
let arrayCount = 1024
@inline(never)
public func run_PopFrontArray(_ N: Int) {
let orig = Array(repeating: 1, count: arrayCount)
var a = [Int]()
- for _ in 1...20*N {
- for _ in 1...reps {
+ for _ in 1...N {
var result = 0
a.append(contentsOf: orig)
while a.count != 0 {
@@ -33,7 +37,6 @@
a.remove(at: 0)
}
CheckResults(result == arrayCount)
- }
}
}
@@ -41,8 +44,7 @@
public func run_PopFrontUnsafePointer(_ N: Int) {
var orig = Array(repeating: 1, count: arrayCount)
let a = UnsafeMutablePointer<Int>.allocate(capacity: arrayCount)
- for _ in 1...100*N {
- for _ in 1...reps {
+ for _ in 1...N {
for i in 0..<arrayCount {
a[i] = orig[i]
}
@@ -54,8 +56,6 @@
count -= 1
}
CheckResults(result == arrayCount)
- }
}
a.deallocate()
}
-
diff --git a/benchmark/single-source/PopFrontGeneric.swift b/benchmark/single-source/PopFrontGeneric.swift
index d05e6ec..f6d8521 100644
--- a/benchmark/single-source/PopFrontGeneric.swift
+++ b/benchmark/single-source/PopFrontGeneric.swift
@@ -15,9 +15,9 @@
public let PopFrontArrayGeneric = BenchmarkInfo(
name: "PopFrontArrayGeneric",
runFunction: run_PopFrontArrayGeneric,
- tags: [.validation, .api, .Array])
+ tags: [.validation, .api, .Array],
+ legacyFactor: 20)
-let reps = 1
let arrayCount = 1024
// This test case exposes rdar://17440222 which caused rdar://17974483 (popFront
@@ -50,8 +50,7 @@
public func run_PopFrontArrayGeneric(_ N: Int) {
let orig = Array(repeating: 1, count: arrayCount)
var a = [Int]()
- for _ in 1...20*N {
- for _ in 1...reps {
+ for _ in 1...N {
var result = 0
a.append(contentsOf: orig)
while a.count != 0 {
@@ -59,6 +58,5 @@
myArrayReplace(&a, 0..<1, EmptyCollection())
}
CheckResults(result == arrayCount)
- }
}
}
diff --git a/benchmark/single-source/Prims.swift b/benchmark/single-source/Prims.swift
index 0404286..3906a89 100644
--- a/benchmark/single-source/Prims.swift
+++ b/benchmark/single-source/Prims.swift
@@ -25,7 +25,8 @@
public let Prims = BenchmarkInfo(
name: "Prims",
runFunction: run_Prims,
- tags: [.validation, .algorithm])
+ tags: [.validation, .algorithm],
+ legacyFactor: 5)
class PriorityQueue {
final var heap: Array<EdgeCost>
@@ -219,7 +220,7 @@
@inline(never)
public func run_Prims(_ N: Int) {
- for _ in 1...5*N {
+ for _ in 1...N {
let nodes : [Int] = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
diff --git a/benchmark/single-source/Queue.swift b/benchmark/single-source/Queue.swift
index 46e5cf4..7b31023 100644
--- a/benchmark/single-source/Queue.swift
+++ b/benchmark/single-source/Queue.swift
@@ -17,15 +17,15 @@
runFunction: run_QueueGeneric,
tags: [.validation, .api],
setUpFunction: { buildWorkload() },
- tearDownFunction: nil)
+ legacyFactor: 10)
public let QueueConcrete = BenchmarkInfo(
name: "QueueConcrete",
runFunction: run_QueueConcrete,
tags: [.validation, .api],
setUpFunction: { buildWorkload() },
- tearDownFunction: nil)
-
+ legacyFactor: 10)
+
// TODO: remove when there is a native equivalent in the std lib
extension RangeReplaceableCollection where Self: BidirectionalCollection {
public mutating func popLast() -> Element? {
@@ -40,14 +40,14 @@
internal var _in: Storage
internal var _out: Storage
-
+
public init() {
_in = Storage()
_out = Storage()
}
}
-extension Queue {
+extension Queue {
public mutating func enqueue(_ newElement: Element) {
_in.append(newElement)
}
@@ -72,7 +72,7 @@
CheckResults(j == elements.count*2)
}
-let n = 10_000
+let n = 1_000
let workload = (0..<n).map { "\($0): A long enough string to defeat the SSO, or so I hope." }
public func buildWorkload() {
@@ -90,14 +90,14 @@
public struct ConcreteQueue {
internal var _in: [String]
internal var _out: [String]
-
+
public init() {
_in = Array()
_out = Array()
}
}
-extension ConcreteQueue {
+extension ConcreteQueue {
public mutating func enqueue(_ newElement: String) {
_in.append(newElement)
}
@@ -128,4 +128,3 @@
testConcreteQueue(elements: workload)
}
}
-
diff --git a/benchmark/single-source/RGBHistogram.swift b/benchmark/single-source/RGBHistogram.swift
index ea16523..01a50d1 100644
--- a/benchmark/single-source/RGBHistogram.swift
+++ b/benchmark/single-source/RGBHistogram.swift
@@ -19,14 +19,20 @@
import TestsUtils
public let RGBHistogram = [
- BenchmarkInfo(name: "RGBHistogram", runFunction: run_RGBHistogram, tags: [.validation, .algorithm]),
- BenchmarkInfo(name: "RGBHistogramOfObjects", runFunction: run_RGBHistogramOfObjects, tags: [.validation, .algorithm]),
+ BenchmarkInfo(name: "RGBHistogram",
+ runFunction: run_RGBHistogram,
+ tags: [.validation, .algorithm],
+ legacyFactor: 10),
+ BenchmarkInfo(name: "RGBHistogramOfObjects",
+ runFunction: run_RGBHistogramOfObjects,
+ tags: [.validation, .algorithm],
+ legacyFactor: 100),
]
@inline(never)
public func run_RGBHistogram(_ N: Int) {
var histogram = [(key: rrggbb_t, value: Int)]()
- for _ in 1...100*N {
+ for _ in 1...10*N {
histogram = createSortedSparseRGBHistogram(samples)
if !isCorrectHistogram(histogram) {
break
@@ -164,7 +170,7 @@
@inline(never)
public func run_RGBHistogramOfObjects(_ N: Int) {
var histogram = [(key: Box<rrggbb_t>, value: Box<Int>)]()
- for _ in 1...100*N {
+ for _ in 1...N {
histogram = createSortedSparseRGBHistogramOfObjects(samples)
if !isCorrectHistogramOfObjects(histogram) {
break
@@ -172,5 +178,3 @@
}
CheckResults(isCorrectHistogramOfObjects(histogram))
}
-
-
diff --git a/benchmark/single-source/RandomShuffle.swift b/benchmark/single-source/RandomShuffle.swift
index 9d26ee6..975f553 100644
--- a/benchmark/single-source/RandomShuffle.swift
+++ b/benchmark/single-source/RandomShuffle.swift
@@ -19,11 +19,9 @@
public let RandomShuffle = [
BenchmarkInfo(name: "RandomShuffleDef2", runFunction: run_RandomShuffleDef,
- tags: [.api],
- setUpFunction: { blackHole(numbersDef) }),
+ tags: [.api], setUpFunction: { blackHole(numbersDef) }, legacyFactor: 4),
BenchmarkInfo(name: "RandomShuffleLCG2", runFunction: run_RandomShuffleLCG,
- tags: [.api],
- setUpFunction: { blackHole(numbersLCG) }),
+ tags: [.api], setUpFunction: { blackHole(numbersLCG) }, legacyFactor: 16),
]
/// A linear congruential PRNG.
@@ -41,8 +39,8 @@
}
}
-var numbersDef: [Int] = Array(0...10_000)
-var numbersLCG: [Int] = Array(0...100_000)
+var numbersDef: [Int] = Array(0...2_500)
+var numbersLCG: [Int] = Array(0...6_250)
@inline(never)
public func run_RandomShuffleDef(_ N: Int) {
diff --git a/benchmark/single-source/RandomValues.swift b/benchmark/single-source/RandomValues.swift
index f8fe741..34eaf83 100644
--- a/benchmark/single-source/RandomValues.swift
+++ b/benchmark/single-source/RandomValues.swift
@@ -19,21 +19,25 @@
//
public let RandomValues = [
- BenchmarkInfo(name: "RandomIntegersDef", runFunction: run_RandomIntegersDef, tags: [.api]),
- BenchmarkInfo(name: "RandomIntegersLCG", runFunction: run_RandomIntegersLCG, tags: [.api]),
- BenchmarkInfo(name: "RandomDoubleDef", runFunction: run_RandomDoubleDef, tags: [.api]),
- BenchmarkInfo(name: "RandomDoubleLCG", runFunction: run_RandomDoubleLCG, tags: [.api]),
+ BenchmarkInfo(name: "RandomIntegersDef", runFunction: run_RandomIntegersDef,
+ tags: [.api], legacyFactor: 100),
+ BenchmarkInfo(name: "RandomIntegersLCG", runFunction: run_RandomIntegersLCG,
+ tags: [.api]),
+ BenchmarkInfo(name: "RandomDoubleDef", runFunction: run_RandomDoubleDef,
+ tags: [.api], legacyFactor: 100),
+ BenchmarkInfo(name: "RandomDoubleLCG", runFunction: run_RandomDoubleLCG,
+ tags: [.api], legacyFactor: 2),
]
/// A linear congruential PRNG.
struct LCRNG: RandomNumberGenerator {
private var state: UInt64
-
+
init(seed: Int) {
state = UInt64(truncatingIfNeeded: seed)
for _ in 0..<10 { _ = next() }
}
-
+
mutating func next() -> UInt64 {
state = 2862933555777941757 &* state &+ 3037000493
return state
@@ -44,7 +48,7 @@
public func run_RandomIntegersDef(_ N: Int) {
for _ in 0 ..< N {
var x = 0
- for _ in 0 ..< 100_000 {
+ for _ in 0 ..< 1_000 {
x &+= Int.random(in: 0...10_000)
}
blackHole(x)
@@ -67,7 +71,7 @@
public func run_RandomDoubleDef(_ N: Int) {
for _ in 0 ..< N {
var x = 0.0
- for _ in 0 ..< 100_000 {
+ for _ in 0 ..< 1_000 {
x += Double.random(in: -1000...1000)
}
blackHole(x)
@@ -79,7 +83,7 @@
for _ in 0 ..< N {
var x = 0.0
var generator = LCRNG(seed: 0)
- for _ in 0 ..< 100_000 {
+ for _ in 0 ..< 50_000 {
x += Double.random(in: -1000...1000, using: &generator)
}
blackHole(x)
diff --git a/benchmark/single-source/RangeReplaceableCollectionPlusDefault.swift b/benchmark/single-source/RangeReplaceableCollectionPlusDefault.swift
index 7169bb1..bc4992a 100644
--- a/benchmark/single-source/RangeReplaceableCollectionPlusDefault.swift
+++ b/benchmark/single-source/RangeReplaceableCollectionPlusDefault.swift
@@ -7,7 +7,8 @@
public var RangeReplaceableCollectionPlusDefault = BenchmarkInfo(
name: "RangeReplaceableCollectionPlusDefault",
runFunction: run_RangeReplaceableCollectionPlusDefault,
- tags: [.validation]
+ tags: [.validation],
+ legacyFactor: 4
)
@inline(never)
@@ -18,7 +19,7 @@
var a = [Int]()
var b = [Int]()
- for _ in 1...1000*N {
+ for _ in 1...250*N {
let a2: Array = mapSome(strings, toInt)
let b2 = mapSome(strings, toInt)
a = a2
diff --git a/benchmark/single-source/ReduceInto.swift b/benchmark/single-source/ReduceInto.swift
index b29b48c..8ee2e9d 100644
--- a/benchmark/single-source/ReduceInto.swift
+++ b/benchmark/single-source/ReduceInto.swift
@@ -14,7 +14,7 @@
import Foundation
public let ReduceInto = [
- BenchmarkInfo(name: "FilterEvenUsingReduce", runFunction: run_FilterEvenUsingReduce, tags: [.validation, .api]),
+ BenchmarkInfo(name: "FilterEvenUsingReduce", runFunction: run_FilterEvenUsingReduce, tags: [.validation, .api], legacyFactor: 10),
BenchmarkInfo(name: "FilterEvenUsingReduceInto", runFunction: run_FilterEvenUsingReduceInto, tags: [.validation, .api]),
BenchmarkInfo(name: "FrequenciesUsingReduce", runFunction: run_FrequenciesUsingReduce, tags: [.validation, .api]),
BenchmarkInfo(name: "FrequenciesUsingReduceInto", runFunction: run_FrequenciesUsingReduceInto, tags: [.validation, .api]),
@@ -27,7 +27,7 @@
@inline(never)
public func run_SumUsingReduce(_ N: Int) {
let numbers = [Int](0..<1000)
-
+
var c = 0
for _ in 1...N*1000 {
c = c &+ numbers.reduce(0) { (acc: Int, num: Int) -> Int in
@@ -40,7 +40,7 @@
@inline(never)
public func run_SumUsingReduceInto(_ N: Int) {
let numbers = [Int](0..<1000)
-
+
var c = 0
for _ in 1...N*1000 {
c = c &+ numbers.reduce(into: 0) { (acc: inout Int, num: Int) in
@@ -55,9 +55,9 @@
@inline(never)
public func run_FilterEvenUsingReduce(_ N: Int) {
let numbers = [Int](0..<100)
-
+
var c = 0
- for _ in 1...N*100 {
+ for _ in 1...N*10 {
let a = numbers.reduce([]) { (acc: [Int], num: Int) -> [Int] in
var a = acc
if num % 2 == 0 {
@@ -73,7 +73,7 @@
@inline(never)
public func run_FilterEvenUsingReduceInto(_ N: Int) {
let numbers = [Int](0..<100)
-
+
var c = 0
for _ in 1...N*100 {
let a = numbers.reduce(into: []) { (acc: inout [Int], num: Int) in
@@ -91,7 +91,7 @@
@inline(never)
public func run_FrequenciesUsingReduce(_ N: Int) {
let s = "thequickbrownfoxjumpsoverthelazydogusingasmanycharacteraspossible123456789"
-
+
var c = 0
for _ in 1...N*100 {
let a = s.reduce([:]) {
@@ -108,7 +108,7 @@
@inline(never)
public func run_FrequenciesUsingReduceInto(_ N: Int) {
let s = "thequickbrownfoxjumpsoverthelazydogusingasmanycharacteraspossible123456789"
-
+
var c = 0
for _ in 1...N*100 {
let a = s.reduce(into: [:]) {
diff --git a/cmake/modules/AddSwift.cmake b/cmake/modules/AddSwift.cmake
index d1eac0d..9663137 100644
--- a/cmake/modules/AddSwift.cmake
+++ b/cmake/modules/AddSwift.cmake
@@ -1849,12 +1849,6 @@
set(swiftlib_swift_compile_private_frameworks_flag "-Fsystem" "${SWIFT_SDK_${sdk}_ARCH_${arch}_PATH}/System/Library/PrivateFrameworks/")
endif()
- if("${sdk}" STREQUAL WINDOWS)
- if(arch STREQUAL x86_64)
- set(swiftlib_swift_compile_flags_arch -Xcc -D_AMD64_)
- endif()
- endif()
-
# Add this library variant.
_add_swift_library_single(
${VARIANT_NAME}
@@ -1974,6 +1968,12 @@
WORLD_READ)
endif()
+ set(optional_arg)
+ if(sdk IN_LIST SWIFT_APPLE_PLATFORMS)
+ # Allow installation of stdlib without building all variants on Darwin.
+ set(optional_arg "OPTIONAL")
+ endif()
+
if(sdk STREQUAL WINDOWS AND CMAKE_SYSTEM_NAME STREQUAL Windows)
swift_install_in_component("${SWIFTLIB_INSTALL_IN_COMPONENT}"
TARGETS ${name}-windows-${SWIFT_PRIMARY_VARIANT_ARCH}
@@ -1985,7 +1985,8 @@
swift_install_in_component("${SWIFTLIB_INSTALL_IN_COMPONENT}"
FILES "${UNIVERSAL_LIBRARY_NAME}"
DESTINATION "lib${LLVM_LIBDIR_SUFFIX}/${resource_dir}/${resource_dir_sdk_subdir}"
- PERMISSIONS ${file_permissions})
+ PERMISSIONS ${file_permissions}
+ "${optional_arg}")
endif()
if(sdk STREQUAL WINDOWS)
foreach(arch ${SWIFT_SDK_WINDOWS_ARCHITECTURES})
@@ -2032,7 +2033,8 @@
PERMISSIONS
OWNER_READ OWNER_WRITE
GROUP_READ
- WORLD_READ)
+ WORLD_READ
+ "${optional_arg}")
endif()
# Add Swift standard library targets as dependencies to the top-level
@@ -2201,83 +2203,6 @@
PROPERTIES FOLDER "Swift executables")
endfunction()
-# Add an executable for the host machine.
-#
-# Usage:
-# add_swift_executable(name
-# [DEPENDS dep1 ...]
-# [LLVM_COMPONENT_DEPENDS comp1 ...]
-# [FILE_DEPENDS target1 ...]
-# [LINK_LIBRARIES target1 ...]
-# [EXCLUDE_FROM_ALL]
-# [DONT_STRIP_NON_MAIN_SYMBOLS]
-# [DISABLE_ASLR]
-# source1 [source2 source3 ...])
-#
-# name
-# Name of the executable (e.g., swift).
-#
-# LIBRARIES
-# Libraries this executable depends on, without variant suffixes.
-#
-# LLVM_COMPONENT_DEPENDS
-# LLVM components this executable depends on.
-#
-# FILE_DEPENDS
-# Additional files this executable depends on.
-#
-# LINK_LIBRARIES
-# Libraries to link with.
-#
-# EXCLUDE_FROM_ALL
-# Whether to exclude this executable from the ALL_BUILD target.
-#
-# DONT_STRIP_NON_MAIN_SYMBOLS
-# Should we not strip non main symbols.
-#
-# DISABLE_ASLR
-# Should we compile with -Wl,-no_pie so that ASLR is disabled?
-#
-# source1 ...
-# Sources to add into this executable.
-#
-# Note:
-# Host executables are not given a variant suffix. To build an executable for
-# each SDK and ARCH variant, use add_swift_target_executable.
-function(add_swift_executable name)
- # Parse the arguments we were given.
- cmake_parse_arguments(SWIFTEXE
- "EXCLUDE_FROM_ALL;DONT_STRIP_NON_MAIN_SYMBOLS;DISABLE_ASLR"
- ""
- "DEPENDS;LLVM_COMPONENT_DEPENDS;LINK_LIBRARIES;COMPILE_FLAGS"
- ${ARGN})
-
- translate_flag(${SWIFTEXE_EXCLUDE_FROM_ALL}
- "EXCLUDE_FROM_ALL"
- SWIFTEXE_EXCLUDE_FROM_ALL_FLAG)
- translate_flag(${SWIFTEXE_DONT_STRIP_NON_MAIN_SYMBOLS}
- "DONT_STRIP_NON_MAIN_SYMBOLS"
- SWIFTEXE_DONT_STRIP_NON_MAIN_SYMBOLS_FLAG)
- translate_flag(${SWIFTEXE_DISABLE_ASLR}
- "DISABLE_ASLR"
- SWIFTEXE_DISABLE_ASLR_FLAG)
-
- set(SWIFTEXE_SOURCES ${SWIFTEXE_UNPARSED_ARGUMENTS})
-
- _add_swift_executable_single(
- ${name}
- ${SWIFTEXE_SOURCES}
- DEPENDS ${SWIFTEXE_DEPENDS}
- LLVM_COMPONENT_DEPENDS ${SWIFTEXE_LLVM_COMPONENT_DEPENDS}
- LINK_LIBRARIES ${SWIFTEXE_LINK_LIBRARIES}
- SDK ${SWIFT_HOST_VARIANT_SDK}
- ARCHITECTURE ${SWIFT_HOST_VARIANT_ARCH}
- COMPILE_FLAGS ${SWIFTEXE_COMPILE_FLAGS}
- ${SWIFTEXE_EXCLUDE_FROM_ALL_FLAG}
- ${SWIFTEXE_DONT_STRIP_NON_MAIN_SYMBOLS_FLAG}
- ${SWIFTEXE_DISABLE_ASLR_FLAG})
-endfunction()
-
macro(add_swift_tool_subdirectory name)
add_llvm_subdirectory(SWIFT TOOL ${name})
endmacro()
@@ -2301,7 +2226,9 @@
MESSAGE "Swift Component is required to add a host tool")
# Create the executable rule.
- add_swift_executable(${executable}
+ _add_swift_executable_single(${executable}
+ SDK ${SWIFT_HOST_VARIANT_SDK}
+ ARCHITECTURE ${SWIFT_HOST_VARIANT_ARCH}
${ASHT_UNPARSED_ARGUMENTS})
swift_install_in_component(${ASHT_SWIFT_COMPONENT}
diff --git a/cmake/modules/SwiftComponents.cmake b/cmake/modules/SwiftComponents.cmake
index 725c685..ada4a1a 100644
--- a/cmake/modules/SwiftComponents.cmake
+++ b/cmake/modules/SwiftComponents.cmake
@@ -67,25 +67,10 @@
set(_SWIFT_DEFINED_COMPONENTS
"autolink-driver;compiler;clang-builtin-headers;clang-resource-dir-symlink;clang-builtin-headers-in-clang-resource-dir;stdlib;stdlib-experimental;sdk-overlay;parser-lib;editor-integration;tools;testsuite-tools;toolchain-dev-tools;dev;license;sourcekit-xpc-service;sourcekit-inproc;swift-remote-mirror;swift-remote-mirror-headers")
-# The default install components include all of the defined components, except
-# for the following exceptions.
-set(_SWIFT_DEFAULT_COMPONENTS "${_SWIFT_DEFINED_COMPONENTS}")
-# 'dev' takes up a lot of disk space and isn't part of a normal toolchain.
-list(REMOVE_ITEM _SWIFT_DEFAULT_COMPONENTS "dev")
-# These clang header options conflict with 'clang-builtin-headers'.
-list(REMOVE_ITEM _SWIFT_DEFAULT_COMPONENTS "clang-resource-dir-symlink")
-list(REMOVE_ITEM _SWIFT_DEFAULT_COMPONENTS "clang-builtin-headers-in-clang-resource-dir")
-# The sourcekit install variants are currently mutually exclusive.
-if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
- list(REMOVE_ITEM _SWIFT_DEFAULT_COMPONENTS "sourcekit-inproc")
-else()
- list(REMOVE_ITEM _SWIFT_DEFAULT_COMPONENTS "sourcekit-xpc-service")
-endif()
-
macro(swift_configure_components)
# Set the SWIFT_INSTALL_COMPONENTS variable to the default value if it is not passed in via -D
- set(SWIFT_INSTALL_COMPONENTS "${_SWIFT_DEFAULT_COMPONENTS}" CACHE STRING
- "A semicolon-separated list of components to install from the set ${_SWIFT_DEFINED_COMPONENTS}")
+ set(SWIFT_INSTALL_COMPONENTS "${_SWIFT_DEFINED_COMPONENTS}" CACHE STRING
+      "A semicolon-separated list of components to install from the set ${_SWIFT_DEFINED_COMPONENTS}")
foreach(component ${_SWIFT_DEFINED_COMPONENTS})
string(TOUPPER "${component}" var_name_piece)
diff --git a/cmake/modules/SwiftConfigureSDK.cmake b/cmake/modules/SwiftConfigureSDK.cmake
index f8a59d6..f5d7745 100644
--- a/cmake/modules/SwiftConfigureSDK.cmake
+++ b/cmake/modules/SwiftConfigureSDK.cmake
@@ -212,7 +212,7 @@
message(FATAL_ERROR "unknown arch for ${prefix}: ${arch}")
endif()
elseif("${prefix}" STREQUAL "FREEBSD")
- if(arch STREQUAL x86_64)
+ if(NOT arch STREQUAL x86_64)
message(FATAL_ERROR "unsupported arch for FreeBSD: ${arch}")
endif()
diff --git a/cmake/modules/SwiftSource.cmake b/cmake/modules/SwiftSource.cmake
index 3c6c5ee..d9be27c 100644
--- a/cmake/modules/SwiftSource.cmake
+++ b/cmake/modules/SwiftSource.cmake
@@ -347,9 +347,16 @@
list(APPEND module_outputs "${interface_file}")
endif()
+ set(optional_arg)
+ if(sdk IN_LIST SWIFT_APPLE_PLATFORMS)
+ # Allow installation of stdlib without building all variants on Darwin.
+ set(optional_arg "OPTIONAL")
+ endif()
+
swift_install_in_component("${SWIFTFILE_INSTALL_IN_COMPONENT}"
FILES ${module_outputs}
- DESTINATION "lib${LLVM_LIBDIR_SUFFIX}/swift/${library_subdir}")
+ DESTINATION "lib${LLVM_LIBDIR_SUFFIX}/swift/${library_subdir}"
+ "${optional_arg}")
set(line_directive_tool "${SWIFT_SOURCE_DIR}/utils/line-directive")
set(swift_compiler_tool "${SWIFT_NATIVE_SWIFT_TOOLS_PATH}/swiftc")
diff --git a/docs/WindowsBuild.md b/docs/WindowsBuild.md
index ecdd101..8cf5196 100644
--- a/docs/WindowsBuild.md
+++ b/docs/WindowsBuild.md
@@ -177,7 +177,7 @@
a file.
```cmd
-cmake -G "Visual Studio 2017" "S:\swift" -DCMAKE_GENERATOR_PLATFORM="x64"^ ...
+cmake -G "Visual Studio 2017" -DCMAKE_GENERATOR_PLATFORM="x64"^ ...
```
### 8. Build lldb
@@ -212,6 +212,7 @@
5. sed
```cmd
+set PATH=S:\build\Ninja-DebugAssert\swift-windows-amd64\bin;S:\build\Ninja-DebugAssert\swift-windows-amd64\libdispatch-prefix\bin;%PATH%;C:\Program Files (x86)\GnuWin32\bin
ninja -C "S:/build/Ninja-DebugAssert/swift-windows-amd64" check-swift
```
diff --git a/include/swift/AST/ASTContext.h b/include/swift/AST/ASTContext.h
index 1f154af..c96b226 100644
--- a/include/swift/AST/ASTContext.h
+++ b/include/swift/AST/ASTContext.h
@@ -942,6 +942,9 @@
return !LangOpts.EnableAccessControl;
}
+ /// Each kind and SourceFile has its own cache for a Type.
+ Type &getDefaultTypeRequestCache(SourceFile *, KnownProtocolKind);
+
private:
friend Decl;
Optional<RawComment> getRawComment(const Decl *D);
diff --git a/include/swift/AST/Decl.h b/include/swift/AST/Decl.h
index 52795bb..cf64424 100644
--- a/include/swift/AST/Decl.h
+++ b/include/swift/AST/Decl.h
@@ -2416,7 +2416,7 @@
bool isUsableFromInline() const;
/// Returns \c true if this declaration is *not* intended to be used directly
- /// by application developers despite of the visibility.
+ /// by application developers despite the visibility.
bool shouldHideFromEditor() const;
bool hasAccess() const {
diff --git a/include/swift/AST/DeclContext.h b/include/swift/AST/DeclContext.h
index ddfbbb9..41445ce 100644
--- a/include/swift/AST/DeclContext.h
+++ b/include/swift/AST/DeclContext.h
@@ -24,11 +24,14 @@
#include "swift/AST/ResilienceExpansion.h"
#include "swift/AST/TypeAlignments.h"
#include "swift/Basic/LLVM.h"
-#include "swift/Basic/SourceLoc.h"
#include "swift/Basic/STLExtras.h"
+#include "swift/Basic/SourceLoc.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <type_traits>
namespace llvm {
class raw_ostream;
@@ -586,7 +589,8 @@
bool walkContext(ASTWalker &Walker);
void dumpContext() const;
- unsigned printContext(llvm::raw_ostream &OS, unsigned indent = 0) const;
+ unsigned printContext(llvm::raw_ostream &OS, unsigned indent = 0,
+ bool onlyAPartialLine = false) const;
// Only allow allocation of DeclContext using the allocator in ASTContext.
void *operator new(size_t Bytes, ASTContext &C,
@@ -776,7 +780,18 @@
/// member is an invisible addition.
void addMemberSilently(Decl *member, Decl *hint = nullptr) const;
};
-
+
+/// Define simple_display for DeclContexts but not for subclasses in order to
+/// avoid ambiguities with Decl* arguments.
+template <typename ParamT, typename = typename std::enable_if<
+ std::is_same<ParamT, DeclContext>::value>::type>
+void simple_display(llvm::raw_ostream &out, const ParamT *dc) {
+ if (dc)
+ dc->printContext(out, 0, true);
+ else
+ out << "(null)";
+}
+
} // end namespace swift
namespace llvm {
diff --git a/include/swift/AST/KnownProtocols.def b/include/swift/AST/KnownProtocols.def
index 6af6744..ef44c1f 100644
--- a/include/swift/AST/KnownProtocols.def
+++ b/include/swift/AST/KnownProtocols.def
@@ -24,9 +24,11 @@
#define PROTOCOL_WITH_NAME(Id, Name)
#endif
-/// \def EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name)
+/// \def EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name, typeName, performLocalLookup)
+/// \param typeName supplies the name used for type lookup,
+/// \param performLocalLookup specifies whether to first look in the local context.
#ifndef EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME
-#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name) \
+#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name, typeName, performLocalLookup) \
PROTOCOL_WITH_NAME(Id, Name)
#endif
@@ -41,10 +43,17 @@
#define PROTOCOL(name) PROTOCOL_WITH_NAME(name, #name)
#define PROTOCOL_(name) PROTOCOL_WITH_NAME(name, "_" #name)
-#define EXPRESSIBLE_BY_LITERAL_PROTOCOL(name) \
- EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, #name)
-#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_(name) \
- EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, "_" #name)
+
+/// \param typeName supplies the name used for type lookup,
+/// \param performLocalLookup specifies whether to first look in the local context.
+#define EXPRESSIBLE_BY_LITERAL_PROTOCOL(name, typeName, performLocalLookup) \
+ EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, #name, typeName, performLocalLookup)
+
+/// \param typeName supplies the name used for type lookup,
+/// \param performLocalLookup specifies whether to first look in the local context.
+#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_(name, typeName, performLocalLookup) \
+ EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, "_" #name, typeName, performLocalLookup)
+
#define BUILTIN_EXPRESSIBLE_BY_LITERAL_PROTOCOL_(name) \
BUILTIN_EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, "_" #name)
@@ -72,20 +81,20 @@
PROTOCOL(StringInterpolationProtocol)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByArrayLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByBooleanLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByDictionaryLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByExtendedGraphemeClusterLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByFloatLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByIntegerLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByStringInterpolation)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByStringLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByNilLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByUnicodeScalarLiteral)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByArrayLiteral, "Array", false)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByBooleanLiteral, "BooleanLiteralType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByDictionaryLiteral, "Dictionary", false)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByExtendedGraphemeClusterLiteral, "ExtendedGraphemeClusterType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByFloatLiteral, "FloatLiteralType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByIntegerLiteral, "IntegerLiteralType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByStringInterpolation, "StringLiteralType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByStringLiteral, "StringLiteralType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByNilLiteral, nullptr, false)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL(ExpressibleByUnicodeScalarLiteral, "UnicodeScalarType", true)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByColorLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByImageLiteral)
-EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByFileReferenceLiteral)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByColorLiteral, "_ColorLiteralType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByImageLiteral, "_ImageLiteralType", true)
+EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByFileReferenceLiteral, "_FileReferenceLiteralType", true)
BUILTIN_EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByBuiltinBooleanLiteral)
BUILTIN_EXPRESSIBLE_BY_LITERAL_PROTOCOL_(ExpressibleByBuiltinExtendedGraphemeClusterLiteral)
diff --git a/include/swift/AST/TypeCheckRequests.h b/include/swift/AST/TypeCheckRequests.h
index 179b083..770953d 100644
--- a/include/swift/AST/TypeCheckRequests.h
+++ b/include/swift/AST/TypeCheckRequests.h
@@ -322,6 +322,48 @@
bool isCached() const { return true; }
};
+void simple_display(llvm::raw_ostream &out, const KnownProtocolKind);
+class TypeChecker;
+
+// Find the type in the cache or look it up
+class DefaultTypeRequest
+ : public SimpleRequest<DefaultTypeRequest, CacheKind::SeparatelyCached,
+ Type, KnownProtocolKind, const DeclContext *> {
+public:
+ using SimpleRequest::SimpleRequest;
+
+private:
+ friend SimpleRequest;
+
+ // Evaluation.
+ llvm::Expected<Type> evaluate(Evaluator &eval, KnownProtocolKind,
+ const DeclContext *) const;
+
+public:
+ // Cycle handling
+ void diagnoseCycle(DiagnosticEngine &diags) const;
+ void noteCycleStep(DiagnosticEngine &diags) const;
+
+ // Caching
+ bool isCached() const { return true; }
+ Optional<Type> getCachedResult() const;
+ void cacheResult(Type value) const;
+
+private:
+ KnownProtocolKind getKnownProtocolKind() const {
+ return std::get<0>(getStorage());
+ }
+ const DeclContext *getDeclContext() const {
+ return std::get<1>(getStorage());
+ }
+
+ static const char *getTypeName(KnownProtocolKind);
+ static bool getPerformLocalLookup(KnownProtocolKind);
+ TypeChecker &getTypeChecker() const;
+ SourceFile *getSourceFile() const;
+ Type &getCache() const;
+};
+
/// The zone number for the type checker.
#define SWIFT_TYPE_CHECKER_REQUESTS_TYPEID_ZONE 10
diff --git a/include/swift/AST/TypeCheckerTypeIDZone.def b/include/swift/AST/TypeCheckerTypeIDZone.def
index dacd761..df75c7a 100644
--- a/include/swift/AST/TypeCheckerTypeIDZone.def
+++ b/include/swift/AST/TypeCheckerTypeIDZone.def
@@ -22,3 +22,4 @@
SWIFT_TYPEID(IsDynamicRequest)
SWIFT_TYPEID(RequirementRequest)
SWIFT_TYPEID(USRGenerationRequest)
+SWIFT_TYPEID(DefaultTypeRequest)
diff --git a/include/swift/Basic/ImmutablePointerSet.h b/include/swift/Basic/ImmutablePointerSet.h
index 283bcc2..8f16369 100644
--- a/include/swift/Basic/ImmutablePointerSet.h
+++ b/include/swift/Basic/ImmutablePointerSet.h
@@ -349,7 +349,10 @@
ImmutablePointerSet<T>(nullptr, {});
template <typename T>
-constexpr unsigned ImmutablePointerSetFactory<T>::AllocAlignment =
+#if !defined(_MSC_VER) || defined(__clang__)
+constexpr
+#endif
+const unsigned ImmutablePointerSetFactory<T>::AllocAlignment =
(alignof(PtrSet) > alignof(PtrTy)) ? alignof(PtrSet) : alignof(PtrTy);
} // end swift namespace
diff --git a/include/swift/Parse/Lexer.h b/include/swift/Parse/Lexer.h
index cbe82c8..4d2dd71 100644
--- a/include/swift/Parse/Lexer.h
+++ b/include/swift/Parse/Lexer.h
@@ -55,6 +55,12 @@
Allowed,
};
+enum class LexerMode {
+ Swift,
+ SwiftInterface,
+ SIL
+};
+
/// Kinds of conflict marker which the lexer might encounter.
enum class ConflictMarkerKind {
/// A normal or diff3 conflict marker, initiated by at least 7 "<"s,
@@ -98,9 +104,10 @@
Token NextToken;
- /// This is true if we're lexing a .sil file instead of a .swift
- /// file. This enables the 'sil' keyword.
- const bool InSILMode;
+ /// The kind of source we're lexing. This either enables special behavior for
+ /// parseable interfaces, or enables things like the 'sil' keyword if lexing
+ /// a .sil file.
+ const LexerMode LexMode;
/// True if we should skip past a `#!` line at the start of the file.
const bool IsHashbangAllowed;
@@ -135,8 +142,8 @@
/// everything.
Lexer(const PrincipalTag &, const LangOptions &LangOpts,
const SourceManager &SourceMgr, unsigned BufferID,
- DiagnosticEngine *Diags, bool InSILMode, HashbangMode HashbangAllowed,
- CommentRetentionMode RetainComments,
+ DiagnosticEngine *Diags, LexerMode LexMode,
+ HashbangMode HashbangAllowed, CommentRetentionMode RetainComments,
TriviaRetentionMode TriviaRetention);
void initialize(unsigned Offset, unsigned EndOffset);
@@ -150,21 +157,21 @@
/// identifier), but not things like how many characters are
/// consumed. If that changes, APIs like getLocForEndOfToken will
/// need to take a LangOptions explicitly.
- /// \param InSILMode - whether we're parsing a SIL source file.
+ /// \param LexMode - the kind of source file we're lexing.
/// Unlike language options, this does affect primitive lexing, which
/// means that APIs like getLocForEndOfToken really ought to take
/// this flag; it's just that we don't care that much about fidelity
/// when parsing SIL files.
Lexer(
const LangOptions &Options, const SourceManager &SourceMgr,
- unsigned BufferID, DiagnosticEngine *Diags, bool InSILMode,
+ unsigned BufferID, DiagnosticEngine *Diags, LexerMode LexMode,
HashbangMode HashbangAllowed = HashbangMode::Disallowed,
CommentRetentionMode RetainComments = CommentRetentionMode::None,
TriviaRetentionMode TriviaRetention = TriviaRetentionMode::WithoutTrivia);
/// Create a lexer that scans a subrange of the source buffer.
Lexer(const LangOptions &Options, const SourceManager &SourceMgr,
- unsigned BufferID, DiagnosticEngine *Diags, bool InSILMode,
+ unsigned BufferID, DiagnosticEngine *Diags, LexerMode LexMode,
HashbangMode HashbangAllowed, CommentRetentionMode RetainComments,
TriviaRetentionMode TriviaRetention, unsigned Offset,
unsigned EndOffset);
diff --git a/include/swift/SIL/OwnershipUtils.h b/include/swift/SIL/OwnershipUtils.h
index 7a60169..9550260 100644
--- a/include/swift/SIL/OwnershipUtils.h
+++ b/include/swift/SIL/OwnershipUtils.h
@@ -35,8 +35,10 @@
ReturnFalse = 1,
PrintMessage = 2,
Assert = 4,
+ ReturnFalseOnLeak = 8,
PrintMessageAndReturnFalse = PrintMessage | ReturnFalse,
PrintMessageAndAssert = PrintMessage | Assert,
+ ReturnFalseOnLeakAssertOtherwise = ReturnFalseOnLeak | Assert,
} Value;
ErrorBehaviorKind() : Value(Invalid) {}
@@ -47,6 +49,11 @@
return Value & Assert;
}
+ bool shouldReturnFalseOnLeak() const {
+ assert(Value != Invalid);
+ return Value & ReturnFalseOnLeak;
+ }
+
bool shouldPrintMessage() const {
assert(Value != Invalid);
return Value & PrintMessage;
@@ -95,12 +102,21 @@
/// non-consuming uses, or from the producer instruction.
/// 2. The consuming use set jointly post dominates producers and all non
/// consuming uses.
-bool valueHasLinearLifetime(SILValue value,
- ArrayRef<BranchPropagatedUser> consumingUses,
- ArrayRef<BranchPropagatedUser> nonConsumingUses,
- SmallPtrSetImpl<SILBasicBlock *> &visitedBlocks,
- DeadEndBlocks &deadEndBlocks,
- ownership::ErrorBehaviorKind errorBehavior);
+///
+/// \p value The value whose lifetime we are checking.
+/// \p consumingUses the array of users that destroy or consume a value.
+/// \p nonConsumingUses regular uses
+/// \p deadEndBlocks a cache for the dead end block computation
+/// \p errorBehavior If we detect an error, should we return false or hard
+/// error.
+/// \p leakingBlocks If non-null a list of blocks where the value was detected
+/// to leak. Can be used to insert missing destroys.
+bool valueHasLinearLifetime(
+ SILValue value, ArrayRef<BranchPropagatedUser> consumingUses,
+ ArrayRef<BranchPropagatedUser> nonConsumingUses,
+ SmallPtrSetImpl<SILBasicBlock *> &visitedBlocks,
+ DeadEndBlocks &deadEndBlocks, ownership::ErrorBehaviorKind errorBehavior,
+ SmallVectorImpl<SILBasicBlock *> *leakingBlocks = nullptr);
/// Returns true if v is an address or trivial.
bool isValueAddressOrTrivial(SILValue v, SILModule &m);
diff --git a/include/swift/SIL/SILConstants.h b/include/swift/SIL/SILConstants.h
index 9eef3cc..9ddf944 100644
--- a/include/swift/SIL/SILConstants.h
+++ b/include/swift/SIL/SILConstants.h
@@ -103,6 +103,12 @@
/// "aggregate" member of the value union.
RK_Aggregate,
+ /// This value is an enum with no payload.
+ RK_Enum,
+
+ /// This value is an enum with a payload.
+ RK_EnumWithPayload,
+
/// This represents the address of a memory object.
RK_DirectAddress,
@@ -136,6 +142,14 @@
/// information about the array elements and count.
const SymbolicValue *aggregate;
+ /// When this SymbolicValue is of "Enum" kind, this pointer stores
+ /// information about the enum case type.
+ EnumElementDecl *enumVal;
+
+ /// When this SymbolicValue is of "EnumWithPayload" kind, this pointer
+ /// stores information about the enum case type and its payload.
+ EnumWithPayloadSymbolicValue *enumValWithPayload;
+
/// When the representationKind is "DirectAddress", this pointer is the
/// memory object referenced.
SymbolicValueMemoryObject *directAddress;
@@ -186,6 +200,12 @@
/// This can be an array, struct, tuple, etc.
Aggregate,
+ /// This is an enum without payload.
+ Enum,
+
+ /// This is an enum with payload (formally known as "associated value").
+ EnumWithPayload,
+
/// This value represents the address of, or into, a memory object.
Address,
@@ -271,6 +291,25 @@
ArrayRef<SymbolicValue> getAggregateValue() const;
+ /// This returns a constant Symbolic value for the enum case in `decl`, which
+ /// must not have an associated value.
+ static SymbolicValue getEnum(EnumElementDecl *decl) {
+ assert(decl);
+ SymbolicValue result;
+ result.representationKind = RK_Enum;
+ result.value.enumVal = decl;
+ return result;
+ }
+
+ /// `payload` must be a constant.
+ static SymbolicValue getEnumWithPayload(EnumElementDecl *decl,
+ SymbolicValue payload,
+ ASTContext &astContext);
+
+ EnumElementDecl *getEnumValue() const;
+
+ SymbolicValue getEnumPayloadValue() const;
+
/// Return a symbolic value that represents the address of a memory object.
static SymbolicValue getAddress(SymbolicValueMemoryObject *memoryObject) {
SymbolicValue result;
diff --git a/include/swift/SILOptimizer/Utils/SILSSAUpdater.h b/include/swift/SILOptimizer/Utils/SILSSAUpdater.h
index bff40e2..30437bc 100644
--- a/include/swift/SILOptimizer/Utils/SILSSAUpdater.h
+++ b/include/swift/SILOptimizer/Utils/SILSSAUpdater.h
@@ -63,6 +63,10 @@
SmallVectorImpl<SILPhiArgument *> *InsertedPHIs = nullptr);
~SILSSAUpdater();
+ void setInsertedPhis(SmallVectorImpl<SILPhiArgument *> *insertedPhis) {
+ InsertedPHIs = insertedPhis;
+ }
+
/// Initialize for a use of a value of type.
void Initialize(SILType T);
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 9dfddd5..87571f3 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -291,6 +291,13 @@
ProtocolConformanceRef>
DefaultAssociatedConformanceWitnesses;
+ /// Caches of default types for DefaultTypeRequest.
+ /// Used to be instance variables in the TypeChecker.
+ /// There is a logically separate cache for each SourceFile and
+ /// KnownProtocolKind.
+ llvm::DenseMap<SourceFile *, std::array<Type, NumKnownProtocols>>
+ DefaultTypeRequestCaches;
+
/// Structure that captures data that is segregated into different
/// arenas.
struct Arena {
@@ -5075,4 +5082,7 @@
return LayoutConstraint(New);
}
-
+Type &ASTContext::getDefaultTypeRequestCache(SourceFile *SF,
+ KnownProtocolKind kind) {
+ return getImpl().DefaultTypeRequestCaches[SF][size_t(kind)];
+}
diff --git a/lib/AST/ASTPrinter.cpp b/lib/AST/ASTPrinter.cpp
index 812cbb5..47c925c 100644
--- a/lib/AST/ASTPrinter.cpp
+++ b/lib/AST/ASTPrinter.cpp
@@ -178,7 +178,7 @@
result.PrintAccess = true;
result.ExcludeAttrList = {DAK_ImplicitlyUnwrappedOptional, DAK_AccessControl,
- DAK_SetterAccess};
+ DAK_SetterAccess, DAK_Lazy};
return result;
}
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index a33bddc..b52b407 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -2550,8 +2550,6 @@
return false;
}
-/// Returns \c true if this declaration is *not* intended to be used directly
-/// by application developers despite of the visibility.
bool ValueDecl::shouldHideFromEditor() const {
// Hide private stdlib declarations.
if (isPrivateStdlibDecl(/*treatNonBuiltinProtocolsAsPublic*/ false) ||
diff --git a/lib/AST/DeclContext.cpp b/lib/AST/DeclContext.cpp
index 1c4d610..2e6a37a 100644
--- a/lib/AST/DeclContext.cpp
+++ b/lib/AST/DeclContext.cpp
@@ -525,10 +525,12 @@
return ctx.SourceMgr.getLineAndColumn(loc).first;
}
-unsigned DeclContext::printContext(raw_ostream &OS, unsigned indent) const {
+unsigned DeclContext::printContext(raw_ostream &OS, const unsigned indent,
+ const bool onlyAPartialLine) const {
unsigned Depth = 0;
- if (auto *P = getParent())
- Depth = P->printContext(OS, indent);
+ if (!onlyAPartialLine)
+ if (auto *P = getParent())
+ Depth = P->printContext(OS, indent);
const char *Kind;
switch (getContextKind()) {
@@ -658,7 +660,8 @@
}
}
- OS << "\n";
+ if (!onlyAPartialLine)
+ OS << "\n";
return Depth + 1;
}
diff --git a/lib/AST/NameLookup.cpp b/lib/AST/NameLookup.cpp
index 220c8d4..5a6344b 100644
--- a/lib/AST/NameLookup.cpp
+++ b/lib/AST/NameLookup.cpp
@@ -271,6 +271,42 @@
}
}
+ // The Foundation overlay introduced Data.withUnsafeBytes, which is
+ // treated as being ambiguous with SwiftNIO's Data.withUnsafeBytes
+ // extension. Apply a special-case name shadowing rule to use the
+  // latter rather than the former, which would be the consequence of a more
+ // significant change to name shadowing in the future.
+ if (auto owningStruct1
+ = firstDecl->getDeclContext()->getSelfStructDecl()) {
+ if (auto owningStruct2
+ = secondDecl->getDeclContext()->getSelfStructDecl()) {
+ if (owningStruct1 == owningStruct2 &&
+ owningStruct1->getName().is("Data") &&
+ isa<FuncDecl>(firstDecl) && isa<FuncDecl>(secondDecl) &&
+ firstDecl->getFullName() == secondDecl->getFullName() &&
+ firstDecl->getBaseName().userFacingName() == "withUnsafeBytes") {
+ // If the second module is the Foundation module and the first
+ // is the NIOFoundationCompat module, the second is shadowed by the
+ // first.
+ if (firstDecl->getModuleContext()->getName()
+ .is("NIOFoundationCompat") &&
+ secondDecl->getModuleContext()->getName().is("Foundation")) {
+ shadowed.insert(secondDecl);
+ continue;
+ }
+
+ // If it's the other way around, the first declaration is shadowed
+ // by the second.
+ if (secondDecl->getModuleContext()->getName()
+ .is("NIOFoundationCompat") &&
+ firstDecl->getModuleContext()->getName().is("Foundation")) {
+ shadowed.insert(firstDecl);
+ break;
+ }
+ }
+ }
+ }
+
// Prefer declarations in an overlay to similar declarations in
// the Clang module it customizes.
if (firstDecl->hasClangNode() != secondDecl->hasClangNode()) {
diff --git a/lib/AST/RawComment.cpp b/lib/AST/RawComment.cpp
index 894aa2a..403894b 100644
--- a/lib/AST/RawComment.cpp
+++ b/lib/AST/RawComment.cpp
@@ -108,7 +108,7 @@
unsigned Offset = SourceMgr.getLocOffsetInBuffer(Range.getStart(), BufferID);
unsigned EndOffset = SourceMgr.getLocOffsetInBuffer(Range.getEnd(), BufferID);
LangOptions FakeLangOpts;
- Lexer L(FakeLangOpts, SourceMgr, BufferID, nullptr, /*InSILMode=*/false,
+ Lexer L(FakeLangOpts, SourceMgr, BufferID, nullptr, LexerMode::Swift,
HashbangMode::Disallowed,
CommentRetentionMode::ReturnAsTokens,
TriviaRetentionMode::WithoutTrivia,
diff --git a/lib/AST/TypeCheckRequests.cpp b/lib/AST/TypeCheckRequests.cpp
index 466ff7c..69fd79e 100644
--- a/lib/AST/TypeCheckRequests.cpp
+++ b/lib/AST/TypeCheckRequests.cpp
@@ -13,6 +13,7 @@
#include "swift/AST/ASTContext.h"
#include "swift/AST/Decl.h"
#include "swift/AST/DiagnosticsCommon.h"
+#include "swift/AST/Module.h"
#include "swift/AST/TypeLoc.h"
#include "swift/AST/TypeRepr.h"
#include "swift/AST/Types.h"
@@ -429,3 +430,69 @@
auto &d = std::get<0>(storage);
diags.diagnose(d, diag::circular_reference);
}
+
+//----------------------------------------------------------------------------//
+// DefaultTypeRequest.
+//----------------------------------------------------------------------------//
+
+void swift::simple_display(llvm::raw_ostream &out,
+ const KnownProtocolKind kind) {
+ out << getProtocolName(kind);
+}
+
+void DefaultTypeRequest::diagnoseCycle(DiagnosticEngine &diags) const {
+ diags.diagnose(SourceLoc(), diag::circular_reference);
+}
+
+void DefaultTypeRequest::noteCycleStep(DiagnosticEngine &diags) const {
+ diags.diagnose(SourceLoc(), diag::circular_reference_through);
+}
+
+//----------------------------------------------------------------------------//
+// DefaultTypeRequest caching.
+//----------------------------------------------------------------------------//
+
+SourceFile *DefaultTypeRequest::getSourceFile() const {
+ return getDeclContext()->getParentSourceFile();
+}
+
+Type &DefaultTypeRequest::getCache() const {
+ return getDeclContext()->getASTContext().getDefaultTypeRequestCache(
+ getSourceFile(), getKnownProtocolKind());
+}
+
+Optional<Type> DefaultTypeRequest::getCachedResult() const {
+ auto const &cachedType = getCache();
+ return cachedType ? Optional<Type>(cachedType) : None;
+}
+
+void DefaultTypeRequest::cacheResult(Type value) const { getCache() = value; }
+
+const char *
+DefaultTypeRequest::getTypeName(const KnownProtocolKind knownProtocolKind) {
+ switch (knownProtocolKind) {
+
+// clang-format off
+ # define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name, typeName, performLocalLookup) \
+ case KnownProtocolKind::Id: return typeName;
+ # include "swift/AST/KnownProtocols.def"
+ # undef EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME
+ // clang-format on
+
+ default: return nullptr;
+ }
+}
+
+bool DefaultTypeRequest::getPerformLocalLookup(const KnownProtocolKind knownProtocolKind) {
+ switch (knownProtocolKind) {
+
+ // clang-format off
+ # define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name, typeName, performLocalLookup) \
+ case KnownProtocolKind::Id: return performLocalLookup;
+ # include "swift/AST/KnownProtocols.def"
+ # undef EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME
+ // clang-format on
+
+ default: return false;
+ }
+}
diff --git a/lib/IDE/TypeContextInfo.cpp b/lib/IDE/TypeContextInfo.cpp
index 526f5c6..dbda625 100644
--- a/lib/IDE/TypeContextInfo.cpp
+++ b/lib/IDE/TypeContextInfo.cpp
@@ -39,16 +39,16 @@
// Ignore callbacks for suffix completions
// {
- void completeDotExpr(Expr *E, SourceLoc DotLoc) override{};
- void completePostfixExpr(Expr *E, bool hasSpace) override{};
- void completeExprSuper(SuperRefExpr *SRE) override{};
- void completeExprSuperDot(SuperRefExpr *SRE) override{};
+ void completeDotExpr(Expr *E, SourceLoc DotLoc) override {};
+ void completePostfixExpr(Expr *E, bool hasSpace) override {};
+ void completeExprSuper(SuperRefExpr *SRE) override {};
+ void completeExprSuperDot(SuperRefExpr *SRE) override {};
// }
// Ignore non-expression callbacks.
// {
- void completeInPrecedenceGroup(SyntaxKind SK) override{};
- void completePoundAvailablePlatform() override{};
+ void completeInPrecedenceGroup(SyntaxKind SK) override {};
+ void completePoundAvailablePlatform() override {};
void completeExprKeyPath(KeyPathExpr *KPE, SourceLoc DotLoc) override {}
void completeTypeSimpleBeginning() override {}
void completeTypeIdentifierWithDot(IdentTypeRepr *ITR) override {}
@@ -65,15 +65,15 @@
void completePlatformCondition() override {}
void completeGenericParams(TypeLoc TL) override {}
void completeAfterIfStmt(bool hasElse) override {}
- void completeAccessorBeginning() override{};
+ void completeAccessorBeginning() override {};
// }
- void completeStmtOrExpr() override{};
+ void completeStmtOrExpr() override {};
void completePostfixExprBeginning(CodeCompletionExpr *E) override;
void completeForEachSequenceBeginning(CodeCompletionExpr *E) override;
void completeCaseStmtBeginning() override;
- void completeAssignmentRHS(AssignExpr *E) override{};
+ void completeAssignmentRHS(AssignExpr *E) override {};
void completeCallArg(CodeCompletionExpr *E) override;
void completeReturnStmt(CodeCompletionExpr *E) override;
void completeYieldStmt(CodeCompletionExpr *E,
diff --git a/lib/Immediate/REPL.cpp b/lib/Immediate/REPL.cpp
index a18bf7a..87bf17c 100644
--- a/lib/Immediate/REPL.cpp
+++ b/lib/Immediate/REPL.cpp
@@ -1053,7 +1053,7 @@
unsigned BufferID =
CI.getSourceMgr().addMemBufferCopy(Line, "<REPL Input>");
Lexer L(CI.getASTContext().LangOpts,
- CI.getSourceMgr(), BufferID, nullptr, false /*not SIL*/);
+ CI.getSourceMgr(), BufferID, nullptr, LexerMode::Swift);
Token Tok;
L.lex(Tok);
assert(Tok.is(tok::colon));
diff --git a/lib/Parse/Lexer.cpp b/lib/Parse/Lexer.cpp
index 5555caf..2a08277 100644
--- a/lib/Parse/Lexer.cpp
+++ b/lib/Parse/Lexer.cpp
@@ -171,11 +171,11 @@
Lexer::Lexer(const PrincipalTag &, const LangOptions &LangOpts,
const SourceManager &SourceMgr, unsigned BufferID,
- DiagnosticEngine *Diags, bool InSILMode,
+ DiagnosticEngine *Diags, LexerMode LexMode,
HashbangMode HashbangAllowed, CommentRetentionMode RetainComments,
TriviaRetentionMode TriviaRetention)
: LangOpts(LangOpts), SourceMgr(SourceMgr), BufferID(BufferID),
- Diags(Diags), InSILMode(InSILMode),
+ Diags(Diags), LexMode(LexMode),
IsHashbangAllowed(HashbangAllowed == HashbangMode::Allowed),
RetainComments(RetainComments), TriviaRetention(TriviaRetention) {}
@@ -216,28 +216,28 @@
}
Lexer::Lexer(const LangOptions &Options, const SourceManager &SourceMgr,
- unsigned BufferID, DiagnosticEngine *Diags, bool InSILMode,
+ unsigned BufferID, DiagnosticEngine *Diags, LexerMode LexMode,
HashbangMode HashbangAllowed, CommentRetentionMode RetainComments,
TriviaRetentionMode TriviaRetention)
- : Lexer(PrincipalTag(), Options, SourceMgr, BufferID, Diags, InSILMode,
+ : Lexer(PrincipalTag(), Options, SourceMgr, BufferID, Diags, LexMode,
HashbangAllowed, RetainComments, TriviaRetention) {
unsigned EndOffset = SourceMgr.getRangeForBuffer(BufferID).getByteLength();
initialize(/*Offset=*/0, EndOffset);
}
Lexer::Lexer(const LangOptions &Options, const SourceManager &SourceMgr,
- unsigned BufferID, DiagnosticEngine *Diags, bool InSILMode,
+ unsigned BufferID, DiagnosticEngine *Diags, LexerMode LexMode,
HashbangMode HashbangAllowed, CommentRetentionMode RetainComments,
TriviaRetentionMode TriviaRetention, unsigned Offset,
unsigned EndOffset)
- : Lexer(PrincipalTag(), Options, SourceMgr, BufferID, Diags, InSILMode,
+ : Lexer(PrincipalTag(), Options, SourceMgr, BufferID, Diags, LexMode,
HashbangAllowed, RetainComments, TriviaRetention) {
initialize(Offset, EndOffset);
}
Lexer::Lexer(Lexer &Parent, State BeginState, State EndState)
: Lexer(PrincipalTag(), Parent.LangOpts, Parent.SourceMgr, Parent.BufferID,
- Parent.Diags, Parent.InSILMode,
+ Parent.Diags, Parent.LexMode,
Parent.IsHashbangAllowed
? HashbangMode::Allowed
: HashbangMode::Disallowed,
@@ -264,7 +264,7 @@
SourceMgr.findBufferContainingLoc(Loc)) &&
"location from the wrong buffer");
- Lexer L(LangOpts, SourceMgr, BufferID, Diags, InSILMode,
+ Lexer L(LangOpts, SourceMgr, BufferID, Diags, LexMode,
HashbangMode::Allowed, CommentRetentionMode::None,
TriviaRetentionMode::WithoutTrivia);
L.restoreState(State(Loc));
@@ -672,7 +672,8 @@
// Lex [a-zA-Z_$0-9[[:XID_Continue:]]]*
while (advanceIfValidContinuationOfIdentifier(CurPtr, BufferEnd));
- tok Kind = kindOfIdentifier(StringRef(TokStart, CurPtr-TokStart), InSILMode);
+ tok Kind = kindOfIdentifier(StringRef(TokStart, CurPtr-TokStart),
+ LexMode == LexerMode::SIL);
return formToken(Kind, TokStart);
}
@@ -944,9 +945,11 @@
return formToken(tok::identifier, tokStart);
}
- // We reserve $nonNumeric for persistent bindings in the debugger.
+ // We reserve $nonNumeric for persistent bindings in the debugger and implicit
+ // variables, like storage for lazy properties.
if (!isAllDigits) {
- if (!LangOpts.EnableDollarIdentifiers && !InSILBody)
+ if (!LangOpts.EnableDollarIdentifiers && !InSILBody &&
+ LexMode != LexerMode::SwiftInterface)
diagnose(tokStart, diag::expected_dollar_numeric);
// Even if we diagnose, we go ahead and form an identifier token,
@@ -2515,7 +2518,7 @@
// comments and normally we won't be at the beginning of a comment token
// (making this option irrelevant), or the caller lexed comments and
// we need to lex just the comment token.
- Lexer L(FakeLangOpts, SM, BufferID, nullptr, /*InSILMode=*/ false,
+ Lexer L(FakeLangOpts, SM, BufferID, nullptr, LexerMode::Swift,
HashbangMode::Allowed, CommentRetentionMode::ReturnAsTokens);
L.restoreState(State(Loc));
return L.peekNextToken();
@@ -2671,7 +2674,7 @@
// and the exact token produced.
LangOptions FakeLangOptions;
- Lexer L(FakeLangOptions, SM, BufferID, nullptr, /*InSILMode=*/false,
+ Lexer L(FakeLangOptions, SM, BufferID, nullptr, LexerMode::Swift,
HashbangMode::Allowed, CommentRetentionMode::None,
TriviaRetentionMode::WithoutTrivia, BufferStart, BufferEnd);
@@ -2799,7 +2802,7 @@
// comments and normally we won't be at the beginning of a comment token
// (making this option irrelevant), or the caller lexed comments and
// we need to lex just the comment token.
- Lexer L(FakeLangOpts, SM, BufferID, nullptr, /*InSILMode=*/ false,
+ Lexer L(FakeLangOpts, SM, BufferID, nullptr, LexerMode::Swift,
HashbangMode::Allowed, CommentRetentionMode::ReturnAsTokens);
L.restoreState(State(Loc));
L.skipToEndOfLine(/*EatNewline=*/true);
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index afb476c..1effd9d 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -59,7 +59,7 @@
if (Offset == 0 && EndOffset == 0)
EndOffset = SM.getRangeForBuffer(BufferID).getByteLength();
- Lexer L(LangOpts, SM, BufferID, Diags, /*InSILMode=*/false,
+ Lexer L(LangOpts, SM, BufferID, Diags, LexerMode::Swift,
HashbangMode::Allowed, RetainComments, TriviaRetention, Offset,
EndOffset);
@@ -345,6 +345,19 @@
//===----------------------------------------------------------------------===//
+static LexerMode sourceFileKindToLexerMode(SourceFileKind kind) {
+ switch (kind) {
+ case swift::SourceFileKind::Interface:
+ return LexerMode::SwiftInterface;
+ case swift::SourceFileKind::SIL:
+ return LexerMode::SIL;
+ case swift::SourceFileKind::Library:
+ case swift::SourceFileKind::Main:
+ case swift::SourceFileKind::REPL:
+ return LexerMode::Swift;
+ }
+}
+
Parser::Parser(unsigned BufferID, SourceFile &SF, SILParserTUStateBase *SIL,
PersistentParserState *PersistentState,
std::shared_ptr<SyntaxParseActions> SPActions,
@@ -361,7 +374,7 @@
std::unique_ptr<Lexer>(new Lexer(
SF.getASTContext().LangOpts, SF.getASTContext().SourceMgr,
BufferID, LexerDiags,
- /*InSILMode=*/SIL != nullptr,
+ sourceFileKindToLexerMode(SF.Kind),
SF.Kind == SourceFileKind::Main
? HashbangMode::Allowed
: HashbangMode::Disallowed,
@@ -399,7 +412,7 @@
void relexComment(CharSourceRange CommentRange,
llvm::SmallVectorImpl<Token> &Scratch) {
- Lexer L(Ctx.LangOpts, Ctx.SourceMgr, BufferID, nullptr, /*InSILMode=*/false,
+ Lexer L(Ctx.LangOpts, Ctx.SourceMgr, BufferID, nullptr, LexerMode::Swift,
HashbangMode::Disallowed,
CommentRetentionMode::ReturnAsTokens,
TriviaRetentionMode::WithoutTrivia,
@@ -1119,7 +1132,7 @@
std::unique_ptr<Lexer> Lex;
Lex.reset(new Lexer(Impl.LangOpts, SM,
BufferID, &Impl.Diags,
- /*InSILMode=*/false,
+ LexerMode::Swift,
HashbangMode::Allowed,
CommentRetentionMode::None,
TriviaRetentionMode::WithoutTrivia,
diff --git a/lib/SIL/LinearLifetimeChecker.cpp b/lib/SIL/LinearLifetimeChecker.cpp
index 858dd2b..ed11ee1 100644
--- a/lib/SIL/LinearLifetimeChecker.cpp
+++ b/lib/SIL/LinearLifetimeChecker.cpp
@@ -53,6 +53,11 @@
/// The blocks that we have already visited.
SmallPtrSetImpl<SILBasicBlock *> &visitedBlocks;
+ /// If non-null, a list in which we should place any detected leaking
+ /// blocks for our caller. The intention is that this can be used in a
+ /// failing case to insert the missing destroys.
+ SmallVectorImpl<SILBasicBlock *> *leakingBlocks;
+
/// The set of blocks with consuming uses.
SmallPtrSet<SILBasicBlock *, 8> blocksWithConsumingUses;
@@ -66,12 +71,13 @@
/// A list of successor blocks that we must visit by the time the algorithm
/// terminates.
- SmallPtrSet<SILBasicBlock *, 8> successorBlocksThatMustBeVisited;
+ SmallSetVector<SILBasicBlock *, 8> successorBlocksThatMustBeVisited;
State(SILValue value, SmallPtrSetImpl<SILBasicBlock *> &visitedBlocks,
- ErrorBehaviorKind errorBehavior)
+ ErrorBehaviorKind errorBehavior,
+ SmallVectorImpl<SILBasicBlock *> *leakingBlocks)
: value(value), errorBehavior(errorBehavior),
- visitedBlocks(visitedBlocks) {}
+ visitedBlocks(visitedBlocks), leakingBlocks(leakingBlocks) {}
void initializeAllNonConsumingUses(
ArrayRef<BranchPropagatedUser> nonConsumingUsers);
@@ -331,7 +337,7 @@
// First remove BB from the SuccessorBlocksThatMustBeVisited list. This
// ensures that when the algorithm terminates, we know that BB was not the
// beginning of a non-covered path to the exit.
- successorBlocksThatMustBeVisited.erase(block);
+ successorBlocksThatMustBeVisited.remove(block);
// Then remove BB from BlocksWithNonLifetimeEndingUses so we know that
// this block was properly joint post-dominated by our lifetime ending
@@ -395,23 +401,41 @@
bool State::checkDataflowEndState(DeadEndBlocks &deBlocks) {
// Make sure that we visited all successor blocks that we needed to visit to
// make sure we didn't leak.
+ bool doesntHaveAnyLeaks = true;
+
if (!successorBlocksThatMustBeVisited.empty()) {
- return handleError([&] {
- llvm::errs()
- << "Function: '" << value->getFunction()->getName() << "'\n"
- << "Error! Found a leak due to a consuming post-dominance failure!\n"
- << " Value: " << *value << " Post Dominating Failure Blocks:\n";
- for (auto *succBlock : successorBlocksThatMustBeVisited) {
- llvm::errs() << " bb" << succBlock->getDebugID();
- }
- llvm::errs() << '\n';
- });
+ // If we are asked to store any leaking blocks, put them in the leaking
+ // blocks array.
+ if (leakingBlocks) {
+ copy(successorBlocksThatMustBeVisited,
+ std::back_inserter(*leakingBlocks));
+ }
+
+ // If we are supposed to error on leaks, do so now.
+ if (!errorBehavior.shouldReturnFalseOnLeak()) {
+ return handleError([&] {
+ llvm::errs() << "Function: '" << value->getFunction()->getName()
+ << "'\n"
+ << "Error! Found a leak due to a consuming post-dominance "
+ "failure!\n"
+ << " Value: " << *value
+ << " Post Dominating Failure Blocks:\n";
+ for (auto *succBlock : successorBlocksThatMustBeVisited) {
+ llvm::errs() << " bb" << succBlock->getDebugID();
+ }
+ llvm::errs() << '\n';
+ });
+ }
+
+ // Otherwise... see if we have any other failures. This signals the user
+ // wants us to tell it where to insert compensating destroys.
+ doesntHaveAnyLeaks = false;
}
// Make sure that we do not have any lifetime ending uses left to visit that
// are not transitively unreachable blocks.... so return early.
if (blocksWithNonConsumingUses.empty()) {
- return true;
+ return doesntHaveAnyLeaks;
}
// If we do have remaining blocks, then these non lifetime ending uses must be
@@ -436,7 +460,7 @@
// If all of our remaining blocks were dead uses, then return true. We are
// good.
- return true;
+ return doesntHaveAnyLeaks;
}
//===----------------------------------------------------------------------===//
@@ -447,10 +471,11 @@
SILValue value, ArrayRef<BranchPropagatedUser> consumingUses,
ArrayRef<BranchPropagatedUser> nonConsumingUses,
SmallPtrSetImpl<SILBasicBlock *> &visitedBlocks, DeadEndBlocks &deBlocks,
- ErrorBehaviorKind errorBehavior) {
+ ErrorBehaviorKind errorBehavior,
+ SmallVectorImpl<SILBasicBlock *> *leakingBlocks) {
assert(!consumingUses.empty() && "Must have at least one consuming user?!");
- State state(value, visitedBlocks, errorBehavior);
+ State state(value, visitedBlocks, errorBehavior, leakingBlocks);
// First add our non-consuming uses and their blocks to the
// blocksWithNonConsumingUses map. While we do this, if we have multiple uses
diff --git a/lib/SIL/SILConstants.cpp b/lib/SIL/SILConstants.cpp
index 38cbeb5..756b97e 100644
--- a/lib/SIL/SILConstants.cpp
+++ b/lib/SIL/SILConstants.cpp
@@ -82,6 +82,20 @@
return;
}
}
+ case RK_Enum: {
+ auto *decl = getEnumValue();
+ os << "enum: ";
+ decl->print(os);
+ return;
+ }
+ case RK_EnumWithPayload: {
+ auto *decl = getEnumValue();
+ os << "enum: ";
+ decl->print(os);
+ os << ", payload: ";
+ getEnumPayloadValue().print(os, indent);
+ return;
+ }
case RK_DirectAddress:
case RK_DerivedAddress: {
SmallVector<unsigned, 4> accessPath;
@@ -111,6 +125,10 @@
return Function;
case RK_Aggregate:
return Aggregate;
+ case RK_Enum:
+ return Enum;
+ case RK_EnumWithPayload:
+ return EnumWithPayload;
case RK_Integer:
case RK_IntegerInline:
return Integer;
@@ -133,6 +151,9 @@
case RK_Metatype:
case RK_Function:
assert(0 && "cloning this representation kind is not supported");
+ case RK_Enum:
+ // These have trivial inline storage, just return a copy.
+ return *this;
case RK_IntegerInline:
case RK_Integer:
return SymbolicValue::getInteger(getIntegerValue(), astContext);
@@ -146,6 +167,8 @@
results.push_back(elt.cloneInto(astContext));
return getAggregate(results, astContext);
}
+ case RK_EnumWithPayload:
+ return getEnumWithPayload(getEnumValue(), getEnumPayloadValue(), astContext);
case RK_DirectAddress:
case RK_DerivedAddress: {
SmallVector<unsigned, 4> accessPath;
@@ -355,6 +378,56 @@
}
//===----------------------------------------------------------------------===//
+// Enums
+//===----------------------------------------------------------------------===//
+
+namespace swift {
+
+/// This is the representation of a constant enum value with payload.
+struct EnumWithPayloadSymbolicValue final {
+ /// The enum case.
+ EnumElementDecl *enumDecl;
+ SymbolicValue payload;
+
+ EnumWithPayloadSymbolicValue(EnumElementDecl *decl, SymbolicValue payload)
+ : enumDecl(decl), payload(payload) {}
+
+private:
+ EnumWithPayloadSymbolicValue() = delete;
+ EnumWithPayloadSymbolicValue(const EnumWithPayloadSymbolicValue &) = delete;
+};
+} // end namespace swift
+
+/// This returns a constant Symbolic value for the enum case in `decl` with a
+/// payload.
+SymbolicValue
+SymbolicValue::getEnumWithPayload(EnumElementDecl *decl, SymbolicValue payload,
+ ASTContext &astContext) {
+ assert(decl && payload.isConstant());
+ auto rawMem = astContext.Allocate(sizeof(EnumWithPayloadSymbolicValue),
+ alignof(EnumWithPayloadSymbolicValue));
+ auto enumVal = ::new (rawMem) EnumWithPayloadSymbolicValue(decl, payload);
+
+ SymbolicValue result;
+ result.representationKind = RK_EnumWithPayload;
+ result.value.enumValWithPayload = enumVal;
+ return result;
+}
+
+EnumElementDecl *SymbolicValue::getEnumValue() const {
+ if (representationKind == RK_Enum)
+ return value.enumVal;
+
+ assert(representationKind == RK_EnumWithPayload);
+ return value.enumValWithPayload->enumDecl;
+}
+
+SymbolicValue SymbolicValue::getEnumPayloadValue() const {
+ assert(representationKind == RK_EnumWithPayload);
+ return value.enumValWithPayload->payload;
+}
+
+//===----------------------------------------------------------------------===//
// Addresses
//===----------------------------------------------------------------------===//
diff --git a/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp b/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp
index e7bced7..2407511 100644
--- a/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp
+++ b/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp
@@ -32,7 +32,7 @@
FunctionSummary &functionSummary = info->getSummary();
ArgumentSummary &argSummary =
functionSummary.getAccessForArgument(index);
- index++;
+ ++index;
auto *functionArg = cast<SILFunctionArgument>(arg);
// Only summarize @inout_aliasable arguments.
@@ -426,7 +426,7 @@
os << ", ";
}
os << subAccess.getDescription(BaseType, M);
- index++;
+ ++index;
}
os << "]";
@@ -597,7 +597,7 @@
const IndexTrieNode *iter = subPath;
while (iter) {
- length++;
+ ++length;
iter = iter->getParent();
}
@@ -631,7 +631,7 @@
unsigned argCount = getArgumentCount();
os << "(";
- for (unsigned i = 0; i < argCount; i++) {
+ for (unsigned i = 0; i < argCount; ++i) {
if (i > 0) {
os << ", ";
}
diff --git a/lib/SILOptimizer/IPO/CapturePromotion.cpp b/lib/SILOptimizer/IPO/CapturePromotion.cpp
index 7e681f2..48a7ff5 100644
--- a/lib/SILOptimizer/IPO/CapturePromotion.cpp
+++ b/lib/SILOptimizer/IPO/CapturePromotion.cpp
@@ -1212,10 +1212,12 @@
/// Change the base in mark_dependence.
static void
mapMarkDependenceArguments(SingleValueInstruction *root,
- llvm::DenseMap<SILValue, SILValue> &map) {
- for (auto *Use : root->getUses()) {
+ llvm::DenseMap<SILValue, SILValue> &map,
+ SmallVectorImpl<SILInstruction *> &Delete) {
+ SmallVector<Operand *, 16> Uses(root->getUses());
+ for (auto *Use : Uses) {
if (auto *MD = dyn_cast<MarkDependenceInst>(Use->getUser())) {
- mapMarkDependenceArguments(MD, map);
+ mapMarkDependenceArguments(MD, map, Delete);
auto iter = map.find(MD->getBase());
if (iter != map.end()) {
MD->setBase(iter->second);
@@ -1223,7 +1225,7 @@
// Remove mark_dependence on trivial values.
if (MD->getBase()->getType().isTrivial(MD->getModule())) {
MD->replaceAllUsesWith(MD->getValue());
- MD->eraseFromParent();
+ Delete.push_back(MD);
}
}
}
@@ -1332,7 +1334,10 @@
}
}
// Map the mark dependence arguments.
- mapMarkDependenceArguments(NewPAI, capturedMap);
+ SmallVector<SILInstruction *, 16> Delete;
+ mapMarkDependenceArguments(NewPAI, capturedMap, Delete);
+ for (auto *inst : Delete)
+ inst->eraseFromParent();
}
return ClonedFn;
diff --git a/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp b/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp
index dc87f64..06a16fc 100644
--- a/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp
+++ b/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp
@@ -238,45 +238,39 @@
continue;
}
-#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
- if (isa<Load##Name##Inst>(User)) { \
- Uses.emplace_back(User, PMOUseKind::Load); \
- continue; \
- }
-#include "swift/AST/ReferenceStorage.def"
-
// Stores *to* the allocation are writes.
- if (isa<StoreInst>(User) && UI->getOperandNumber() == 1) {
- if (PointeeType.is<TupleType>()) {
- UsesToScalarize.push_back(User);
+ if (auto *si = dyn_cast<StoreInst>(User)) {
+ if (UI->getOperandNumber() == StoreInst::Dest) {
+ if (PointeeType.is<TupleType>()) {
+ UsesToScalarize.push_back(User);
+ continue;
+ }
+
+ auto kind = ([&]() -> PMOUseKind {
+ switch (si->getOwnershipQualifier()) {
+ // Coming out of SILGen, we assume that raw stores are
+ // initializations, unless they have trivial type (which we classify
+ // as InitOrAssign).
+ case StoreOwnershipQualifier::Unqualified:
+ if (PointeeType.isTrivial(User->getModule()))
+ return PMOUseKind::InitOrAssign;
+ return PMOUseKind::Initialization;
+
+ case StoreOwnershipQualifier::Init:
+ return PMOUseKind::Initialization;
+
+ case StoreOwnershipQualifier::Assign:
+ return PMOUseKind::Assign;
+
+ case StoreOwnershipQualifier::Trivial:
+ return PMOUseKind::InitOrAssign;
+ }
+ })();
+ Uses.emplace_back(si, kind);
continue;
}
-
- // Coming out of SILGen, we assume that raw stores are initializations,
- // unless they have trivial type (which we classify as InitOrAssign).
- auto Kind = ([&]() -> PMOUseKind {
- if (PointeeType.isTrivial(User->getModule()))
- return PMOUseKind::InitOrAssign;
- return PMOUseKind::Initialization;
- })();
- Uses.emplace_back(User, Kind);
- continue;
}
-#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
- if (auto *SI = dyn_cast<Store##Name##Inst>(User)) { \
- if (UI->getOperandNumber() == 1) { \
- PMOUseKind Kind; \
- if (SI->isInitializationOfDest()) \
- Kind = PMOUseKind::Initialization; \
- else \
- Kind = PMOUseKind::Assign; \
- Uses.emplace_back(User, Kind); \
- continue; \
- } \
- }
-#include "swift/AST/ReferenceStorage.def"
-
if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
// If this is a copy of a tuple, we should scalarize it so that we don't
// have an access that crosses elements.
@@ -294,6 +288,8 @@
auto Kind = ([&]() -> PMOUseKind {
if (UI->getOperandNumber() == CopyAddrInst::Src)
return PMOUseKind::Load;
+ if (PointeeType.isTrivial(CAI->getModule()))
+ return PMOUseKind::InitOrAssign;
if (CAI->isInitializationOfDest())
return PMOUseKind::Initialization;
return PMOUseKind::Assign;
diff --git a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
index 283f43f..780a8a3 100644
--- a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
+++ b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
@@ -13,6 +13,9 @@
#define DEBUG_TYPE "predictable-memopt"
#include "PMOMemoryUseCollector.h"
+#include "swift/SIL/BasicBlockUtils.h"
+#include "swift/SIL/BranchPropagatedUser.h"
+#include "swift/SIL/OwnershipUtils.h"
#include "swift/SIL/SILBuilder.h"
#include "swift/SILOptimizer/PassManager/Passes.h"
#include "swift/SILOptimizer/PassManager/Transforms.h"
@@ -162,13 +165,12 @@
struct AvailableValue {
friend class AvailableValueAggregator;
- /// If this gets too expensive in terms of copying, we can use an arena and a
- /// FrozenPtrSet like we do in ARC.
- using SetVector = llvm::SmallSetVector<SILInstruction *, 1>;
-
SILValue Value;
unsigned SubElementNumber;
- SetVector InsertionPoints;
+
+ /// If this gets too expensive in terms of copying, we can use an arena and a
+ /// FrozenPtrSet like we do in ARC.
+ SmallSetVector<StoreInst *, 1> InsertionPoints;
/// Just for updating.
SmallVectorImpl<PMOMemoryUse> *Uses;
@@ -181,7 +183,7 @@
/// *NOTE* We assume that all available values start with a singular insertion
/// point and insertion points are added by merging.
AvailableValue(SILValue Value, unsigned SubElementNumber,
- SILInstruction *InsertPoint)
+ StoreInst *InsertPoint)
: Value(Value), SubElementNumber(SubElementNumber), InsertionPoints() {
InsertionPoints.insert(InsertPoint);
}
@@ -221,7 +223,7 @@
SILValue getValue() const { return Value; }
SILType getType() const { return Value->getType(); }
unsigned getSubElementNumber() const { return SubElementNumber; }
- ArrayRef<SILInstruction *> getInsertionPoints() const {
+ ArrayRef<StoreInst *> getInsertionPoints() const {
return InsertionPoints.getArrayRef();
}
@@ -230,16 +232,14 @@
InsertionPoints.set_union(Other.InsertionPoints);
}
- void addInsertionPoint(SILInstruction *I) & { InsertionPoints.insert(I); }
+ void addInsertionPoint(StoreInst *I) & { InsertionPoints.insert(I); }
- /// TODO: This needs a better name.
AvailableValue emitStructExtract(SILBuilder &B, SILLocation Loc, VarDecl *D,
unsigned SubElementNumber) const {
SILValue NewValue = B.emitStructExtract(Loc, Value, D);
return {NewValue, SubElementNumber, InsertionPoints};
}
- /// TODO: This needs a better name.
AvailableValue emitTupleExtract(SILBuilder &B, SILLocation Loc,
unsigned EltNo,
unsigned SubElementNumber) const {
@@ -247,13 +247,25 @@
return {NewValue, SubElementNumber, InsertionPoints};
}
+ AvailableValue emitBeginBorrow(SILBuilder &b, SILLocation loc) const {
+ // If we do not have ownership or already are guaranteed, just return a copy
+ // of our state.
+ if (!b.hasOwnership() || Value.getOwnershipKind().isCompatibleWith(
+ ValueOwnershipKind::Guaranteed)) {
+ return {Value, SubElementNumber, InsertionPoints};
+ }
+
+ // Otherwise, return newValue.
+ return {b.createBeginBorrow(loc, Value), SubElementNumber, InsertionPoints};
+ }
+
void dump() const LLVM_ATTRIBUTE_USED;
void print(llvm::raw_ostream &os) const;
private:
/// Private constructor.
AvailableValue(SILValue Value, unsigned SubElementNumber,
- const SetVector &InsertPoints)
+ const decltype(InsertionPoints) &InsertPoints)
: Value(Value), SubElementNumber(SubElementNumber),
InsertionPoints(InsertPoints) {}
};
@@ -304,10 +316,17 @@
SILType EltTy = ValTy.getTupleElementType(EltNo);
unsigned NumSubElt = getNumSubElements(EltTy, B.getModule());
if (SubElementNumber < NumSubElt) {
- auto NewVal = Val.emitTupleExtract(B, Loc, EltNo, SubElementNumber);
- return nonDestructivelyExtractSubElement(NewVal, B, Loc);
+ auto BorrowedVal = Val.emitBeginBorrow(B, Loc);
+ auto NewVal =
+ BorrowedVal.emitTupleExtract(B, Loc, EltNo, SubElementNumber);
+ SILValue result = nonDestructivelyExtractSubElement(NewVal, B, Loc);
+ // If our original value wasn't guaranteed and we did actually perform a
+ // borrow as a result, insert the end_borrow.
+ if (BorrowedVal.getValue() != Val.getValue())
+ B.createEndBorrow(Loc, BorrowedVal.getValue());
+ return result;
}
-
+
SubElementNumber -= NumSubElt;
}
@@ -321,8 +340,15 @@
unsigned NumSubElt = getNumSubElements(fieldType, B.getModule());
if (SubElementNumber < NumSubElt) {
- auto NewVal = Val.emitStructExtract(B, Loc, D, SubElementNumber);
- return nonDestructivelyExtractSubElement(NewVal, B, Loc);
+ auto BorrowedVal = Val.emitBeginBorrow(B, Loc);
+ auto NewVal =
+ BorrowedVal.emitStructExtract(B, Loc, D, SubElementNumber);
+ SILValue result = nonDestructivelyExtractSubElement(NewVal, B, Loc);
+ // If our original value wasn't guaranteed and we did actually perform a
+ // borrow as a result, insert the end_borrow.
+ if (BorrowedVal.getValue() != Val.getValue())
+ B.createEndBorrow(Loc, BorrowedVal.getValue());
+ return result;
}
SubElementNumber -= NumSubElt;
@@ -330,10 +356,16 @@
}
llvm_unreachable("Didn't find field");
}
-
- // Otherwise, we're down to a scalar.
+
+ // Otherwise, we're down to a scalar. If we have ownership enabled,
+ // we return a copy. Otherwise, we can ignore ownership issues.
+ // This is ok since in [ossa] we are going to eliminate a
+ // load [copy] or a load [trivial], while in non-[ossa] SIL we will
+ // be replacing unqualified loads.
assert(SubElementNumber == 0 && "Miscalculation indexing subelements");
- return Val.getValue();
+ if (!B.hasOwnership())
+ return Val.getValue();
+ return B.emitCopyValueOperation(Loc, Val.getValue());
}
//===----------------------------------------------------------------------===//
@@ -361,13 +393,23 @@
SILLocation Loc;
MutableArrayRef<AvailableValue> AvailableValueList;
SmallVectorImpl<PMOMemoryUse> &Uses;
+ DeadEndBlocks &deadEndBlocks;
+ bool isTake;
+
+ /// Keep track of all instructions that we have added. Once we are done
+ /// promoting a value, we need to make sure that if we need to balance any
+ /// copies (to avoid leaks), we do so. This is not used if we are performing a
+ /// take.
+ SmallVector<SILInstruction *, 16> insertedInsts;
public:
AvailableValueAggregator(SILInstruction *Inst,
MutableArrayRef<AvailableValue> AvailableValueList,
- SmallVectorImpl<PMOMemoryUse> &Uses)
+ SmallVectorImpl<PMOMemoryUse> &Uses,
+ DeadEndBlocks &deadEndBlocks, bool isTake)
: M(Inst->getModule()), B(Inst), Loc(Inst->getLoc()),
- AvailableValueList(AvailableValueList), Uses(Uses) {}
+ AvailableValueList(AvailableValueList), Uses(Uses),
+ deadEndBlocks(deadEndBlocks), isTake(isTake) {}
// This is intended to be passed by reference only once constructed.
AvailableValueAggregator(const AvailableValueAggregator &) = delete;
@@ -376,19 +418,26 @@
operator=(const AvailableValueAggregator &) = delete;
AvailableValueAggregator &operator=(AvailableValueAggregator &&) = delete;
- SILValue aggregateValues(SILType LoadTy, SILValue Address, unsigned FirstElt);
+ SILValue aggregateValues(SILType LoadTy, SILValue Address, unsigned FirstElt,
+ bool isTopLevel = true);
+ bool canTake(SILType loadTy, unsigned firstElt) const;
+
+ /// If as a result of us copying values, we may have unconsumed destroys, find
+ /// the appropriate location and place the values there. Only used when
+ /// ownership is enabled.
+ void addMissingDestroysForCopiedValues(LoadInst *li);
void print(llvm::raw_ostream &os) const;
void dump() const LLVM_ATTRIBUTE_USED;
-
private:
- SILValue aggregateFullyAvailableValue(SILType LoadTy, unsigned FirstElt);
- SILValue aggregateTupleSubElts(TupleType *TT, SILType LoadTy,
- SILValue Address, unsigned FirstElt);
- SILValue aggregateStructSubElts(StructDecl *SD, SILType LoadTy,
- SILValue Address, unsigned FirstElt);
- SILValue handlePrimitiveValue(SILType LoadTy, SILValue Address,
- unsigned FirstElt);
+ SILValue aggregateFullyAvailableValue(SILType loadTy, unsigned firstElt);
+ SILValue aggregateTupleSubElts(TupleType *tt, SILType loadTy,
+ SILValue address, unsigned firstElt);
+ SILValue aggregateStructSubElts(StructDecl *sd, SILType loadTy,
+ SILValue address, unsigned firstElt);
+ SILValue handlePrimitiveValue(SILType loadTy, SILValue address,
+ unsigned firstElt);
+ bool isFullyAvailable(SILType loadTy, unsigned firstElt) const;
};
} // end anonymous namespace
@@ -403,11 +452,78 @@
}
}
+bool AvailableValueAggregator::isFullyAvailable(SILType loadTy,
+ unsigned firstElt) const {
+ if (firstElt >= AvailableValueList.size()) { // #Elements may be zero.
+ return false;
+ }
+
+ auto &firstVal = AvailableValueList[firstElt];
+
+ // Make sure that the first element is available and is the correct type.
+ if (!firstVal || firstVal.getType() != loadTy)
+ return false;
+
+ return llvm::all_of(range(getNumSubElements(loadTy, M)),
+ [&](unsigned index) -> bool {
+ auto &val = AvailableValueList[firstElt + index];
+ return val.getValue() == firstVal.getValue() &&
+ val.getSubElementNumber() == index;
+ });
+}
+
+// We can only take if we never have to split a larger value to promote this
+// address.
+bool AvailableValueAggregator::canTake(SILType loadTy,
+ unsigned firstElt) const {
+ // If we do not have ownership, we can always take since we do not need to
+ // keep any ownership invariants up to date. In the future, we should be able
+ // to chop up larger values before they are being stored.
+ if (!B.hasOwnership())
+ return true;
+
+ // If we are trivially fully available, just return true.
+ if (isFullyAvailable(loadTy, firstElt))
+ return true;
+
+ // Otherwise see if we are an aggregate with fully available leaf types.
+ if (TupleType *tt = loadTy.getAs<TupleType>()) {
+ return llvm::all_of(indices(tt->getElements()), [&](unsigned eltNo) {
+ SILType eltTy = loadTy.getTupleElementType(eltNo);
+ unsigned numSubElt = getNumSubElements(eltTy, M);
+ bool success = canTake(eltTy, firstElt);
+ firstElt += numSubElt;
+ return success;
+ });
+ }
+
+ if (auto *sd = getFullyReferenceableStruct(loadTy)) {
+ return llvm::all_of(sd->getStoredProperties(), [&](VarDecl *decl) -> bool {
+ SILType eltTy = loadTy.getFieldType(decl, M);
+ unsigned numSubElt = getNumSubElements(eltTy, M);
+ bool success = canTake(eltTy, firstElt);
+ firstElt += numSubElt;
+ return success;
+ });
+ }
+
+  // Otherwise, fail. The value is not fully available at its leaves. We cannot
+ // perform a take.
+ return false;
+}
+
/// Given a bunch of primitive subelement values, build out the right aggregate
/// type (LoadTy) by emitting tuple and struct instructions as necessary.
SILValue AvailableValueAggregator::aggregateValues(SILType LoadTy,
SILValue Address,
- unsigned FirstElt) {
+ unsigned FirstElt,
+ bool isTopLevel) {
+ // If we are performing a take, make sure that we have available values for
+ // /all/ of our values. Otherwise, bail.
+ if (isTopLevel && isTake && !canTake(LoadTy, FirstElt)) {
+ return SILValue();
+ }
+
// Check to see if the requested value is fully available, as an aggregate.
// This is a super-common case for single-element structs, but is also a
// general answer for arbitrary structs and tuples as well.
@@ -432,29 +548,72 @@
// aggregate. This is a super-common case for single-element structs, but is
// also a general answer for arbitrary structs and tuples as well.
SILValue
-AvailableValueAggregator::aggregateFullyAvailableValue(SILType LoadTy,
- unsigned FirstElt) {
- if (FirstElt >= AvailableValueList.size()) { // #Elements may be zero.
+AvailableValueAggregator::aggregateFullyAvailableValue(SILType loadTy,
+ unsigned firstElt) {
+ // Check if our underlying type is fully available. If it isn't, bail.
+ if (!isFullyAvailable(loadTy, firstElt))
return SILValue();
+
+  // Ok, grab our first value. (note: any will actually do).
+ auto &firstVal = AvailableValueList[firstElt];
+
+ // Ok, we know that all of our available values are all parts of the same
+ // value. Without ownership, we can just return the underlying first value.
+ if (!B.hasOwnership())
+ return firstVal.getValue();
+
+ // Otherwise, we need to put in a copy. This is b/c we only propagate along +1
+ // values and we are eliminating a load [copy].
+ ArrayRef<StoreInst *> insertPts = firstVal.getInsertionPoints();
+ if (insertPts.size() == 1) {
+ // Use the scope and location of the store at the insertion point.
+ SILBuilderWithScope builder(insertPts[0], &insertedInsts);
+ SILLocation loc = insertPts[0]->getLoc();
+ // If we have a take, just return the value.
+ if (isTake)
+ return firstVal.getValue();
+ // Otherwise, return a copy of the value.
+ return builder.emitCopyValueOperation(loc, firstVal.getValue());
}
- auto &FirstVal = AvailableValueList[FirstElt];
+ // If we have multiple insertion points, put copies at each point and use the
+ // SSA updater to get a value. The reason why this is safe is that we can only
+ // have multiple insertion points if we are storing exactly the same value
+ // implying that we can just copy firstVal at each insertion point.
+ SILSSAUpdater updater(B.getModule());
+ updater.Initialize(loadTy);
- // Make sure that the first element is available and is the correct type.
- if (!FirstVal || FirstVal.getType() != LoadTy)
- return SILValue();
+ Optional<SILValue> singularValue;
+ for (auto *insertPt : insertPts) {
+ // Use the scope and location of the store at the insertion point.
+ SILBuilderWithScope builder(insertPt, &insertedInsts);
+ SILLocation loc = insertPt->getLoc();
+ SILValue eltVal = firstVal.getValue();
- // If the first element of this value is available, check that any extra
- // available values are from the same place as our first value.
- if (llvm::any_of(range(getNumSubElements(LoadTy, M)),
- [&](unsigned Index) -> bool {
- auto &Val = AvailableValueList[FirstElt + Index];
- return Val.getValue() != FirstVal.getValue() ||
- Val.getSubElementNumber() != Index;
- }))
- return SILValue();
+ // If we are not taking, copy the element value.
+ if (!isTake) {
+ eltVal = builder.emitCopyValueOperation(loc, eltVal);
+ }
- return FirstVal.getValue();
+ if (!singularValue.hasValue()) {
+ singularValue = eltVal;
+ } else if (*singularValue != eltVal) {
+ singularValue = SILValue();
+ }
+
+ // And then put the value into the SSA updater.
+ updater.AddAvailableValue(insertPt->getParent(), eltVal);
+ }
+
+  // If we are only tracking a singular value, we do not need to construct
+ // SSA. Just return that value.
+ if (auto val = singularValue.getValueOr(SILValue()))
+ return val;
+
+ // Finally, grab the value from the SSA updater.
+ SILValue result = updater.GetValueInMiddleOfBlock(B.getInsertionBB());
+ assert(result.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned));
+ return result;
}
SILValue AvailableValueAggregator::aggregateTupleSubElts(TupleType *TT,
@@ -470,86 +629,166 @@
// If we are missing any of the available values in this struct element,
// compute an address to load from.
SILValue EltAddr;
- if (anyMissing(FirstElt, NumSubElt, AvailableValueList))
+ if (anyMissing(FirstElt, NumSubElt, AvailableValueList)) {
+ assert(!isTake && "When taking, values should never be missing?!");
EltAddr =
B.createTupleElementAddr(Loc, Address, EltNo, EltTy.getAddressType());
+ }
- ResultElts.push_back(aggregateValues(EltTy, EltAddr, FirstElt));
+ ResultElts.push_back(
+ aggregateValues(EltTy, EltAddr, FirstElt, /*isTopLevel*/ false));
FirstElt += NumSubElt;
}
return B.createTuple(Loc, LoadTy, ResultElts);
}
-SILValue AvailableValueAggregator::aggregateStructSubElts(StructDecl *SD,
- SILType LoadTy,
- SILValue Address,
- unsigned FirstElt) {
- SmallVector<SILValue, 4> ResultElts;
+SILValue AvailableValueAggregator::aggregateStructSubElts(StructDecl *sd,
+ SILType loadTy,
+ SILValue address,
+ unsigned firstElt) {
+ SmallVector<SILValue, 4> resultElts;
- for (auto *FD : SD->getStoredProperties()) {
- SILType EltTy = LoadTy.getFieldType(FD, M);
- unsigned NumSubElt = getNumSubElements(EltTy, M);
+ for (auto *decl : sd->getStoredProperties()) {
+ SILType eltTy = loadTy.getFieldType(decl, M);
+ unsigned numSubElt = getNumSubElements(eltTy, M);
// If we are missing any of the available values in this struct element,
// compute an address to load from.
- SILValue EltAddr;
- if (anyMissing(FirstElt, NumSubElt, AvailableValueList))
- EltAddr =
- B.createStructElementAddr(Loc, Address, FD, EltTy.getAddressType());
+ SILValue eltAddr;
+ if (anyMissing(firstElt, numSubElt, AvailableValueList)) {
+ assert(!isTake && "When taking, values should never be missing?!");
+ eltAddr =
+ B.createStructElementAddr(Loc, address, decl, eltTy.getAddressType());
+ }
- ResultElts.push_back(aggregateValues(EltTy, EltAddr, FirstElt));
- FirstElt += NumSubElt;
+ resultElts.push_back(
+ aggregateValues(eltTy, eltAddr, firstElt, /*isTopLevel*/ false));
+ firstElt += numSubElt;
}
- return B.createStruct(Loc, LoadTy, ResultElts);
+
+ return B.createStruct(Loc, loadTy, resultElts);
}
// We have looked through all of the aggregate values and finally found a
// "primitive value". If the value is available, use it (extracting if we need
// to), otherwise emit a load of the value with the appropriate qualifier.
-SILValue AvailableValueAggregator::handlePrimitiveValue(SILType LoadTy,
- SILValue Address,
- unsigned FirstElt) {
- auto &Val = AvailableValueList[FirstElt];
+SILValue AvailableValueAggregator::handlePrimitiveValue(SILType loadTy,
+ SILValue address,
+ unsigned firstElt) {
+ auto &val = AvailableValueList[firstElt];
// If the value is not available, load the value and update our use list.
- if (!Val) {
- auto *Load =
- B.createLoad(Loc, Address, LoadOwnershipQualifier::Unqualified);
- Uses.emplace_back(Load, PMOUseKind::Load);
- return Load;
+ if (!val) {
+ assert(!isTake && "Should only take fully available values?!");
+ LoadInst *load = ([&]() {
+ if (B.hasOwnership()) {
+ return B.createTrivialLoadOr(Loc, address,
+ LoadOwnershipQualifier::Copy);
+ }
+ return B.createLoad(Loc, address, LoadOwnershipQualifier::Unqualified);
+ }());
+ Uses.emplace_back(load, PMOUseKind::Load);
+ return load;
}
// If we have 1 insertion point, just extract the value and return.
//
// This saves us from having to spend compile time in the SSA updater in this
// case.
- ArrayRef<SILInstruction *> InsertPts = Val.getInsertionPoints();
- if (InsertPts.size() == 1) {
+ ArrayRef<StoreInst *> insertPts = val.getInsertionPoints();
+ if (insertPts.size() == 1) {
// Use the scope and location of the store at the insertion point.
- SILBuilderWithScope Builder(InsertPts[0]);
- SILLocation Loc = InsertPts[0]->getLoc();
- SILValue EltVal = nonDestructivelyExtractSubElement(Val, Builder, Loc);
- assert(EltVal->getType() == LoadTy && "Subelement types mismatch");
- return EltVal;
+ SILBuilderWithScope builder(insertPts[0], &insertedInsts);
+ SILLocation loc = insertPts[0]->getLoc();
+ SILValue eltVal = nonDestructivelyExtractSubElement(val, builder, loc);
+ assert(
+ !builder.hasOwnership() ||
+ eltVal.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned));
+ assert(eltVal->getType() == loadTy && "Subelement types mismatch");
+ return eltVal;
}
// If we have an available value, then we want to extract the subelement from
// the borrowed aggregate before each insertion point.
- SILSSAUpdater Updater(B.getModule());
- Updater.Initialize(LoadTy);
- for (auto *I : Val.getInsertionPoints()) {
+ SILSSAUpdater updater(B.getModule());
+ updater.Initialize(loadTy);
+
+ Optional<SILValue> singularValue;
+ for (auto *i : insertPts) {
// Use the scope and location of the store at the insertion point.
- SILBuilderWithScope Builder(I);
- SILLocation Loc = I->getLoc();
- SILValue EltVal = nonDestructivelyExtractSubElement(Val, Builder, Loc);
- Updater.AddAvailableValue(I->getParent(), EltVal);
+ SILBuilderWithScope builder(i, &insertedInsts);
+ SILLocation loc = i->getLoc();
+ SILValue eltVal = nonDestructivelyExtractSubElement(val, builder, loc);
+ assert(
+ !builder.hasOwnership() ||
+ eltVal.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned));
+
+ if (!singularValue.hasValue()) {
+ singularValue = eltVal;
+ } else if (*singularValue != eltVal) {
+ singularValue = SILValue();
+ }
+
+ updater.AddAvailableValue(i->getParent(), eltVal);
}
+  // If we are only tracking a singular value, we do not need to construct
+ // SSA. Just return that value.
+ if (auto val = singularValue.getValueOr(SILValue()))
+ return val;
+
// Finally, grab the value from the SSA updater.
- SILValue EltVal = Updater.GetValueInMiddleOfBlock(B.getInsertionBB());
- assert(EltVal->getType() == LoadTy && "Subelement types mismatch");
- return EltVal;
+ SILValue eltVal = updater.GetValueInMiddleOfBlock(B.getInsertionBB());
+ assert(!B.hasOwnership() ||
+ eltVal.getOwnershipKind().isCompatibleWith(ValueOwnershipKind::Owned));
+ assert(eltVal->getType() == loadTy && "Subelement types mismatch");
+ return eltVal;
+}
+
+void AvailableValueAggregator::addMissingDestroysForCopiedValues(LoadInst *li) {
+ // If ownership is not enabled... bail. We do not need to do this since we do
+ // not need to insert an extra copy unless we have ownership since without
+ // ownership stores do not consume.
+ if (!B.hasOwnership())
+ return;
+
+ SmallVector<BranchPropagatedUser, 1> consumingUses;
+ SmallPtrSet<SILBasicBlock *, 8> visitedBlocks;
+ SmallVector<SILBasicBlock *, 8> leakingBlocks;
+ while (!insertedInsts.empty()) {
+ auto *cvi = dyn_cast<CopyValueInst>(insertedInsts.pop_back_val());
+ if (!cvi)
+ continue;
+
+ // Clear our worklist.
+ consumingUses.clear();
+ visitedBlocks.clear();
+ leakingBlocks.clear();
+
+ // The linear lifetime checker doesn't care if the passed in load is
+ // actually a user of our copy_value. What we care about is that the load is
+ // guaranteed to be in the block where we have reformed the tuple in a
+ // consuming manner. This means if we add it as the consuming use of the
+ // copy, we can find the leaking places if any exist.
+ consumingUses.push_back(li);
+
+ // Then perform the linear lifetime check. If we succeed, continue. We have
+ // no further work to do.
+ auto errorKind =
+ ownership::ErrorBehaviorKind::ReturnFalseOnLeakAssertOtherwise;
+ if (valueHasLinearLifetime(cvi, consumingUses, {}, visitedBlocks,
+ deadEndBlocks, errorKind, &leakingBlocks))
+ continue;
+
+ // Ok, we found some leaking blocks. Insert destroys at the
+ // beginning of these blocks for our copy_value.
+ auto loc = RegularLocation::getAutoGeneratedLocation();
+ for (auto *bb : leakingBlocks) {
+ SILBuilderWithScope b(bb->begin());
+ b.emitDestroyValueOperation(loc, cvi);
+ }
+ }
}
//===----------------------------------------------------------------------===//
@@ -571,7 +810,7 @@
/// The set of uses that we are tracking. This is only here so we can update
/// when exploding copy_addr. It would be great if we did not have to store
/// this.
- llvm::SmallVectorImpl<PMOMemoryUse> &Uses;
+ SmallVectorImpl<PMOMemoryUse> &Uses;
/// The set of blocks with local definitions.
///
@@ -590,7 +829,7 @@
public:
AvailableValueDataflowContext(AllocationInst *TheMemory,
unsigned NumMemorySubElements,
- llvm::SmallVectorImpl<PMOMemoryUse> &Uses);
+ SmallVectorImpl<PMOMemoryUse> &Uses);
/// Try to compute available values for "TheMemory" at the instruction \p
/// StartingFrom. We only compute the values for set bits in \p
@@ -669,32 +908,33 @@
assert(StartSubElt != ~0U && "Store within enum projection not handled");
SILType ValTy = SI->getSrc()->getType();
- for (unsigned i = 0, e = getNumSubElements(ValTy, getModule()); i != e;
- ++i) {
+ for (unsigned i : range(getNumSubElements(ValTy, getModule()))) {
// If this element is not required, don't fill it in.
if (!RequiredElts[StartSubElt+i]) continue;
-
+
+ // This element is now provided.
+ RequiredElts[StartSubElt + i] = false;
+
// If there is no result computed for this subelement, record it. If
// there already is a result, check it for conflict. If there is no
// conflict, then we're ok.
auto &Entry = Result[StartSubElt+i];
if (!Entry) {
- Entry = {SI->getSrc(), i, Inst};
- } else {
- // TODO: This is /really/, /really/, conservative. This basically means
- // that if we do not have an identical store, we will not promote.
- if (Entry.getValue() != SI->getSrc() ||
- Entry.getSubElementNumber() != i) {
- ConflictingValues[StartSubElt + i] = true;
- } else {
- Entry.addInsertionPoint(Inst);
- }
+ Entry = {SI->getSrc(), i, SI};
+ continue;
}
- // This element is now provided.
- RequiredElts[StartSubElt+i] = false;
+ // TODO: This is /really/, /really/, conservative. This basically means
+ // that if we do not have an identical store, we will not promote.
+ if (Entry.getValue() != SI->getSrc() ||
+ Entry.getSubElementNumber() != i) {
+ ConflictingValues[StartSubElt + i] = true;
+ continue;
+ }
+
+ Entry.addInsertionPoint(SI);
}
-
+
return;
}
@@ -707,8 +947,7 @@
SILType ValTy = CAI->getDest()->getType();
bool AnyRequired = false;
- for (unsigned i = 0, e = getNumSubElements(ValTy, getModule()); i != e;
- ++i) {
+ for (unsigned i : range(getNumSubElements(ValTy, getModule()))) {
// If this element is not required, don't fill it in.
AnyRequired = RequiredElts[StartSubElt+i];
if (AnyRequired) break;
@@ -911,7 +1150,7 @@
assert((LoadUse.isValid() || StoreUse.isValid()) &&
"we should have a load or a store, possibly both");
assert(StoreUse.isInvalid() || StoreUse.Kind == Assign ||
- StoreUse.Kind == Initialization);
+ StoreUse.Kind == Initialization || StoreUse.Kind == InitOrAssign);
// Now that we've emitted a bunch of instructions, including a load and store
// but also including other stuff, update the internal state of
@@ -930,6 +1169,12 @@
// something else), track it as an access.
if (StoreUse.isValid()) {
StoreUse.Inst = NewInst;
+ // If our store use by the copy_addr is an assign, then we know that
+ // before we store the new value, we loaded the old value implying that
+ // our store is technically initializing memory when it occurs. So
+ // change the kind to Initialization.
+ if (StoreUse.Kind == Assign)
+ StoreUse.Kind = Initialization;
NonLoadUses[NewInst] = Uses.size();
Uses.push_back(StoreUse);
}
@@ -948,11 +1193,6 @@
}
continue;
-#define ALWAYS_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
- case SILInstructionKind::Name##RetainInst: \
- case SILInstructionKind::Name##ReleaseInst: \
- case SILInstructionKind::StrongRetain##Name##Inst:
-#include "swift/AST/ReferenceStorage.def"
case SILInstructionKind::RetainValueInst:
case SILInstructionKind::StrongRetainInst:
case SILInstructionKind::StrongReleaseInst:
@@ -1016,16 +1256,19 @@
SmallVectorImpl<PMOMemoryUse> &Uses;
SmallVectorImpl<SILInstruction *> &Releases;
+ DeadEndBlocks &deadEndBlocks;
+
/// A structure that we use to compute our available values.
AvailableValueDataflowContext DataflowContext;
public:
AllocOptimize(AllocationInst *memory, SmallVectorImpl<PMOMemoryUse> &uses,
- SmallVectorImpl<SILInstruction *> &releases)
+ SmallVectorImpl<SILInstruction *> &releases,
+ DeadEndBlocks &deadEndBlocks)
: Module(memory->getModule()), TheMemory(memory),
MemoryType(getMemoryType(memory)),
NumMemorySubElements(getNumSubElements(MemoryType, Module)), Uses(uses),
- Releases(releases),
+ Releases(releases), deadEndBlocks(deadEndBlocks),
DataflowContext(TheMemory, NumMemorySubElements, uses) {}
bool optimizeMemoryAccesses();
@@ -1033,10 +1276,10 @@
private:
bool promoteLoad(SILInstruction *Inst);
- void promoteDestroyAddr(DestroyAddrInst *DAI,
- MutableArrayRef<AvailableValue> Values);
- bool canPromoteDestroyAddr(DestroyAddrInst *DAI,
- SmallVectorImpl<AvailableValue> &AvailableValues);
+ void promoteDestroyAddr(DestroyAddrInst *dai,
+ MutableArrayRef<AvailableValue> values);
+ bool canPromoteDestroyAddr(DestroyAddrInst *dai,
+ SmallVectorImpl<AvailableValue> &availableValues);
};
} // end anonymous namespace
@@ -1046,9 +1289,11 @@
/// instruction is loading from. If we can not optimize \p Inst, then just
/// return an empty SILValue.
static SILValue tryFindSrcAddrForLoad(SILInstruction *Inst) {
- // We only handle load [copy], load [trivial] and copy_addr right now.
+ // We only handle load [copy], load [trivial], load and copy_addr right
+ // now. Notably we do not support load [take] when promoting loads.
if (auto *LI = dyn_cast<LoadInst>(Inst))
- return LI->getOperand();
+ if (LI->getOwnershipQualifier() != LoadOwnershipQualifier::Take)
+ return LI->getOperand();
// If this is a CopyAddr, verify that the element type is loadable. If not,
// we can't explode to a load.
@@ -1125,9 +1370,18 @@
// type as the load did, and emit smaller loads for any subelements that were
// not available.
auto *Load = cast<LoadInst>(Inst);
- AvailableValueAggregator Agg(Load, AvailableValues, Uses);
+ AvailableValueAggregator Agg(Load, AvailableValues, Uses, deadEndBlocks,
+ false /*isTake*/);
SILValue NewVal = Agg.aggregateValues(LoadTy, Load->getOperand(), FirstElt);
+ // If we inserted any copies, we created the copies at our stores. We know
+ // that in our load block, we will reform the aggregate as appropriate at the
+ // load implying that the value /must/ be fully consumed. Thus any leaking
+ // blocks that we may have can be found by performing a linear lifetime check
+ // over all copies that we found using the load as the "consuming uses" (just
+ // for the purposes of identifying the consuming block).
+ Agg.addMissingDestroysForCopiedValues(Load);
+
++NumLoadPromoted;
// Simply replace the load.
@@ -1144,78 +1398,90 @@
/// Return true if we can promote the given destroy.
bool AllocOptimize::canPromoteDestroyAddr(
- DestroyAddrInst *DAI, SmallVectorImpl<AvailableValue> &AvailableValues) {
- SILValue Address = DAI->getOperand();
+ DestroyAddrInst *dai, SmallVectorImpl<AvailableValue> &availableValues) {
+ SILValue address = dai->getOperand();
// We cannot promote destroys of address-only types, because we can't expose
// the load.
- SILType LoadTy = Address->getType().getObjectType();
- if (LoadTy.isAddressOnly(Module))
+ SILType loadTy = address->getType().getObjectType();
+ if (loadTy.isAddressOnly(Module))
return false;
// If the box has escaped at this instruction, we can't safely promote the
// load.
- if (DataflowContext.hasEscapedAt(DAI))
+ if (DataflowContext.hasEscapedAt(dai))
return false;
// Compute the access path down to the field so we can determine precise
// def/use behavior.
- unsigned FirstElt = computeSubelement(Address, TheMemory);
- assert(FirstElt != ~0U && "destroy within enum projection is not valid");
- unsigned NumLoadSubElements = getNumSubElements(LoadTy, Module);
-
- // Set up the bitvector of elements being demanded by the load.
- SmallBitVector RequiredElts(NumMemorySubElements);
- RequiredElts.set(FirstElt, FirstElt+NumLoadSubElements);
+ unsigned firstElt = computeSubelement(address, TheMemory);
+ assert(firstElt != ~0U && "destroy within enum projection is not valid");
+ unsigned numLoadSubElements = getNumSubElements(loadTy, Module);
// Find out if we have any available values. If no bits are demanded, we
// trivially succeed. This can happen when there is a load of an empty struct.
- if (NumLoadSubElements == 0)
+ if (numLoadSubElements == 0)
return true;
+ // Set up the bitvector of elements being demanded by the load.
+ SmallBitVector requiredElts(NumMemorySubElements);
+ requiredElts.set(firstElt, firstElt + numLoadSubElements);
+
// Compute our available values. If we do not have any available values,
// return false. We have nothing further to do.
- llvm::SmallVector<AvailableValue, 8> TmpList;
- TmpList.resize(NumMemorySubElements);
- if (!DataflowContext.computeAvailableValues(DAI, FirstElt, NumLoadSubElements,
- RequiredElts, TmpList))
+ SmallVector<AvailableValue, 8> tmpList;
+ tmpList.resize(NumMemorySubElements);
+ if (!DataflowContext.computeAvailableValues(dai, firstElt, numLoadSubElements,
+ requiredElts, tmpList))
return false;
- // Now that we have our final list, move the temporary lists contents into
- // AvailableValues.
- std::move(TmpList.begin(), TmpList.end(),
- std::back_inserter(AvailableValues));
+ // Now check that we can perform a take upon our available values. This
+ // implies today that our value is fully available. If the value is not fully
+ // available, we would need to split stores to promote this destroy_addr. We
+ // do not support that yet.
+ AvailableValueAggregator agg(dai, tmpList, Uses, deadEndBlocks,
+ true /*isTake*/);
+ if (!agg.canTake(loadTy, firstElt))
+ return false;
+
+ // Ok, we can promote this destroy_addr... move the temporary lists contents
+ // into the final AvailableValues list.
+ std::move(tmpList.begin(), tmpList.end(),
+ std::back_inserter(availableValues));
return true;
}
-/// promoteDestroyAddr - DestroyAddr is a composed operation merging
-/// load+strong_release. If the implicit load's value is available, explode it.
-///
-/// Note that we handle the general case of a destroy_addr of a piece of the
-/// memory object, not just destroy_addrs of the entire thing.
+// DestroyAddr is a composed operation merging load [take] + destroy_value. If
+// the implicit load's value is available, explode it.
+//
+// NOTE: We only do this if we have a fully available value.
+//
+// Note that we handle the general case of a destroy_addr of a piece of the
+// memory object, not just destroy_addrs of the entire thing.
void AllocOptimize::promoteDestroyAddr(
- DestroyAddrInst *DAI, MutableArrayRef<AvailableValue> AvailableValues) {
- SILValue Address = DAI->getOperand();
- SILType LoadTy = Address->getType().getObjectType();
+ DestroyAddrInst *dai, MutableArrayRef<AvailableValue> availableValues) {
+ SILValue address = dai->getOperand();
+ SILType loadTy = address->getType().getObjectType();
// Compute the access path down to the field so we can determine precise
// def/use behavior.
- unsigned FirstElt = computeSubelement(Address, TheMemory);
+ unsigned firstElt = computeSubelement(address, TheMemory);
// Aggregate together all of the subelements into something that has the same
// type as the load did, and emit smaller) loads for any subelements that were
// not available.
- AvailableValueAggregator Agg(DAI, AvailableValues, Uses);
- SILValue NewVal = Agg.aggregateValues(LoadTy, Address, FirstElt);
+ AvailableValueAggregator agg(dai, availableValues, Uses, deadEndBlocks,
+ true /*isTake*/);
+ SILValue newVal = agg.aggregateValues(loadTy, address, firstElt);
++NumDestroyAddrPromoted;
-
- LLVM_DEBUG(llvm::dbgs() << " *** Promoting destroy_addr: " << *DAI << "\n");
- LLVM_DEBUG(llvm::dbgs() << " To value: " << *NewVal << "\n");
- SILBuilderWithScope(DAI).emitDestroyValueOperation(DAI->getLoc(), NewVal);
- DAI->eraseFromParent();
+ LLVM_DEBUG(llvm::dbgs() << " *** Promoting destroy_addr: " << *dai << "\n");
+ LLVM_DEBUG(llvm::dbgs() << " To value: " << *newVal << "\n");
+
+ SILBuilderWithScope(dai).emitDestroyValueOperation(dai->getLoc(), newVal);
+ dai->eraseFromParent();
}
namespace {
@@ -1286,6 +1552,9 @@
switch (u.Kind) {
case PMOUseKind::Assign:
+ // Until we can promote the value being destroyed by the assign, we can
+ // not remove deallocations with such assigns.
+ return false;
case PMOUseKind::InitOrAssign:
break; // These don't prevent removal.
case PMOUseKind::Initialization:
@@ -1413,6 +1682,8 @@
static bool optimizeMemoryAccesses(SILFunction &fn) {
bool changed = false;
+ DeadEndBlocks deadEndBlocks(&fn);
+
for (auto &bb : fn) {
auto i = bb.begin(), e = bb.end();
while (i != e) {
@@ -1439,7 +1710,7 @@
continue;
}
- AllocOptimize allocOptimize(alloc, uses, destroys);
+ AllocOptimize allocOptimize(alloc, uses, destroys, deadEndBlocks);
changed |= allocOptimize.optimizeMemoryAccesses();
// Move onto the next instruction. We know this is safe since we do not
@@ -1453,6 +1724,8 @@
static bool eliminateDeadAllocations(SILFunction &fn) {
bool changed = false;
+ DeadEndBlocks deadEndBlocks(&fn);
+
for (auto &bb : fn) {
auto i = bb.begin(), e = bb.end();
while (i != e) {
@@ -1480,7 +1753,7 @@
continue;
}
- AllocOptimize allocOptimize(alloc, uses, destroys);
+ AllocOptimize allocOptimize(alloc, uses, destroys, deadEndBlocks);
changed |= allocOptimize.tryToRemoveDeadAllocation();
// Move onto the next instruction. We know this is safe since we do not
diff --git a/lib/SILOptimizer/Utils/ConstExpr.cpp b/lib/SILOptimizer/Utils/ConstExpr.cpp
index 914bd86..648ccd3 100644
--- a/lib/SILOptimizer/Utils/ConstExpr.cpp
+++ b/lib/SILOptimizer/Utils/ConstExpr.cpp
@@ -287,6 +287,25 @@
return calculatedValues[apply];
}
+ if (auto *enumVal = dyn_cast<EnumInst>(value)) {
+ if (!enumVal->hasOperand())
+ return SymbolicValue::getEnum(enumVal->getElement());
+
+ auto payload = getConstantValue(enumVal->getOperand());
+ if (!payload.isConstant())
+ return payload;
+ return SymbolicValue::getEnumWithPayload(enumVal->getElement(), payload,
+ evaluator.getASTContext());
+ }
+
+ // This one returns the address of its enum payload.
+ if (auto *dai = dyn_cast<UncheckedTakeEnumDataAddrInst>(value)) {
+ auto enumVal = getConstAddrAndLoadResult(dai->getOperand());
+ if (!enumVal.isConstant())
+ return enumVal;
+ return createMemoryObject(value, enumVal.getEnumPayloadValue());
+ }
+
// This instruction is a marker that returns its first operand.
if (auto *bai = dyn_cast<BeginAccessInst>(value))
return getConstantValue(bai->getOperand());
@@ -1244,6 +1263,34 @@
continue;
}
+ if (isa<SwitchEnumAddrInst>(inst) || isa<SwitchEnumInst>(inst)) {
+ SymbolicValue value;
+ SwitchEnumInstBase *switchInst = dyn_cast<SwitchEnumInst>(inst);
+ if (switchInst) {
+ value = state.getConstantValue(switchInst->getOperand());
+ } else {
+ switchInst = cast<SwitchEnumAddrInst>(inst);
+ value = state.getConstAddrAndLoadResult(switchInst->getOperand());
+ }
+ if (!value.isConstant())
+ return value;
+ assert(value.getKind() == SymbolicValue::Enum ||
+ value.getKind() == SymbolicValue::EnumWithPayload);
+ // Set up basic block arguments.
+ auto *caseBB = switchInst->getCaseDestination(value.getEnumValue());
+ if (caseBB->getNumArguments() > 0) {
+ assert(value.getKind() == SymbolicValue::EnumWithPayload);
+ // When there are multiple payload components, they form a single
+ // tuple-typed argument.
+ assert(caseBB->getNumArguments() == 1);
+ auto argument = value.getEnumPayloadValue();
+ assert(argument.isConstant());
+ state.setValue(caseBB->getArgument(0), argument);
+ }
+ nextInst = caseBB->begin();
+ continue;
+ }
+
LLVM_DEBUG(llvm::dbgs()
<< "ConstExpr: Unknown Terminator: " << *inst << "\n");
diff --git a/lib/Sema/CSDiag.cpp b/lib/Sema/CSDiag.cpp
index a0b342d..b5f16b9 100644
--- a/lib/Sema/CSDiag.cpp
+++ b/lib/Sema/CSDiag.cpp
@@ -2113,8 +2113,9 @@
// Only try to insert a converting construction if the protocol is a
// literal protocol and not some other known protocol.
switch (kind) {
-#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, _) \
- case KnownProtocolKind::name: break;
+#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, _, __, ___) \
+ case KnownProtocolKind::name: \
+ break;
#define PROTOCOL_WITH_NAME(name, _) \
case KnownProtocolKind::name: return false;
#include "swift/AST/KnownProtocols.def"
@@ -2136,8 +2137,9 @@
// Only try to insert a converting construction if the protocol is a
// literal protocol and not some other known protocol.
switch (kind) {
-#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, _) \
- case KnownProtocolKind::name: break;
+#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(name, _, __, ___) \
+ case KnownProtocolKind::name: \
+ break;
#define PROTOCOL_WITH_NAME(name, _) \
case KnownProtocolKind::name: return false;
#include "swift/AST/KnownProtocols.def"
diff --git a/lib/Sema/CSSimplify.cpp b/lib/Sema/CSSimplify.cpp
index 6e6002f..984cd19 100644
--- a/lib/Sema/CSSimplify.cpp
+++ b/lib/Sema/CSSimplify.cpp
@@ -5390,11 +5390,14 @@
// Record the fix.
- // Increase the score. If this would make the current solution worse than
- // the best solution we've seen already, stop now.
- increaseScore(SK_Fix);
- if (worseThanBestSolution())
- return true;
+ // If this is just a warning, it shouldn't affect the solver.
+ if (!fix->isWarning()) {
+ // Otherwise increase the score. If this would make the current
+ // solution worse than the best solution we've seen already, stop now.
+ increaseScore(SK_Fix);
+ if (worseThanBestSolution())
+ return true;
+ }
if (isAugmentingFix(fix)) {
// Always useful, unless duplicate of exactly the same fix and location.
diff --git a/lib/Sema/CodeSynthesis.cpp b/lib/Sema/CodeSynthesis.cpp
index bbf6387..a2ff5f4 100644
--- a/lib/Sema/CodeSynthesis.cpp
+++ b/lib/Sema/CodeSynthesis.cpp
@@ -1346,8 +1346,9 @@
assert(!VD->isStatic() && "Static vars are already lazy on their own");
// Create the storage property as an optional of VD's type.
- SmallString<64> NameBuf = VD->getName().str();
- NameBuf += ".storage";
+ SmallString<64> NameBuf;
+ NameBuf += "$__lazy_storage_$_";
+ NameBuf += VD->getName().str();
auto StorageName = Context.getIdentifier(NameBuf);
auto StorageTy = OptionalType::get(VD->getType());
auto StorageInterfaceTy = OptionalType::get(VD->getInterfaceType());
diff --git a/lib/Sema/ConstraintSystem.cpp b/lib/Sema/ConstraintSystem.cpp
index b28f58e..a7ac92a 100644
--- a/lib/Sema/ConstraintSystem.cpp
+++ b/lib/Sema/ConstraintSystem.cpp
@@ -304,7 +304,7 @@
switch (kind) {
#define PROTOCOL_WITH_NAME(Id, Name) \
case KnownProtocolKind::Id: llvm_unreachable("Not a literal protocol");
-#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name)
+#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name, __, ___)
#include "swift/AST/KnownProtocols.def"
case KnownProtocolKind::ExpressibleByArrayLiteral: index = 0; break;
@@ -334,7 +334,7 @@
switch (kind) {
#define PROTOCOL_WITH_NAME(Id, Name) \
case KnownProtocolKind::Id: llvm_unreachable("Not a literal protocol");
-#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name)
+#define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, Name, __, ___)
#include "swift/AST/KnownProtocols.def"
case KnownProtocolKind::ExpressibleByArrayLiteral:
diff --git a/lib/Sema/TypeCheckDeclObjC.cpp b/lib/Sema/TypeCheckDeclObjC.cpp
index 6196585..4680104 100644
--- a/lib/Sema/TypeCheckDeclObjC.cpp
+++ b/lib/Sema/TypeCheckDeclObjC.cpp
@@ -523,6 +523,7 @@
if (!ResultType->hasError() &&
!ResultType->isVoid() &&
!ResultType->isUninhabited() &&
+ !ResultType->hasDynamicSelfType() &&
!ResultType->isRepresentableIn(ForeignLanguage::ObjectiveC,
const_cast<FuncDecl *>(FD))) {
if (Diagnose) {
diff --git a/lib/Sema/TypeCheckExpr.cpp b/lib/Sema/TypeCheckExpr.cpp
index 157d98c..8375cbe 100644
--- a/lib/Sema/TypeCheckExpr.cpp
+++ b/lib/Sema/TypeCheckExpr.cpp
@@ -20,6 +20,7 @@
#include "swift/AST/Decl.h"
#include "swift/AST/Initializer.h"
#include "swift/AST/ParameterList.h"
+#include "swift/AST/TypeCheckRequests.h"
#include "swift/Parse/Lexer.h"
using namespace swift;
@@ -633,7 +634,7 @@
return closure;
}
-static Type lookupDefaultLiteralType(TypeChecker &TC, DeclContext *dc,
+static Type lookupDefaultLiteralType(TypeChecker &TC, const DeclContext *dc,
StringRef name) {
auto lookupOptions = defaultUnqualifiedLookupOptions;
if (isa<AbstractFunctionDecl>(dc))
@@ -655,116 +656,59 @@
return cast<TypeAliasDecl>(TD)->getDeclaredInterfaceType();
}
+static Optional<KnownProtocolKind>
+getKnownProtocolKindIfAny(const ProtocolDecl *protocol) {
+ TypeChecker &tc = TypeChecker::createForContext(protocol->getASTContext());
+
+// clang-format off
+ #define EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME(Id, _, __, ___) \
+ if (protocol == tc.getProtocol(SourceLoc(), KnownProtocolKind::Id)) \
+ return KnownProtocolKind::Id;
+ #include "swift/AST/KnownProtocols.def"
+ #undef EXPRESSIBLE_BY_LITERAL_PROTOCOL_WITH_NAME
+ // clang-format on
+
+ return None;
+}
+
Type TypeChecker::getDefaultType(ProtocolDecl *protocol, DeclContext *dc) {
- Type *type = nullptr;
- const char *name = nullptr;
- bool performLocalLookup = true;
+ if (auto knownProtocolKindIfAny = getKnownProtocolKindIfAny(protocol)) {
+ Type t = evaluateOrDefault(
+ Context.evaluator,
+ DefaultTypeRequest{knownProtocolKindIfAny.getValue(), dc}, nullptr);
+ return t;
+ }
+ return nullptr;
+}
- // ExpressibleByUnicodeScalarLiteral -> UnicodeScalarType
- if (protocol ==
- getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByUnicodeScalarLiteral)) {
- type = &UnicodeScalarType;
- name = "UnicodeScalarType";
- }
- // ExpressibleByExtendedGraphemeClusterLiteral -> ExtendedGraphemeClusterType
- else if (protocol ==
- getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByExtendedGraphemeClusterLiteral)) {
- type = &ExtendedGraphemeClusterType;
- name = "ExtendedGraphemeClusterType";
- }
- // ExpressibleByStringLiteral -> StringLiteralType
- // ExpressibleByStringInterpolation -> StringLiteralType
- else if (protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByStringLiteral) ||
- protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByStringInterpolation)) {
- type = &StringLiteralType;
- name = "StringLiteralType";
- }
- // ExpressibleByIntegerLiteral -> IntegerLiteralType
- else if (protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByIntegerLiteral)) {
- type = &IntLiteralType;
- name = "IntegerLiteralType";
- }
- // ExpressibleByFloatLiteral -> FloatLiteralType
- else if (protocol == getProtocol(SourceLoc(),
- KnownProtocolKind::ExpressibleByFloatLiteral)){
- type = &FloatLiteralType;
- name = "FloatLiteralType";
- }
- // ExpressibleByBooleanLiteral -> BoolLiteralType
- else if (protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByBooleanLiteral)){
- type = &BooleanLiteralType;
- name = "BooleanLiteralType";
- }
- // ExpressibleByArrayLiteral -> Array
- else if (protocol == getProtocol(SourceLoc(),
- KnownProtocolKind::ExpressibleByArrayLiteral)){
- type = &ArrayLiteralType;
- name = "Array";
- performLocalLookup = false;
- }
- // ExpressibleByDictionaryLiteral -> Dictionary
- else if (protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByDictionaryLiteral)) {
- type = &DictionaryLiteralType;
- name = "Dictionary";
- performLocalLookup = false;
- }
- // _ExpressibleByColorLiteral -> _ColorLiteralType
- else if (protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByColorLiteral)) {
- type = &ColorLiteralType;
- name = "_ColorLiteralType";
- }
- // _ExpressibleByImageLiteral -> _ImageLiteralType
- else if (protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByImageLiteral)) {
- type = &ImageLiteralType;
- name = "_ImageLiteralType";
- }
- // _ExpressibleByFileReferenceLiteral -> _FileReferenceLiteralType
- else if (protocol == getProtocol(
- SourceLoc(),
- KnownProtocolKind::ExpressibleByFileReferenceLiteral)) {
- type = &FileReferenceLiteralType;
- name = "_FileReferenceLiteralType";
- }
-
- if (!type)
+llvm::Expected<Type>
+swift::DefaultTypeRequest::evaluate(Evaluator &evaluator,
+ KnownProtocolKind knownProtocolKind,
+ const DeclContext *dc) const {
+ const char *const name = getTypeName(knownProtocolKind);
+ if (!name)
return nullptr;
- // If we haven't found the type yet, look for it now.
- if (!*type) {
- if (performLocalLookup)
- *type = lookupDefaultLiteralType(*this, dc, name);
+ TypeChecker &tc = getTypeChecker();
- if (!*type)
- *type = lookupDefaultLiteralType(*this, getStdlibModule(dc), name);
+ Type type;
+ if (getPerformLocalLookup(knownProtocolKind))
+ type = lookupDefaultLiteralType(tc, dc, name);
- // Strip off one level of sugar; we don't actually want to print
- // the name of the typealias itself anywhere.
- if (type && *type) {
- if (auto boundTypeAlias =
- dyn_cast<TypeAliasType>(type->getPointer()))
- *type = boundTypeAlias->getSinglyDesugaredType();
- }
+ if (!type)
+ type = lookupDefaultLiteralType(tc, tc.getStdlibModule(dc), name);
+
+ // Strip off one level of sugar; we don't actually want to print
+ // the name of the typealias itself anywhere.
+ if (type) {
+ if (auto boundTypeAlias = dyn_cast<TypeAliasType>(type.getPointer()))
+ type = boundTypeAlias->getSinglyDesugaredType();
}
+ return type;
+}
- return *type;
+TypeChecker &DefaultTypeRequest::getTypeChecker() const {
+ return TypeChecker::createForContext(getDeclContext()->getASTContext());
}
Expr *TypeChecker::foldSequence(SequenceExpr *expr, DeclContext *dc) {
diff --git a/lib/Sema/TypeCheckSwitchStmt.cpp b/lib/Sema/TypeCheckSwitchStmt.cpp
index 940b177..60ee858 100644
--- a/lib/Sema/TypeCheckSwitchStmt.cpp
+++ b/lib/Sema/TypeCheckSwitchStmt.cpp
@@ -506,12 +506,35 @@
PAIRCASE (SpaceKind::Disjunct, SpaceKind::UnknownCase): {
SmallVector<Space, 4> smallSpaces;
for (auto s : this->getSpaces()) {
- if (auto diff = s.minus(other, TC, DC, minusCount))
- smallSpaces.push_back(*diff);
- else
+ auto diff = s.minus(other, TC, DC, minusCount);
+ if (!diff)
return None;
+ if (diff->getKind() == SpaceKind::Disjunct) {
+ smallSpaces.append(diff->getSpaces().begin(),
+ diff->getSpaces().end());
+ } else {
+ smallSpaces.push_back(*diff);
+ }
}
- return Space::forDisjunct(smallSpaces);
+
+ // Remove any of the later spaces that are contained entirely in an
+ // earlier one. Since we're not sorting by size, this isn't
+ // guaranteed to give us a minimal set, but it'll still reduce the
+ // general (A, B, C) - ((.a1, .b1, .c1) | (.a1, .b1, .c2)) problem.
+ // This is a quadratic operation but it saves us a LOT of work
+ // overall.
+ SmallVector<Space, 4> usefulSmallSpaces;
+ for (const Space &space : smallSpaces) {
+ bool alreadyHandled = llvm::any_of(usefulSmallSpaces,
+ [&](const Space &previousSpace) {
+ return space.isSubspace(previousSpace, TC, DC);
+ });
+ if (alreadyHandled)
+ continue;
+ usefulSmallSpaces.push_back(space);
+ }
+
+ return Space::forDisjunct(usefulSmallSpaces);
}
PAIRCASE (SpaceKind::Constructor, SpaceKind::Type):
return Space();
@@ -923,26 +946,31 @@
Space projection = projectPattern(TC, caseItem.getPattern());
- if (!projection.isEmpty() &&
- projection.isSubspace(Space::forDisjunct(spaces), TC, DC)) {
+ bool isRedundant = !projection.isEmpty() &&
+ llvm::any_of(spaces, [&](const Space &handled) {
+ return projection.isSubspace(handled, TC, DC);
+ });
+ if (isRedundant) {
TC.diagnose(caseItem.getStartLoc(),
diag::redundant_particular_case)
.highlight(caseItem.getSourceRange());
continue;
- } else {
- Expr *cachedExpr = nullptr;
- if (checkRedundantLiteral(caseItem.getPattern(), cachedExpr)) {
- assert(cachedExpr && "Cache found hit but no expr?");
- TC.diagnose(caseItem.getStartLoc(),
- diag::redundant_particular_literal_case)
- .highlight(caseItem.getSourceRange());
- TC.diagnose(cachedExpr->getLoc(),
- diag::redundant_particular_literal_case_here)
- .highlight(cachedExpr->getSourceRange());
- continue;
- }
}
- spaces.push_back(projection);
+
+ Expr *cachedExpr = nullptr;
+ if (checkRedundantLiteral(caseItem.getPattern(), cachedExpr)) {
+ assert(cachedExpr && "Cache found hit but no expr?");
+ TC.diagnose(caseItem.getStartLoc(),
+ diag::redundant_particular_literal_case)
+ .highlight(caseItem.getSourceRange());
+ TC.diagnose(cachedExpr->getLoc(),
+ diag::redundant_particular_literal_case_here)
+ .highlight(cachedExpr->getSourceRange());
+ continue;
+ }
+
+ if (!projection.isEmpty())
+ spaces.push_back(projection);
}
}
diff --git a/lib/Sema/TypeCheckType.cpp b/lib/Sema/TypeCheckType.cpp
index 7053be5..182e2ee 100644
--- a/lib/Sema/TypeCheckType.cpp
+++ b/lib/Sema/TypeCheckType.cpp
@@ -216,8 +216,9 @@
return DependentMemberType::get(baseTy, assocType);
}
- // Otherwise, the nested type comes from a concrete type. Substitute the
- // base type into it.
+ // Otherwise, the nested type comes from a concrete type,
+ // or it's a typealias declared in protocol or protocol extension.
+ // Substitute the base type into it.
auto concrete = ref->getBoundDecl();
auto lazyResolver = ctx.getLazyResolver();
if (lazyResolver)
@@ -225,14 +226,26 @@
if (!concrete->hasInterfaceType())
return ErrorType::get(ctx);
- if (concrete->getDeclContext()->getSelfClassDecl()) {
- // We found a member of a class from a protocol or protocol
- // extension.
- //
- // Get the superclass of the 'Self' type parameter.
- baseTy = (baseEquivClass->concreteType
- ? baseEquivClass->concreteType
- : baseEquivClass->superclass);
+ // Make sure that base type didn't get replaced along the way.
+ assert(baseTy->isTypeParameter());
+
+ // There are two situations possible here:
+ //
+ // 1. Member comes from the protocol, which means that it has been
+ // found through a conformance constraint placed on base e.g. `T: P`.
+ // In this case member is a `typealias` declaration located in
+ // protocol or protocol extension.
+ //
+ // 2. Member comes from struct/enum/class type, which means that it
+ // has been found through same-type constraint on base e.g. `T == Q`.
+ //
+ // If this is situation #2 we need to make sure to switch base to
+ // a concrete type (according to equivalence class) otherwise we'd
+ // end up using incorrect generic signature while attempting to form
+ // a substituted type for the member we found.
+ if (!concrete->getDeclContext()->getSelfProtocolDecl()) {
+ baseTy = baseEquivClass->concreteType ? baseEquivClass->concreteType
+ : baseEquivClass->superclass;
assert(baseTy);
}
@@ -1996,7 +2009,21 @@
hasFunctionAttr = true;
break;
}
-
+
+ // If we have an @autoclosure then try resolving the top level type repr
+ // first as it may be pointing to a typealias
+ if (attrs.has(TAK_autoclosure)) {
+ if (auto CITR = dyn_cast<ComponentIdentTypeRepr>(repr)) {
+ auto typeAliasResolver = TypeResolverContext::TypeAliasDecl;
+ if (auto type = resolveTopLevelIdentTypeComponent(resolution, CITR,
+ typeAliasResolver)) {
+ if (auto TAT = dyn_cast<TypeAliasType>(type.getPointer())) {
+ repr = TAT->getDecl()->getUnderlyingTypeLoc().getTypeRepr();
+ }
+ }
+ }
+ }
+
// Function attributes require a syntactic function type.
auto *fnRepr = dyn_cast<FunctionTypeRepr>(repr);
diff --git a/lib/Sema/TypeChecker.h b/lib/Sema/TypeChecker.h
index 6938e35..9bce0e9 100644
--- a/lib/Sema/TypeChecker.h
+++ b/lib/Sema/TypeChecker.h
@@ -662,18 +662,7 @@
}
private:
- Type IntLiteralType;
Type MaxIntegerType;
- Type FloatLiteralType;
- Type BooleanLiteralType;
- Type UnicodeScalarType;
- Type ExtendedGraphemeClusterType;
- Type StringLiteralType;
- Type ArrayLiteralType;
- Type DictionaryLiteralType;
- Type ColorLiteralType;
- Type ImageLiteralType;
- Type FileReferenceLiteralType;
Type StringType;
Type SubstringType;
Type IntType;
diff --git a/lib/Serialization/ModuleFile.cpp b/lib/Serialization/ModuleFile.cpp
index bc95e91..f8005e9 100644
--- a/lib/Serialization/ModuleFile.cpp
+++ b/lib/Serialization/ModuleFile.cpp
@@ -1016,7 +1016,7 @@
new ModuleFile::GroupNameTable);
auto Data = reinterpret_cast<const uint8_t *>(BlobData.data());
unsigned GroupCount = endian::readNext<uint32_t, little, unaligned>(Data);
- for (unsigned I = 0; I < GroupCount; I++) {
+ for (unsigned I = 0; I < GroupCount; ++I) {
auto RawSize = endian::readNext<uint32_t, little, unaligned>(Data);
auto RawText = StringRef(reinterpret_cast<const char *>(Data), RawSize);
Data += RawSize;
diff --git a/stdlib/public/Darwin/Foundation/NSStringAPI.swift b/stdlib/public/Darwin/Foundation/NSStringAPI.swift
index 68815cb..27aaeed 100644
--- a/stdlib/public/Darwin/Foundation/NSStringAPI.swift
+++ b/stdlib/public/Darwin/Foundation/NSStringAPI.swift
@@ -178,6 +178,10 @@
/// Creates a string by copying the data from a given
/// C array of UTF8-encoded bytes.
public init?(utf8String bytes: UnsafePointer<CChar>) {
+ if let str = String(validatingUTF8: bytes) {
+ self = str
+ return
+ }
if let ns = NSString(utf8String: bytes) {
self = String._unconditionallyBridgeFromObjectiveC(ns)
} else {
@@ -202,12 +206,18 @@
/// - Parameters:
/// - bytes: A sequence of bytes to interpret using `encoding`.
/// - encoding: The ecoding to use to interpret `bytes`.
- public init? <S: Sequence>(bytes: __shared S, encoding: Encoding)
- where S.Iterator.Element == UInt8 {
+ public init?<S: Sequence>(bytes: __shared S, encoding: Encoding)
+ where S.Iterator.Element == UInt8 {
let byteArray = Array(bytes)
+ if encoding == .utf8,
+ let str = byteArray.withUnsafeBufferPointer({ String._tryFromUTF8($0) })
+ {
+ self = str
+ return
+ }
+
if let ns = NSString(
bytes: byteArray, length: byteArray.count, encoding: encoding.rawValue) {
-
self = String._unconditionallyBridgeFromObjectiveC(ns)
} else {
return nil
@@ -365,6 +375,10 @@
cString: UnsafePointer<CChar>,
encoding enc: Encoding
) {
+ if enc == .utf8, let str = String(validatingUTF8: cString) {
+ self = str
+ return
+ }
if let ns = NSString(cString: cString, encoding: enc.rawValue) {
self = String._unconditionallyBridgeFromObjectiveC(ns)
} else {
@@ -381,6 +395,14 @@
/// Returns a `String` initialized by converting given `data` into
/// Unicode characters using a given `encoding`.
public init?(data: __shared Data, encoding: Encoding) {
+ if encoding == .utf8,
+ let str = data.withUnsafeBytes({
+ String._tryFromUTF8($0.bindMemory(to: UInt8.self))
+ }) {
+ self = str
+ return
+ }
+
guard let s = NSString(data: data, encoding: encoding.rawValue) else { return nil }
self = String._unconditionallyBridgeFromObjectiveC(s)
}
diff --git a/stdlib/public/core/StringCreate.swift b/stdlib/public/core/StringCreate.swift
index 262f0ee..d647c12 100644
--- a/stdlib/public/core/StringCreate.swift
+++ b/stdlib/public/core/StringCreate.swift
@@ -15,13 +15,33 @@
internal func _allASCII(_ input: UnsafeBufferPointer<UInt8>) -> Bool {
// NOTE: Avoiding for-in syntax to avoid bounds checks
//
- // TODO(String performance): Vectorize and/or incorporate into validity
- // checking, perhaps both.
+ // TODO(String performance): SIMD-ize
//
let ptr = input.baseAddress._unsafelyUnwrappedUnchecked
var i = 0
- while i < input.count {
- guard ptr[i] <= 0x7F else { return false }
+
+ let count = input.count
+ let stride = MemoryLayout<UInt>.stride
+ let address = Int(bitPattern: ptr)
+
+ let wordASCIIMask = UInt(truncatingIfNeeded: 0x8080_8080_8080_8080 as UInt64)
+ let byteASCIIMask = UInt8(truncatingIfNeeded: wordASCIIMask)
+
+ while (address &+ i) % stride != 0 && i < count {
+ guard ptr[i] & byteASCIIMask == 0 else { return false }
+ i &+= 1
+ }
+
+ while (i &+ stride) <= count {
+ let word: UInt = UnsafePointer(
+ bitPattern: address &+ i
+ )._unsafelyUnwrappedUnchecked.pointee
+ guard word & wordASCIIMask == 0 else { return false }
+ i &+= stride
+ }
+
+ while i < count {
+ guard ptr[i] & byteASCIIMask == 0 else { return false }
i &+= 1
}
return true
@@ -42,12 +62,10 @@
return storage.asString
}
- @usableFromInline
- internal static func _tryFromUTF8(
- _ input: UnsafeBufferPointer<UInt8>
- ) -> String? {
+ public // SPI(Foundation)
+ static func _tryFromUTF8(_ input: UnsafeBufferPointer<UInt8>) -> String? {
guard case .success(let extraInfo) = validateUTF8(input) else {
- return nil
+ return nil
}
return String._uncheckedFromUTF8(input, isASCII: extraInfo.isASCII)
diff --git a/stdlib/public/core/StringLegacy.swift b/stdlib/public/core/StringLegacy.swift
index 6d5bf58..e4ab137 100644
--- a/stdlib/public/core/StringLegacy.swift
+++ b/stdlib/public/core/StringLegacy.swift
@@ -64,10 +64,10 @@
/// print(cafe.hasPrefix("café"))
/// // Prints "false"
///
- /// The Unicode-safe comparison matches Unicode scalar values rather than the
- /// code points used to compose them. The example below uses two strings
- /// with different forms of the `"é"` character---the first uses the composed
- /// form and the second uses the decomposed form.
+ /// The Unicode-safe comparison matches Unicode extended grapheme clusters
+ /// rather than the code points used to compose them. The example below uses
+ /// two strings with different forms of the `"é"` character---the first uses
+ /// the composed form and the second uses the decomposed form.
///
/// // Unicode safe
/// let composedCafe = "Café"
@@ -98,10 +98,10 @@
/// print(plans.hasSuffix("Café"))
/// // Prints "false"
///
- /// The Unicode-safe comparison matches Unicode scalar values rather than the
- /// code points used to compose them. The example below uses two strings
- /// with different forms of the `"é"` character---the first uses the composed
- /// form and the second uses the decomposed form.
+ /// The Unicode-safe comparison matches Unicode extended grapheme clusters
+ /// rather than the code points used to compose them. The example below uses
+ /// two strings with different forms of the `"é"` character---the first uses
+ /// the composed form and the second uses the decomposed form.
///
/// // Unicode safe
/// let composedCafe = "café"
diff --git a/stdlib/public/core/StringUTF8Validation.swift b/stdlib/public/core/StringUTF8Validation.swift
index a5dba8f..bfb4d14 100644
--- a/stdlib/public/core/StringUTF8Validation.swift
+++ b/stdlib/public/core/StringUTF8Validation.swift
@@ -36,6 +36,10 @@
private struct UTF8ValidationError: Error {}
internal func validateUTF8(_ buf: UnsafeBufferPointer<UInt8>) -> UTF8ValidationResult {
+ if _allASCII(buf) {
+ return .success(UTF8ExtraInfo(isASCII: true))
+ }
+
var iter = buf.makeIterator()
var lastValidIndex = buf.startIndex
diff --git a/test/ClangImporter/objc_parse.swift b/test/ClangImporter/objc_parse.swift
index 89cae58..98b5708 100644
--- a/test/ClangImporter/objc_parse.swift
+++ b/test/ClangImporter/objc_parse.swift
@@ -273,8 +273,7 @@
class Wobbler : NSWobbling {
@objc func wobble() { }
- func returnMyself() -> Self { return self } // expected-error{{non-'@objc' method 'returnMyself()' does not satisfy requirement of '@objc' protocol 'NSWobbling'}}{{none}}
- // expected-error@-1{{method cannot be an implementation of an @objc requirement because its result type cannot be represented in Objective-C}}
+ func returnMyself() -> Self { return self }
}
extension Wobbler : NSMaybeInitWobble { // expected-error{{type 'Wobbler' does not conform to protocol 'NSMaybeInitWobble'}}
diff --git a/test/Interpreter/SDK/libc.swift b/test/Interpreter/SDK/libc.swift
index 0e1dd7d..e052e5a 100644
--- a/test/Interpreter/SDK/libc.swift
+++ b/test/Interpreter/SDK/libc.swift
@@ -13,6 +13,8 @@
import Darwin
#elseif os(Linux) || os(FreeBSD) || os(PS4) || os(Android)
import Glibc
+#elseif os(Windows)
+ import MSVCRT
#endif
let sourcePath = CommandLine.arguments[1]
diff --git a/test/Interpreter/dynamic_replacement.swift b/test/Interpreter/dynamic_replacement.swift
index c358aa9..951dee7 100644
--- a/test/Interpreter/dynamic_replacement.swift
+++ b/test/Interpreter/dynamic_replacement.swift
@@ -55,10 +55,11 @@
#if os(Linux)
import Glibc
- let dylibSuffix = "so"
+#elseif os(Windows)
+ import MSVCRT
+ import WinSDK
#else
import Darwin
- let dylibSuffix = "dylib"
#endif
var DynamicallyReplaceable = TestSuite("DynamicallyReplaceable")
@@ -108,6 +109,16 @@
expectedResult(useOrig, "public_enum_generic_func"))
}
+private func target_library_name(_ name: String) -> String {
+#if os(iOS) || os(macOS) || os(tvOS) || os(watchOS)
+ return "lib\(name).dylib"
+#elseif os(Windows)
+ return "\(name).dll"
+#else
+ return "lib\(name).so"
+#endif
+}
+
DynamicallyReplaceable.test("DynamicallyReplaceable") {
var executablePath = CommandLine.arguments[0]
executablePath.removeLast(4)
@@ -118,9 +129,11 @@
// Now, test with the module containing the replacements.
#if os(Linux)
- _ = dlopen("libModule2."+dylibSuffix, RTLD_NOW)
+ _ = dlopen(target_library_name("Module2"), RTLD_NOW)
+#elseif os(Windows)
+ _ = LoadLibraryA(target_library_name("Module2"))
#else
- _ = dlopen(executablePath+"libModule2."+dylibSuffix, RTLD_NOW)
+ _ = dlopen(executablePath+target_library_name("Module2"), RTLD_NOW)
#endif
checkExpectedResults(forOriginalLibrary: false)
}
diff --git a/test/Interpreter/dynamic_replacement_chaining.swift b/test/Interpreter/dynamic_replacement_chaining.swift
index 2b30230..027fe41 100644
--- a/test/Interpreter/dynamic_replacement_chaining.swift
+++ b/test/Interpreter/dynamic_replacement_chaining.swift
@@ -24,25 +24,38 @@
#if os(Linux)
import Glibc
- let dylibSuffix = "so"
+#elseif os(Windows)
+ import MSVCRT
+ import WinSDK
#else
import Darwin
- let dylibSuffix = "dylib"
#endif
var DynamicallyReplaceable = TestSuite("DynamicallyReplaceableChaining")
+func target_library_name(_ name: String) -> String {
+#if os(iOS) || os(macOS) || os(tvOS) || os(watchOS)
+ return "lib\(name).dylib"
+#elseif os(Windows)
+ return "\(name).dll"
+#else
+ return "lib\(name).so"
+#endif
+}
DynamicallyReplaceable.test("DynamicallyReplaceable") {
var executablePath = CommandLine.arguments[0]
executablePath.removeLast(4)
#if os(Linux)
- _ = dlopen("libB."+dylibSuffix, RTLD_NOW)
- _ = dlopen("libC."+dylibSuffix, RTLD_NOW)
+ _ = dlopen(target_library_name("B"), RTLD_NOW)
+ _ = dlopen(target_library_name("C"), RTLD_NOW)
+#elseif os(Windows)
+ _ = LoadLibraryA(target_library_name("B"))
+ _ = LoadLibraryA(target_library_name("C"))
#else
- _ = dlopen(executablePath+"libB."+dylibSuffix, RTLD_NOW)
- _ = dlopen(executablePath+"libC."+dylibSuffix, RTLD_NOW)
+ _ = dlopen(executablePath+target_library_name("B"), RTLD_NOW)
+ _ = dlopen(executablePath+target_library_name("C"), RTLD_NOW)
#endif
#if CHAINING
diff --git a/test/Interpreter/extended_grapheme_cluster_literal.swift b/test/Interpreter/extended_grapheme_cluster_literal.swift
index 3577596..a4fc2c8 100644
--- a/test/Interpreter/extended_grapheme_cluster_literal.swift
+++ b/test/Interpreter/extended_grapheme_cluster_literal.swift
@@ -17,7 +17,7 @@
}
}
-private func string(_ characters: UInt32...) -> String {
+public func string(_ characters: UInt32...) -> String {
return String(characters.map { Character(UnicodeScalar($0)!) })
}
private func expressible<T>(_ literal: Expressible<T>, as type: T.Type)
diff --git a/test/Interpreter/unions-and-bitfields.swift b/test/Interpreter/unions-and-bitfields.swift
index b4830fc..e553650 100644
--- a/test/Interpreter/unions-and-bitfields.swift
+++ b/test/Interpreter/unions-and-bitfields.swift
@@ -1,7 +1,9 @@
-// RUN: %target-build-swift %s -import-objc-header %S/Inputs/unions-and-bitfields.h -disable-bridging-pch -o %t
+// RUN: %target-build-swift %s -Xfrontend -enable-objc-interop -Xfrontend -disable-objc-attr-requires-foundation-module -import-objc-header %S/Inputs/unions-and-bitfields.h -disable-bridging-pch -o %t
// RUN: %target-codesign %t
// RUN: %target-run %t
+
// REQUIRES: executable_test
+// REQUIRES: objc_interop
// The -disable-bridging-pch above isn't actually relevant to the test; however,
// precompiled headers don't play nice with the way we include the platform
diff --git a/test/NameBinding/Inputs/NIOFoundationCompat.swift b/test/NameBinding/Inputs/NIOFoundationCompat.swift
new file mode 100644
index 0000000..54bae53
--- /dev/null
+++ b/test/NameBinding/Inputs/NIOFoundationCompat.swift
@@ -0,0 +1,9 @@
+import Foundation
+
+extension Data {
+ @_inlineable
+ public func withUnsafeBytes<R>(_ body: (UnsafeRawBufferPointer) throws -> R) rethrows -> R {
+ let r: R? = nil
+ return r!
+ }
+}
diff --git a/test/NameBinding/nio_shadowing.swift b/test/NameBinding/nio_shadowing.swift
new file mode 100644
index 0000000..6cf5b39
--- /dev/null
+++ b/test/NameBinding/nio_shadowing.swift
@@ -0,0 +1,11 @@
+// RUN: %empty-directory(%t)
+// RUN: %target-swift-frontend -emit-module -o %t %S/Inputs/NIOFoundationCompat.swift
+// RUN: %target-swift-frontend -typecheck %s -I %t -verify
+
+// REQUIRES: objc_interop
+import Foundation
+import NIOFoundationCompat
+
+func test(data: Data) {
+ data.withUnsafeBytes { x in print(x) }
+}
diff --git a/test/NameBinding/reference-dependencies-consistency.swift b/test/NameBinding/reference-dependencies-consistency.swift
new file mode 100644
index 0000000..a9f7279
--- /dev/null
+++ b/test/NameBinding/reference-dependencies-consistency.swift
@@ -0,0 +1,15 @@
+// Some types, such as StringLiteralType, used to be cached in the TypeChecker.
+// Consequently, the second primary file (in batch mode) to use that type would
+// hit in the cache and no dependency would be recorded.
+// This test ensures that this bug stays fixed.
+//
+// RUN: %empty-directory(%t)
+//
+// Create two identical inputs, each needing StringLiteralType:
+//
+// RUN: echo 'fileprivate var v: String { return "\(x)" }; fileprivate let x = "a"' >%t/1.swift
+// RUN: echo 'fileprivate var v: String { return "\(x)" }; fileprivate let x = "a"' >%t/2.swift
+//
+// RUN: %target-swift-frontend -typecheck -primary-file %t/1.swift -primary-file %t/2.swift -emit-reference-dependencies-path %t/1.swiftdeps -emit-reference-dependencies-path %t/2.swiftdeps
+//
+// RUN: cmp -s %t/1.swiftdeps %t/2.swiftdeps
diff --git a/test/ParseableInterface/lazy-vars.swift b/test/ParseableInterface/lazy-vars.swift
new file mode 100644
index 0000000..881bfbc
--- /dev/null
+++ b/test/ParseableInterface/lazy-vars.swift
@@ -0,0 +1,41 @@
+// RUN: %empty-directory(%t)
+
+// RUN: %target-swift-frontend -typecheck -module-name Test -emit-parseable-module-interface-path %t/Test.swiftinterface %s
+// RUN: %FileCheck %s < %t/Test.swiftinterface --check-prefix CHECK --check-prefix NONRESILIENT
+// RUN: %target-swift-frontend -build-module-from-parseable-interface %t/Test.swiftinterface -o %t/Test.swiftmodule
+// RUN: %target-swift-frontend -emit-module -o /dev/null -merge-modules -emit-parseable-module-interface-path - %t/Test.swiftmodule -module-name Test | %FileCheck %s --check-prefix CHECK --check-prefix NONRESILIENT
+
+// RUN: %target-swift-frontend -typecheck -module-name TestResilient -emit-parseable-module-interface-path %t/TestResilient.swiftinterface -enable-resilience %s
+// RUN: %FileCheck %s < %t/TestResilient.swiftinterface --check-prefix CHECK --check-prefix RESILIENT
+
+// RUN: %target-swift-frontend -build-module-from-parseable-interface %t/TestResilient.swiftinterface -o %t/TestResilient.swiftmodule
+// RUN: %target-swift-frontend -emit-module -o /dev/null -merge-modules -emit-parseable-module-interface-path - %t/TestResilient.swiftmodule -module-name TestResilient | %FileCheck %s --check-prefix CHECK --check-prefix RESILIENT
+
+// CHECK: @_fixed_layout public struct HasLazyVarsFixedLayout {
+// CHECK-NEXT: public var foo: [[INT:(Swift\.)?Int]] {
+// CHECK-NEXT: mutating get
+// CHECK-NEXT: set
+// CHECK-NEXT: }
+// CHECK: private var $__lazy_storage_$_foo: [[INT]]?
+// CHECK-NOT: private var bar
+// CHECK: private var $__lazy_storage_$_bar: [[INT]]?
+// CHECK-NEXT: }
+@_fixed_layout
+public struct HasLazyVarsFixedLayout {
+ public lazy var foo: Int = 0
+ private lazy var bar: Int = 0
+}
+
+// CHECK: public struct HasLazyVars {
+// CHECK-NEXT: public var foo: [[INT:(Swift\.)?Int]] {
+// CHECK-NEXT: mutating get
+// CHECK-NEXT: set
+// CHECK-NEXT: }
+// NONRESILIENT: private var $__lazy_storage_$_foo: [[INT]]?
+// CHECK-NOT: private var bar
+// NONRESILIENT: private var $__lazy_storage_$_bar: [[INT]]?
+// CHECK-NEXT: }
+public struct HasLazyVars {
+ public lazy var foo: Int = 0
+ private lazy var bar: Int = 0
+}
diff --git a/test/PrintAsObjC/dynamicself.swift b/test/PrintAsObjC/dynamicself.swift
new file mode 100644
index 0000000..112efb3
--- /dev/null
+++ b/test/PrintAsObjC/dynamicself.swift
@@ -0,0 +1,37 @@
+// REQUIRES: objc_interop
+
+// RUN: %empty-directory(%t)
+
+// FIXME: BEGIN -enable-source-import hackaround
+// RUN: %target-swift-frontend(mock-sdk: -sdk %S/../Inputs/clang-importer-sdk -I %t) -emit-module -o %t %S/../Inputs/clang-importer-sdk/swift-modules/ObjectiveC.swift -disable-objc-attr-requires-foundation-module
+// RUN: %target-swift-frontend(mock-sdk: -sdk %S/../Inputs/clang-importer-sdk -I %t) -emit-module -o %t %S/../Inputs/clang-importer-sdk/swift-modules/CoreGraphics.swift
+// RUN: %target-swift-frontend(mock-sdk: -sdk %S/../Inputs/clang-importer-sdk -I %t) -emit-module -o %t %S/../Inputs/clang-importer-sdk/swift-modules/Foundation.swift
+// RUN: %target-swift-frontend(mock-sdk: -sdk %S/../Inputs/clang-importer-sdk -I %t) -emit-module -o %t %S/../Inputs/clang-importer-sdk/swift-modules/AppKit.swift
+// FIXME: END -enable-source-import hackaround
+
+
+// RUN: %target-swift-frontend(mock-sdk: -sdk %S/../Inputs/clang-importer-sdk -I %t) -emit-module -I %S/Inputs/custom-modules -o %t %s -disable-objc-attr-requires-foundation-module
+// RUN: %target-swift-frontend(mock-sdk: -sdk %S/../Inputs/clang-importer-sdk -I %t) -parse-as-library %t/dynamicself.swiftmodule -typecheck -I %S/Inputs/custom-modules -emit-objc-header-path %t/dynamicself.h -import-objc-header %S/../Inputs/empty.h -disable-objc-attr-requires-foundation-module
+// RUN: %FileCheck %s < %t/dynamicself.h
+// RUN: %check-in-clang -I %S/Inputs/custom-modules/ %t/dynamicself.h
+
+import Foundation
+
+// CHECK-LABEL: @protocol FooProtocol
+@objc protocol FooProtocol {
+ // CHECK: - (nonnull instancetype)fooFunc SWIFT_WARN_UNUSED_RESULT;
+ func fooFunc() -> Self
+ // CHECK: - (nullable instancetype)optionalFooFunc SWIFT_WARN_UNUSED_RESULT;
+ func optionalFooFunc() -> Self?
+}
+// CHECK: @end
+
+// CHECK-LABEL: @interface BarClass : NSObject <FooProtocol>
+@objc @objcMembers class BarClass: NSObject, FooProtocol {
+ // CHECK: - (nonnull instancetype)fooFunc SWIFT_WARN_UNUSED_RESULT;
+ func fooFunc() -> Self { return self }
+ // CHECK: - (nullable instancetype)optionalFooFunc SWIFT_WARN_UNUSED_RESULT;
+ func optionalFooFunc() -> Self? { return self }
+ // CHECK: - (nonnull instancetype)init OBJC_DESIGNATED_INITIALIZER;
+}
+// CHECK: @end
diff --git a/test/SILOptimizer/pound_assert.swift b/test/SILOptimizer/pound_assert.swift
index 7a316cb..2e96a64 100644
--- a/test/SILOptimizer/pound_assert.swift
+++ b/test/SILOptimizer/pound_assert.swift
@@ -497,3 +497,91 @@
func invokeStringInitNonEmptyFlowSensitive() {
#assert(stringInitNonEmptyFlowSensitive().x == 1)
}
+
+//===----------------------------------------------------------------------===//
+// Enums and optionals.
+//===----------------------------------------------------------------------===//
+func isNil(_ x: Int?) -> Bool {
+ return x == nil
+}
+
+#assert(isNil(nil))
+#assert(!isNil(3))
+
+public enum Pet {
+ case bird
+ case cat(Int)
+ case dog(Int, Int)
+ case fish
+}
+
+public func weighPet(pet: Pet) -> Int {
+ switch pet {
+ case .bird: return 3
+ case let .cat(weight): return weight
+ case let .dog(w1, w2): return w1+w2
+ default: return 1
+ }
+}
+
+#assert(weighPet(pet: .bird) == 3)
+#assert(weighPet(pet: .fish) == 1)
+#assert(weighPet(pet: .cat(2)) == 2)
+// expected-error @+1 {{assertion failed}}
+#assert(weighPet(pet: .cat(2)) == 3)
+#assert(weighPet(pet: .dog(9, 10)) == 19)
+
+// Test indirect enums.
+indirect enum IntExpr {
+ case int(_ value: Int)
+ case add(_ lhs: IntExpr, _ rhs: IntExpr)
+ case multiply(_ lhs: IntExpr, _ rhs: IntExpr)
+}
+
+func evaluate(intExpr: IntExpr) -> Int {
+ switch intExpr {
+ case .int(let value):
+ return value
+ case .add(let lhs, let rhs):
+ return evaluate(intExpr: lhs) + evaluate(intExpr: rhs)
+ case .multiply(let lhs, let rhs):
+ return evaluate(intExpr: lhs) * evaluate(intExpr: rhs)
+ }
+}
+
+// TODO: The constant evaluator can't handle indirect enums yet.
+// expected-error @+2 {{#assert condition not constant}}
+// expected-note @+1 {{could not fold operation}}
+#assert(evaluate(intExpr: .int(5)) == 5)
+// expected-error @+2 {{#assert condition not constant}}
+// expected-note @+1 {{could not fold operation}}
+#assert(evaluate(intExpr: .add(.int(5), .int(6))) == 11)
+// expected-error @+2 {{#assert condition not constant}}
+// expected-note @+1 {{could not fold operation}}
+#assert(evaluate(intExpr: .add(.multiply(.int(2), .int(2)), .int(3))) == 7)
+
+// Test address-only enums.
+protocol IntContainerProtocol {
+ var value: Int { get }
+}
+
+struct IntContainer : IntContainerProtocol {
+ let value: Int
+}
+
+enum AddressOnlyEnum<T: IntContainerProtocol> {
+ case double(_ value: T)
+ case triple(_ value: T)
+}
+
+func evaluate<T>(addressOnlyEnum: AddressOnlyEnum<T>) -> Int {
+ switch addressOnlyEnum {
+ case .double(let value):
+ return 2 * value.value
+ case .triple(let value):
+ return 3 * value.value
+ }
+}
+
+#assert(evaluate(addressOnlyEnum: .double(IntContainer(value: 1))) == 2)
+#assert(evaluate(addressOnlyEnum: .triple(IntContainer(value: 1))) == 3)
diff --git a/test/SILOptimizer/predictable_deadalloc_elim.sil b/test/SILOptimizer/predictable_deadalloc_elim.sil
new file mode 100644
index 0000000..7e40d5a
--- /dev/null
+++ b/test/SILOptimizer/predictable_deadalloc_elim.sil
@@ -0,0 +1,267 @@
+// RUN: %target-sil-opt -enable-sil-verify-all %s -predictable-deadalloc-elim | %FileCheck %s
+
+sil_stage canonical
+
+import Swift
+import Builtin
+
+// CHECK-LABEL: sil @simple_trivial_stack : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK: } // end sil function 'simple_trivial_stack'
+sil @simple_trivial_stack : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+ %1 = alloc_stack $Builtin.Int32
+ store %0 to %1 : $*Builtin.Int32
+ dealloc_stack %1 : $*Builtin.Int32
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_trivial_init_box : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_box
+// CHECK: } // end sil function 'simple_trivial_init_box'
+sil @simple_trivial_init_box : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+ %1 = alloc_box ${ var Builtin.Int32 }
+ %2 = project_box %1 : ${ var Builtin.Int32 }, 0
+ store %0 to %2 : $*Builtin.Int32
+ strong_release %1 : ${ var Builtin.Int32 }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_trivial_uninit_box : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_box
+// CHECK: } // end sil function 'simple_trivial_uninit_box'
+sil @simple_trivial_uninit_box : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+ %1 = alloc_box ${ var Builtin.Int32 }
+ %2 = project_box %1 : ${ var Builtin.Int32 }, 0
+ store %0 to %2 : $*Builtin.Int32
+ dealloc_box %1 : ${ var Builtin.Int32 }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_nontrivial_stack : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NEXT: strong_release [[ARG]]
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'simple_nontrivial_stack'
+sil @simple_nontrivial_stack : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+ %1 = alloc_stack $Builtin.NativeObject
+ store %0 to %1 : $*Builtin.NativeObject
+ destroy_addr %1 : $*Builtin.NativeObject
+ dealloc_stack %1 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// We do not handle this today, since we do not understand that we need to treat
+// the strong_release of the alloc_box as a destroy_addr of the entire value.
+//
+// FIXME: We should be able to handle this.
+//
+// CHECK-LABEL: sil @simple_nontrivial_init_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: alloc_box
+// CHECK: } // end sil function 'simple_nontrivial_init_box'
+sil @simple_nontrivial_init_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+ %1 = alloc_box ${ var Builtin.NativeObject }
+ %2 = project_box %1 : ${ var Builtin.NativeObject }, 0
+ store %0 to %2 : $*Builtin.NativeObject
+ strong_release %1 : ${ var Builtin.NativeObject }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_nontrivial_uninit_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NEXT: strong_release [[ARG]]
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'simple_nontrivial_uninit_box'
+sil @simple_nontrivial_uninit_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+ %1 = alloc_box ${ var Builtin.NativeObject }
+ %2 = project_box %1 : ${ var Builtin.NativeObject }, 0
+ store %0 to %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_box %1 : ${ var Builtin.NativeObject }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+//////////////////
+// Assign Tests //
+//////////////////
+
+// Make sure that we do eliminate this allocation
+// CHECK-LABEL: sil @simple_assign_take_trivial : $@convention(thin) (Builtin.Int32, @in Builtin.Int32) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK: } // end sil function 'simple_assign_take_trivial'
+sil @simple_assign_take_trivial : $@convention(thin) (Builtin.Int32, @in Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32, %1 : $*Builtin.Int32):
+ %2 = alloc_stack $Builtin.Int32
+ store %0 to %2 : $*Builtin.Int32
+ copy_addr [take] %1 to %2 : $*Builtin.Int32
+ dealloc_stack %2 : $*Builtin.Int32
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// In this case, we perform an init, copy. Since we do not want to lose the +1
+// on the argument, we do not eliminate this (even though with time perhaps we
+// could).
+// CHECK-LABEL: sil @simple_init_copy : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_init_copy'
+sil @simple_init_copy : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ copy_addr %1 to [initialization] %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// This we can promote successfully.
+// CHECK-LABEL: sil @simple_init_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG0:%.*]] : $Builtin.NativeObject, [[ARG1:%.*]] : $*Builtin.NativeObject):
+// CHECK-NOT: alloc_stack
+// CHECK: strong_release [[ARG0]]
+// CHECK: [[ARG1_LOADED:%.*]] = load [[ARG1]]
+// CHECK: strong_release [[ARG1_LOADED]]
+// CHECK: } // end sil function 'simple_init_take'
+sil @simple_init_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ copy_addr [take] %1 to [initialization] %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// Since we are copying the input argument, we cannot get rid of the copy_addr,
+// meaning we shouldn't eliminate the allocation here.
+// CHECK-LABEL: sil @simple_assign_no_take : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_assign_no_take'
+sil @simple_assign_no_take : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to %2 : $*Builtin.NativeObject
+ copy_addr %1 to %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// If PMO understood how to promote assigns, we should be able to handle this
+// case.
+// CHECK-LABEL: sil @simple_assign_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_assign_take'
+sil @simple_assign_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to %2 : $*Builtin.NativeObject
+ copy_addr [take] %1 to %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_diamond_without_assign : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NOT: alloc_stack
+// CHECK-NOT: store
+// CHECK: bb3:
+// CHECK-NEXT: strong_release [[ARG]]
+// CHECK: } // end sil function 'simple_diamond_without_assign'
+sil @simple_diamond_without_assign : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+ %1 = alloc_stack $Builtin.NativeObject
+ store %0 to %1 : $*Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ br bb3
+
+bb2:
+ br bb3
+
+bb3:
+ destroy_addr %1 : $*Builtin.NativeObject
+ dealloc_stack %1 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// We should not promote this due to this being an assign to %2.
+// CHECK-LABEL: sil @simple_diamond_with_assign : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_diamond_with_assign'
+sil @simple_diamond_with_assign : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to %2 : $*Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ copy_addr [take] %1 to %2 : $*Builtin.NativeObject
+ br bb3
+
+bb2:
+ br bb3
+
+bb3:
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// Today PMO cannot handle different available values coming from different
+// BBs. With time it can be taught to do that if necessary. That being said,
+// this test shows that we /tried/ and failed with the available value test
+// instead of failing earlier due to the copy_addr being an assign since we
+// explode the copy_addr.
+// CHECK-LABEL: sil @simple_diamond_with_assign_remove : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK-NOT: copy_addr
+// CHECK: } // end sil function 'simple_diamond_with_assign_remove'
+sil @simple_diamond_with_assign_remove : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to %2 : $*Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ destroy_addr %2 : $*Builtin.NativeObject
+ copy_addr [take] %1 to [initialization] %2 : $*Builtin.NativeObject
+ br bb3
+
+bb2:
+ br bb3
+
+bb3:
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
diff --git a/test/SILOptimizer/predictable_deadalloc_elim_ownership.sil b/test/SILOptimizer/predictable_deadalloc_elim_ownership.sil
new file mode 100644
index 0000000..65d9ffb
--- /dev/null
+++ b/test/SILOptimizer/predictable_deadalloc_elim_ownership.sil
@@ -0,0 +1,416 @@
+// RUN: %target-sil-opt -enable-sil-verify-all -enable-sil-ownership %s -predictable-deadalloc-elim | %FileCheck %s
+
+sil_stage canonical
+
+import Swift
+import Builtin
+
+//////////////////
+// Declarations //
+//////////////////
+
+class Klass {}
+struct KlassWithKlassTuple {
+ var first: Klass
+ var second: (Klass, Klass)
+ var third: Klass
+}
+
+///////////
+// Tests //
+///////////
+
+// CHECK-LABEL: sil [ossa] @simple_trivial_stack : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK: } // end sil function 'simple_trivial_stack'
+sil [ossa] @simple_trivial_stack : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+ %1 = alloc_stack $Builtin.Int32
+ store %0 to [trivial] %1 : $*Builtin.Int32
+ dealloc_stack %1 : $*Builtin.Int32
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @simple_trivial_init_box : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_box
+// CHECK: } // end sil function 'simple_trivial_init_box'
+sil [ossa] @simple_trivial_init_box : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+ %1 = alloc_box ${ var Builtin.Int32 }
+ %2 = project_box %1 : ${ var Builtin.Int32 }, 0
+ store %0 to [trivial] %2 : $*Builtin.Int32
+ destroy_value %1 : ${ var Builtin.Int32 }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @simple_trivial_uninit_box : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_box
+// CHECK: } // end sil function 'simple_trivial_uninit_box'
+sil [ossa] @simple_trivial_uninit_box : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+ %1 = alloc_box ${ var Builtin.Int32 }
+ %2 = project_box %1 : ${ var Builtin.Int32 }, 0
+ store %0 to [trivial] %2 : $*Builtin.Int32
+ dealloc_box %1 : ${ var Builtin.Int32 }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @simple_nontrivial_stack : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NEXT: destroy_value [[ARG]]
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'simple_nontrivial_stack'
+sil [ossa] @simple_nontrivial_stack : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject):
+ %1 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %1 : $*Builtin.NativeObject
+ destroy_addr %1 : $*Builtin.NativeObject
+ dealloc_stack %1 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// We do not handle this today, since we do not understand that we need to treat
+// the destroy_value of the alloc_box as a destroy_addr of the entire value.
+//
+// FIXME: We should be able to handle this.
+//
+// CHECK-LABEL: sil [ossa] @simple_nontrivial_init_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: alloc_box
+// CHECK: } // end sil function 'simple_nontrivial_init_box'
+sil [ossa] @simple_nontrivial_init_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject):
+ %1 = alloc_box ${ var Builtin.NativeObject }
+ %2 = project_box %1 : ${ var Builtin.NativeObject }, 0
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ destroy_value %1 : ${ var Builtin.NativeObject }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @simple_nontrivial_uninit_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NEXT: destroy_value [[ARG]]
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'simple_nontrivial_uninit_box'
+sil [ossa] @simple_nontrivial_uninit_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject):
+ %1 = alloc_box ${ var Builtin.NativeObject }
+ %2 = project_box %1 : ${ var Builtin.NativeObject }, 0
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_box %1 : ${ var Builtin.NativeObject }
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+//////////////////
+// Assign Tests //
+//////////////////
+
+// Make sure that we do eliminate this allocation
+// CHECK-LABEL: sil [ossa] @simple_assign_take_trivial : $@convention(thin) (Builtin.Int32, @in Builtin.Int32) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK: } // end sil function 'simple_assign_take_trivial'
+sil [ossa] @simple_assign_take_trivial : $@convention(thin) (Builtin.Int32, @in Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32, %1 : $*Builtin.Int32):
+ %2 = alloc_stack $Builtin.Int32
+ store %0 to [trivial] %2 : $*Builtin.Int32
+ copy_addr [take] %1 to %2 : $*Builtin.Int32
+ dealloc_stack %2 : $*Builtin.Int32
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// In this case, we perform an init, copy. Since we do not want to lose the +1
+// on the argument, we do not eliminate this (even though with time perhaps we
+// could).
+// CHECK-LABEL: sil [ossa] @simple_init_copy : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_init_copy'
+sil [ossa] @simple_init_copy : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ copy_addr %1 to [initialization] %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// This we can promote successfully.
+// CHECK-LABEL: sil [ossa] @simple_init_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG0:%.*]] : @owned $Builtin.NativeObject, [[ARG1:%.*]] : $*Builtin.NativeObject):
+// CHECK-NOT: alloc_stack
+// CHECK: destroy_value [[ARG0]]
+// CHECK: [[ARG1_LOADED:%.*]] = load [take] [[ARG1]]
+// CHECK: destroy_value [[ARG1_LOADED]]
+// CHECK: } // end sil function 'simple_init_take'
+sil [ossa] @simple_init_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ copy_addr [take] %1 to [initialization] %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// Since we are copying the input argument, we cannot get rid of the copy_addr,
+// meaning we shouldn't eliminate the allocation here.
+// CHECK-LABEL: sil [ossa] @simple_assign_no_take : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_assign_no_take'
+sil [ossa] @simple_assign_no_take : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ copy_addr %1 to %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// If PMO understood how to promote assigns, we should be able to handle this
+// case.
+// CHECK-LABEL: sil [ossa] @simple_assign_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_assign_take'
+sil [ossa] @simple_assign_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ copy_addr [take] %1 to %2 : $*Builtin.NativeObject
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @simple_diamond_without_assign : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NOT: alloc_stack
+// CHECK-NOT: store
+// CHECK: bb3:
+// CHECK-NEXT: destroy_value [[ARG]]
+// CHECK: } // end sil function 'simple_diamond_without_assign'
+sil [ossa] @simple_diamond_without_assign : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject):
+ %1 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %1 : $*Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ br bb3
+
+bb2:
+ br bb3
+
+bb3:
+ destroy_addr %1 : $*Builtin.NativeObject
+ dealloc_stack %1 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// We should not promote this due to this being an assign to %2.
+// CHECK-LABEL: sil [ossa] @simple_diamond_with_assign : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_diamond_with_assign'
+sil [ossa] @simple_diamond_with_assign : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ copy_addr [take] %1 to %2 : $*Builtin.NativeObject
+ br bb3
+
+bb2:
+ br bb3
+
+bb3:
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// Today PMO cannot handle different available values coming from different
+// BBs. With time it can be taught to do that if necessary. That being said,
+// this test shows that we /tried/ and failed with the available value test
+// instead of failing earlier due to the copy_addr being an assign since we
+// explode the copy_addr.
+// CHECK-LABEL: sil [ossa] @simple_diamond_with_assign_remove : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK-NOT: copy_addr
+// CHECK: } // end sil function 'simple_diamond_with_assign_remove'
+sil [ossa] @simple_diamond_with_assign_remove : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+ %2 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %2 : $*Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ destroy_addr %2 : $*Builtin.NativeObject
+ copy_addr [take] %1 to [initialization] %2 : $*Builtin.NativeObject
+ br bb3
+
+bb2:
+ br bb3
+
+bb3:
+ destroy_addr %2 : $*Builtin.NativeObject
+ dealloc_stack %2 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// Make sure that we can handle structs, tuples that are not fully available
+// themselves, but whose components are fully available.
+// CHECK-LABEL: sil [ossa] @multiple_inits_1 : $@convention(thin) (@owned Klass, @owned Klass, @owned Klass, @owned Klass) -> () {
+// CHECK: bb0([[ARG0:%.*]] : @owned $Klass, [[ARG1:%.*]] : @owned $Klass, [[ARG2:%.*]] : @owned $Klass, [[ARG3:%.*]] : @owned $Klass):
+// CHECK: [[TUP:%.*]] = tuple ([[ARG1]] : $Klass, [[ARG2]] : $Klass)
+// CHECK: [[STRUCT:%.*]] = struct $KlassWithKlassTuple ([[ARG0]] : $Klass, [[TUP]] : $(Klass, Klass), [[ARG3]] : $Klass)
+// CHECK: destroy_value [[STRUCT]]
+// CHECK: } // end sil function 'multiple_inits_1'
+sil [ossa] @multiple_inits_1 : $@convention(thin) (@owned Klass, @owned Klass, @owned Klass, @owned Klass) -> () {
+bb0(%0 : @owned $Klass, %1 : @owned $Klass, %2 : @owned $Klass, %3 : @owned $Klass):
+ %stack = alloc_stack $KlassWithKlassTuple
+ %stack0 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.first
+ %stack1 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.second
+ %stack10 = tuple_element_addr %stack1 : $*(Klass, Klass), 0
+ %stack11 = tuple_element_addr %stack1 : $*(Klass, Klass), 1
+ %stack2 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.third
+
+ store %0 to [init] %stack0 : $*Klass
+ store %1 to [init] %stack10 : $*Klass
+ store %2 to [init] %stack11 : $*Klass
+ store %3 to [init] %stack2 : $*Klass
+
+ destroy_addr %stack : $*KlassWithKlassTuple
+ dealloc_stack %stack : $*KlassWithKlassTuple
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @multiple_inits_2 : $@convention(thin) (@owned Klass, @owned (Klass, Klass), @owned Klass) -> () {
+// CHECK: bb0([[ARG0:%.*]] : @owned $Klass, [[ARG1:%.*]] : @owned $(Klass, Klass), [[ARG2:%.*]] : @owned $Klass):
+// CHECK: ([[LHS:%.*]], [[RHS:%.*]]) = destructure_tuple [[ARG1]]
+// CHECK: [[TUP:%.*]] = tuple ([[LHS]] : $Klass, [[RHS]] : $Klass)
+// CHECK: [[STRUCT:%.*]] = struct $KlassWithKlassTuple ([[ARG0]] : $Klass, [[TUP]] : $(Klass, Klass), [[ARG2]] : $Klass)
+// CHECK: destroy_value [[STRUCT]]
+// CHECK: } // end sil function 'multiple_inits_2'
+sil [ossa] @multiple_inits_2 : $@convention(thin) (@owned Klass, @owned (Klass, Klass), @owned Klass) -> () {
+bb0(%0 : @owned $Klass, %1 : @owned $(Klass, Klass), %2 : @owned $Klass):
+ %stack = alloc_stack $KlassWithKlassTuple
+ %stack0 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.first
+ %stack1 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.second
+ %stack2 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.third
+
+ store %0 to [init] %stack0 : $*Klass
+ store %1 to [init] %stack1 : $*(Klass, Klass)
+ store %2 to [init] %stack2 : $*Klass
+
+ destroy_addr %stack : $*KlassWithKlassTuple
+ dealloc_stack %stack : $*KlassWithKlassTuple
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// We cannot promote this since we have destroy_addr that are not fully
+// available. This would mean that we would need to split up the store which is
+// unsupported today.
+//
+// CHECK-LABEL: sil [ossa] @destroy_addr_not_fully_available : $@convention(thin) (@owned KlassWithKlassTuple) -> () {
+// CHECK: alloc_stack
+// CHECK: } // end sil function 'destroy_addr_not_fully_available'
+sil [ossa] @destroy_addr_not_fully_available : $@convention(thin) (@owned KlassWithKlassTuple) -> () {
+bb0(%0 : @owned $KlassWithKlassTuple):
+ %stack = alloc_stack $KlassWithKlassTuple
+ store %0 to [init] %stack : $*KlassWithKlassTuple
+ %stack0 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.first
+ %stack1 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.second
+ %stack2 = struct_element_addr %stack : $*KlassWithKlassTuple, #KlassWithKlassTuple.third
+
+ destroy_addr %stack0 : $*Klass
+ destroy_addr %stack1 : $*(Klass, Klass)
+ destroy_addr %stack2 : $*Klass
+ dealloc_stack %stack : $*KlassWithKlassTuple
+ %9999 = tuple()
+ return %9999 : $()
+
+}
+
+struct NativeObjectPair {
+ var f1: Builtin.NativeObject
+ var f2: Builtin.NativeObject
+}
+
+struct NativeObjectTriple {
+ var f1: Builtin.NativeObject
+ var f2: NativeObjectPair
+}
+
+// diamond_test_4 from predictable_memopt.sil after running through
+// predictable-memaccess-opt. We should be able to eliminate %2.
+// CHECK-LABEL: sil [ossa] @diamond_test_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK: } // end sil function 'diamond_test_4'
+sil [ossa] @diamond_test_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair) -> () {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $NativeObjectPair):
+ %2 = alloc_stack $NativeObjectTriple
+ cond_br undef, bb1, bb2
+
+bb1:
+ %4 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f1
+ %5 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ store %0 to [init] %4 : $*Builtin.NativeObject
+ store %1 to [init] %5 : $*NativeObjectPair
+ br bb3
+
+bb2:
+ %13 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f1
+ %14 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ store %0 to [init] %13 : $*Builtin.NativeObject
+ store %1 to [init] %14 : $*NativeObjectPair
+ br bb3
+
+bb3:
+ destroy_addr %2 : $*NativeObjectTriple
+ dealloc_stack %2 : $*NativeObjectTriple
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// We should do nothing here since we do not have a fully available value.
+//
+// CHECK-LABEL: sil [ossa] @promote_partial_store_assign : $@convention(thin) (@owned NativeObjectPair, @owned Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: } // end sil function 'promote_partial_store_assign'
+sil [ossa] @promote_partial_store_assign : $@convention(thin) (@owned NativeObjectPair, @owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $NativeObjectPair, %1 : @owned $Builtin.NativeObject):
+ %2 = alloc_stack $NativeObjectPair
+ store %0 to [init] %2 : $*NativeObjectPair
+ %3 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.f1
+ store %1 to [assign] %3 : $*Builtin.NativeObject
+ destroy_addr %2 : $*NativeObjectPair
+ dealloc_stack %2 : $*NativeObjectPair
+ %9999 = tuple()
+ return %9999 : $()
+}
diff --git a/test/SILOptimizer/predictable_memaccess_opts.sil b/test/SILOptimizer/predictable_memaccess_opts.sil
new file mode 100644
index 0000000..f46420a
--- /dev/null
+++ b/test/SILOptimizer/predictable_memaccess_opts.sil
@@ -0,0 +1,531 @@
+// RUN: %target-sil-opt -enable-sil-verify-all -enable-sil-ownership -predictable-memaccess-opts %s | %FileCheck %s
+
+sil_stage raw
+
+import Builtin
+import Swift
+
+//////////////////
+// Declarations //
+//////////////////
+
+class Klass {} // Reference-counted payload type; makes loads/stores below non-trivial.
+
+struct NativeObjectPair { // Two-field aggregate used for element-wise (struct_element_addr) promotion tests.
+  var x: Klass
+  var y: Klass
+}
+
+/// Needed to avoid tuple scalarization code in the use gatherer.
+struct KlassAndTuple {
+  var first: Klass
+  var second: (Klass, Klass)
+}
+
+///////////
+// Tests //
+///////////
+
+//===---
+// Fully Available Leaf Node Values
+//
+
+// CHECK-LABEL: sil [ossa] @simple_trivial_load_promotion : $@convention(thin) (Builtin.Int32) -> Builtin.Int32 {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK: return [[ARG]]
+// CHECK: } // end sil function 'simple_trivial_load_promotion'
+sil [ossa] @simple_trivial_load_promotion : $@convention(thin) (Builtin.Int32) -> Builtin.Int32 { // Trivial value: the load forwards straight from the store (CHECK: return [[ARG]]).
+bb0(%0 : $Builtin.Int32):
+  %1 = alloc_stack $Builtin.Int32
+  store %0 to [trivial] %1 : $*Builtin.Int32
+  %2 = load [trivial] %1 : $*Builtin.Int32 // Promotable: %2 == %0.
+  dealloc_stack %1 : $*Builtin.Int32
+  return %2 : $Builtin.Int32
+}
+
+// CHECK-LABEL: sil [ossa] @simple_nontrivial_load_promotion : $@convention(thin) (@owned Klass) -> @owned Klass {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK: [[STACK:%.*]] = alloc_stack $Klass
+// CHECK: [[ARG_COPY:%.*]] = copy_value [[ARG]]
+// CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[ARG_COPY]]
+// CHECK: } // end sil function 'simple_nontrivial_load_promotion'
+sil [ossa] @simple_nontrivial_load_promotion : $@convention(thin) (@owned Klass) -> @owned Klass { // Non-trivial value: per the CHECK block the load becomes a copy_value of %0 placed before the store.
+bb0(%0 : @owned $Klass):
+  %1 = alloc_stack $Klass
+  store %0 to [init] %1 : $*Klass
+  %2 = load [copy] %1 : $*Klass // Promotable.
+  destroy_addr %1 : $*Klass
+  dealloc_stack %1 : $*Klass
+  return %2 : $Klass
+}
+
+// CHECK-LABEL: sil [ossa] @struct_nontrivial_load_promotion : $@convention(thin) (@owned Klass, @owned Klass) -> @owned NativeObjectPair {
+// CHECK: bb0([[ARG1:%.*]] : @owned $Klass, [[ARG2:%.*]] : @owned $Klass):
+// CHECK: [[STACK:%.*]] = alloc_stack $NativeObjectPair
+// CHECK: [[FIRST_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: [[ARG2_COPY:%.*]] = copy_value [[ARG2]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'struct_nontrivial_load_promotion'
+sil [ossa] @struct_nontrivial_load_promotion : $@convention(thin) (@owned Klass, @owned Klass) -> @owned NativeObjectPair { // Aggregate load rebuilt from the two per-field stores (CHECK: struct of copies).
+bb0(%0 : @owned $Klass, %1 : @owned $Klass):
+  %2 = alloc_stack $NativeObjectPair
+  %3 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+  %4 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.y
+  store %0 to [init] %3 : $*Klass
+  store %1 to [init] %4 : $*Klass
+  %5 = load [copy] %2 : $*NativeObjectPair // Promotable: both fields are available here.
+  destroy_addr %2 : $*NativeObjectPair
+  dealloc_stack %2 : $*NativeObjectPair
+  return %5 : $NativeObjectPair
+}
+
+// CHECK-LABEL: sil [ossa] @tuple_nontrivial_load_promotion : $@convention(thin) (@owned Klass, @owned Klass) -> @owned (Klass, Klass) {
+// CHECK: bb0([[ARG1:%.*]] : @owned $Klass, [[ARG2:%.*]] : @owned $Klass):
+// CHECK: [[STACK:%.*]] = alloc_stack $(Klass, Klass)
+// CHECK: [[FIRST_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: [[ARG2_COPY:%.*]] = copy_value [[ARG2]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: [[RESULT:%.*]] = tuple ([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'tuple_nontrivial_load_promotion'
+sil [ossa] @tuple_nontrivial_load_promotion : $@convention(thin) (@owned Klass, @owned Klass) -> @owned (Klass, Klass) { // Tuple analogue of the struct test above: load rebuilt as tuple of copies.
+bb0(%0 : @owned $Klass, %1 : @owned $Klass):
+  %2 = alloc_stack $(Klass, Klass)
+  %3 = tuple_element_addr %2 : $*(Klass, Klass), 0
+  %4 = tuple_element_addr %2 : $*(Klass, Klass), 1
+  store %0 to [init] %3 : $*Klass
+  store %1 to [init] %4 : $*Klass
+  %5 = load [copy] %2 : $*(Klass, Klass) // Promotable: both elements are available here.
+  destroy_addr %2 : $*(Klass, Klass)
+  dealloc_stack %2 : $*(Klass, Klass)
+  return %5 : $(Klass, Klass)
+}
+
+// CHECK-LABEL: sil [ossa] @simple_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Klass) -> @owned Klass {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK: [[STACK:%.*]] = alloc_stack $Klass
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK: [[ARG_COPY:%.*]] = copy_value [[ARG]]
+// CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: br bb3([[ARG_COPY]] :
+//
+// CHECK: bb2:
+// CHECK: [[ARG_COPY:%.*]] = copy_value [[ARG]]
+// CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: br bb3([[ARG_COPY]] :
+//
+// CHECK: bb3([[RESULT:%.*]] : @owned $Klass):
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'simple_nontrivial_load_promotion_multi_insertpt'
+sil [ossa] @simple_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Klass) -> @owned Klass { // The value reaches bb3 from two stores; CHECK shows copies at each store merged via a new bb3 argument.
+bb0(%0 : @owned $Klass):
+  %1 = alloc_stack $Klass
+  cond_br undef, bb1, bb2
+
+bb1:
+  store %0 to [init] %1 : $*Klass
+  br bb3
+
+bb2:
+  store %0 to [init] %1 : $*Klass
+  br bb3
+
+bb3:
+  %2 = load [copy] %1 : $*Klass // Promotable: replaced by the phi of the two copies.
+  destroy_addr %1 : $*Klass
+  dealloc_stack %1 : $*Klass
+  return %2 : $Klass
+}
+
+// CHECK-LABEL: sil [ossa] @struct_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Klass, @owned Klass) -> @owned NativeObjectPair {
+// CHECK: bb0([[ARG1:%.*]] : @owned $Klass, [[ARG2:%.*]] : @owned $Klass):
+// CHECK: [[STACK:%.*]] = alloc_stack $NativeObjectPair
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK: [[FIRST_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: [[ARG2_COPY:%.*]] = copy_value [[ARG2]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+//
+// CHECK: bb2:
+// CHECK: [[FIRST_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: [[ARG2_COPY:%.*]] = copy_value [[ARG2]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+//
+// CHECK: bb3([[ARG1_COPY:%.*]] : @owned $Klass, [[ARG2_COPY:%.*]] : @owned $Klass):
+// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'struct_nontrivial_load_promotion_multi_insertpt'
+sil [ossa] @struct_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Klass, @owned Klass) -> @owned NativeObjectPair { // Both fields are available on both paths; CHECK shows them merged via two bb3 phi arguments.
+bb0(%0 : @owned $Klass, %1 : @owned $Klass):
+  %2 = alloc_stack $NativeObjectPair
+  cond_br undef, bb1, bb2
+
+bb1:
+  %3a = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+  %4a = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.y
+  store %0 to [init] %3a : $*Klass
+  store %1 to [init] %4a : $*Klass
+  br bb3
+
+bb2:
+  %3b = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+  %4b = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.y
+  store %0 to [init] %3b : $*Klass
+  store %1 to [init] %4b : $*Klass
+  br bb3
+
+bb3:
+  %5 = load [copy] %2 : $*NativeObjectPair // Promotable: rebuilt from the two phi values.
+  destroy_addr %2 : $*NativeObjectPair
+  dealloc_stack %2 : $*NativeObjectPair
+  return %5 : $NativeObjectPair
+}
+
+// CHECK-LABEL: sil [ossa] @tuple_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Klass, @owned Klass) -> @owned (Klass, Klass) {
+// CHECK: bb0([[ARG1:%.*]] : @owned $Klass, [[ARG2:%.*]] : @owned $Klass):
+// CHECK: [[STACK:%.*]] = alloc_stack $(Klass, Klass)
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK: [[FIRST_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: [[ARG2_COPY:%.*]] = copy_value [[ARG2]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+//
+// CHECK: bb2:
+// CHECK: [[FIRST_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: [[ARG2_COPY:%.*]] = copy_value [[ARG2]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+//
+// CHECK: bb3([[ARG1_COPY:%.*]] : @owned $Klass, [[ARG2_COPY:%.*]] : @owned $Klass):
+// CHECK: [[RESULT:%.*]] = tuple ([[ARG1_COPY]] : $Klass, [[ARG2_COPY]] : $Klass)
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'tuple_nontrivial_load_promotion_multi_insertpt'
+sil [ossa] @tuple_nontrivial_load_promotion_multi_insertpt : $@convention(thin) (@owned Klass, @owned Klass) -> @owned (Klass, Klass) { // Tuple analogue of the struct multi-insertpt test: both elements merged via two bb3 phi arguments.
+bb0(%0 : @owned $Klass, %1 : @owned $Klass):
+  %2 = alloc_stack $(Klass, Klass)
+  cond_br undef, bb1, bb2
+
+bb1:
+  %3a = tuple_element_addr %2 : $*(Klass, Klass), 0
+  %4a = tuple_element_addr %2 : $*(Klass, Klass), 1
+  store %0 to [init] %3a : $*Klass
+  store %1 to [init] %4a : $*Klass
+  br bb3
+
+bb2:
+  %3b = tuple_element_addr %2 : $*(Klass, Klass), 0
+  %4b = tuple_element_addr %2 : $*(Klass, Klass), 1
+  store %0 to [init] %3b : $*Klass
+  store %1 to [init] %4b : $*Klass
+  br bb3
+
+bb3:
+  %5 = load [copy] %2 : $*(Klass, Klass) // Promotable: rebuilt from the two phi values.
+  destroy_addr %2 : $*(Klass, Klass)
+  dealloc_stack %2 : $*(Klass, Klass)
+  return %5 : $(Klass, Klass)
+}
+
+//===---
+// Value Not Fully Available
+//
+
+// CHECK-LABEL: sil [ossa] @struct_nontrivial_load_promotion_multi_insertpt_value_not_fully_available : $@convention(thin) (@owned Klass, @owned Klass, @owned Klass) -> @owned NativeObjectPair {
+// CHECK: bb0([[ARG1:%.*]] : @owned $Klass, [[ARG2:%.*]] : @owned $Klass, [[ARG3:%.*]] : @owned $Klass):
+// CHECK: [[STACK:%.*]] = alloc_stack $NativeObjectPair
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK: [[FIRST_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: destroy_value [[ARG3]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass)
+//
+// CHECK: bb2:
+// CHECK: [[FIRST_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: destroy_value [[ARG2]]
+// CHECK: store [[ARG3]] to [init] [[SECOND_ADDR]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass)
+//
+// CHECK: bb3([[ARG1_COPY:%.*]] : @owned $Klass):
+// CHECK: [[SECOND_ADDR:%.*]] = struct_element_addr [[STACK]]
+// CHECK: [[SECOND_VAL_COPY:%.*]] = load [copy] [[SECOND_ADDR]]
+// CHECK: [[RESULT:%.*]] = struct $NativeObjectPair ([[ARG1_COPY]] : $Klass, [[SECOND_VAL_COPY]] : $Klass)
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'struct_nontrivial_load_promotion_multi_insertpt_value_not_fully_available'
+sil [ossa] @struct_nontrivial_load_promotion_multi_insertpt_value_not_fully_available : $@convention(thin) (@owned Klass, @owned Klass, @owned Klass) -> @owned NativeObjectPair { // .y is stored from different values per path; per CHECK only .x is promoted and .y is re-loaded in bb3.
+bb0(%0 : @owned $Klass, %1 : @owned $Klass, %arg2 : @owned $Klass):
+  %2 = alloc_stack $NativeObjectPair
+  cond_br undef, bb1, bb2
+
+bb1:
+  %3a = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+  %4a = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.y
+  store %0 to [init] %3a : $*Klass
+  store %1 to [init] %4a : $*Klass // .y gets %1 on this path...
+  destroy_value %arg2 : $Klass
+  br bb3
+
+bb2:
+  %3b = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+  %4b = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.y
+  store %0 to [init] %3b : $*Klass
+  destroy_value %1 : $Klass
+  store %arg2 to [init] %4b : $*Klass // ...but %arg2 on this one.
+  br bb3
+
+bb3:
+  %5 = load [copy] %2 : $*NativeObjectPair
+  destroy_addr %2 : $*NativeObjectPair
+  dealloc_stack %2 : $*NativeObjectPair
+  return %5 : $NativeObjectPair
+}
+
+// CHECK-LABEL: sil [ossa] @tuple_nontrivial_load_promotion_multi_insertpt_value_not_fully_available : $@convention(thin) (@owned Klass, @owned Klass, @owned Klass) -> @owned (Klass, Klass) {
+// CHECK: bb0([[ARG1:%.*]] : @owned $Klass, [[ARG2:%.*]] : @owned $Klass, [[ARG3:%.*]] : @owned $Klass):
+// CHECK: [[STACK:%.*]] = alloc_stack $(Klass, Klass)
+// This is here because we scalarize loads in our use list. Really, PMO shouldn't scalarize.
+// CHECK: [[SCALARIZED_TUPLE_GEP:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK: [[FIRST_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: store [[ARG2]] to [init] [[SECOND_ADDR]]
+// CHECK: destroy_value [[ARG3]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass)
+//
+// CHECK: bb2:
+// CHECK: [[FIRST_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[SECOND_ADDR:%.*]] = tuple_element_addr [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [init] [[FIRST_ADDR]]
+// CHECK: destroy_value [[ARG2]]
+// CHECK: store [[ARG3]] to [init] [[SECOND_ADDR]]
+// CHECK: br bb3([[ARG1_COPY]] : $Klass)
+//
+// CHECK: bb3([[ARG1_COPY:%.*]] : @owned $Klass):
+// CHECK: [[SECOND_VAL_COPY:%.*]] = load [copy] [[SCALARIZED_TUPLE_GEP]]
+// CHECK: [[RESULT:%.*]] = tuple ([[ARG1_COPY]] : $Klass, [[SECOND_VAL_COPY]] : $Klass)
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'tuple_nontrivial_load_promotion_multi_insertpt_value_not_fully_available'
+sil [ossa] @tuple_nontrivial_load_promotion_multi_insertpt_value_not_fully_available : $@convention(thin) (@owned Klass, @owned Klass, @owned Klass) -> @owned (Klass, Klass) { // Element 1 differs per path; per CHECK only element 0 is promoted and element 1 is re-loaded (via the scalarized gep).
+bb0(%0 : @owned $Klass, %1 : @owned $Klass, %arg2 : @owned $Klass):
+  %2 = alloc_stack $(Klass, Klass)
+  cond_br undef, bb1, bb2
+
+bb1:
+  %3a = tuple_element_addr %2 : $*(Klass, Klass), 0
+  %4a = tuple_element_addr %2 : $*(Klass, Klass), 1
+  store %0 to [init] %3a : $*Klass
+  store %1 to [init] %4a : $*Klass // element 1 gets %1 here...
+  destroy_value %arg2 : $Klass
+  br bb3
+
+bb2:
+  %3b = tuple_element_addr %2 : $*(Klass, Klass), 0
+  %4b = tuple_element_addr %2 : $*(Klass, Klass), 1
+  store %0 to [init] %3b : $*Klass
+  destroy_value %1 : $Klass
+  store %arg2 to [init] %4b : $*Klass // ...and %arg2 here.
+  br bb3
+
+bb3:
+  %5 = load [copy] %2 : $*(Klass, Klass)
+  destroy_addr %2 : $*(Klass, Klass)
+  dealloc_stack %2 : $*(Klass, Klass)
+  return %5 : $(Klass, Klass)
+}
+
+//===---
+// Tests For Partial Uses Of Available Value
+//
+
+// CHECK-LABEL: sil [ossa] @simple_partialstructuse_load_promotion : $@convention(thin) (@owned NativeObjectPair) -> @owned Klass {
+// CHECK: bb0([[ARG:%.*]] : @owned $NativeObjectPair):
+// CHECK: [[STACK:%.*]] = alloc_stack
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[BORROWED_ARG_FIELD:%.*]] = struct_extract [[BORROWED_ARG]]
+// CHECK: [[COPIED_ARG_FIELD:%.*]] = copy_value [[BORROWED_ARG_FIELD]]
+// CHECK: end_borrow [[BORROWED_ARG]]
+// CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[COPIED_ARG_FIELD]]
+// CHECK: } // end sil function 'simple_partialstructuse_load_promotion'
+sil [ossa] @simple_partialstructuse_load_promotion : $@convention(thin) (@owned NativeObjectPair) -> (@owned Klass) { // Loading just .x of the stored pair becomes borrow + struct_extract + copy of %0 (see CHECK).
+bb0(%0 : @owned $NativeObjectPair):
+  %1 = alloc_stack $NativeObjectPair
+  store %0 to [init] %1 : $*NativeObjectPair
+  %2 = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x
+  %3 = load [copy] %2 : $*Klass // Promotable partial use of the available aggregate.
+  destroy_addr %1 : $*NativeObjectPair
+  dealloc_stack %1 : $*NativeObjectPair
+  return %3 : $Klass
+}
+
+// CHECK-LABEL: sil [ossa] @simple_partialtupleuse_load_promotion : $@convention(thin) (@owned KlassAndTuple) -> @owned Klass {
+// CHECK: bb0([[ARG:%.*]] : @owned $KlassAndTuple):
+// CHECK: [[STACK:%.*]] = alloc_stack
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[BORROWED_ARG_FIELD_1:%.*]] = struct_extract [[BORROWED_ARG]]
+// CHECK: [[BORROWED_ARG_FIELD_2:%.*]] = tuple_extract [[BORROWED_ARG_FIELD_1]]
+// CHECK: [[COPIED_ARG_FIELD:%.*]] = copy_value [[BORROWED_ARG_FIELD_2]]
+// CHECK: end_borrow [[BORROWED_ARG]]
+// CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[COPIED_ARG_FIELD]]
+// CHECK: } // end sil function 'simple_partialtupleuse_load_promotion'
+sil [ossa] @simple_partialtupleuse_load_promotion : $@convention(thin) (@owned KlassAndTuple) -> (@owned Klass) { // Two-level projection (.second.0) becomes borrow + struct_extract + tuple_extract + copy of %0 (see CHECK).
+bb0(%0 : @owned $KlassAndTuple):
+  %1 = alloc_stack $KlassAndTuple
+  store %0 to [init] %1 : $*KlassAndTuple
+  %2 = struct_element_addr %1 : $*KlassAndTuple, #KlassAndTuple.second
+  %3 = tuple_element_addr %2 : $*(Klass, Klass), 0
+  %4 = load [copy] %3 : $*Klass // Promotable partial use of the available aggregate.
+  destroy_addr %1 : $*KlassAndTuple
+  dealloc_stack %1 : $*KlassAndTuple
+  return %4 : $Klass
+}
+
+// CHECK-LABEL: sil [ossa] @simple_assignstore : $@convention(thin) (@owned Klass, @owned Klass) -> @owned Klass {
+// CHECK: bb0([[ARG0:%.*]] : @owned $Klass, [[ARG1:%.*]] : @owned $Klass):
+// CHECK: [[STACK:%.*]] = alloc_stack $Klass
+// CHECK: store [[ARG0]] to [init] [[STACK]]
+// CHECK: [[ARG1_COPY:%.*]] = copy_value [[ARG1]]
+// CHECK: store [[ARG1]] to [assign] [[STACK]]
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[ARG1_COPY]]
+// CHECK: } // end sil function 'simple_assignstore'
+sil [ossa] @simple_assignstore : $@convention(thin) (@owned Klass, @owned Klass) -> @owned Klass { // The [assign] replaces the whole value, so the load forwards %1 (CHECK: return a copy of ARG1).
+bb0(%0 : @owned $Klass, %1 : @owned $Klass):
+  %2 = alloc_stack $Klass
+  store %0 to [init] %2 : $*Klass
+  store %1 to [assign] %2 : $*Klass // Kills the stored %0.
+  %3 = load [copy] %2 : $*Klass // Promotable: becomes a copy of %1.
+  destroy_addr %2 : $*Klass
+  dealloc_stack %2 : $*Klass
+  return %3 : $Klass
+}
+
+// CHECK-LABEL: sil [ossa] @diamond_test_2 : $@convention(thin) (@owned NativeObjectPair) -> @owned Klass {
+// CHECK: bb0([[ARG:%.*]] : @owned $NativeObjectPair):
+// CHECK: [[STACK:%.*]] = alloc_stack $NativeObjectPair
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[LHS1:%.*]] = struct_extract [[BORROWED_ARG]] : $NativeObjectPair, #NativeObjectPair.x
+// CHECK: [[LHS1_COPY:%.*]] = copy_value [[LHS1]]
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[LHS2:%.*]] = struct_extract [[BORROWED_ARG]] : $NativeObjectPair, #NativeObjectPair.x
+// CHECK: [[LHS2_COPY:%.*]] = copy_value [[LHS2]]
+// CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK: destroy_value [[LHS1_COPY]]
+// CHECK: br bb3([[LHS2_COPY]] :
+//
+// CHECK: bb2:
+// CHECK: destroy_value [[LHS2_COPY]]
+// CHECK: br bb3([[LHS1_COPY]] :
+//
+// CHECK: bb3([[PHI:%.*]] :
+// CHECK: destroy_addr [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[PHI]]
+// CHECK: } // end sil function 'diamond_test_2'
+sil [ossa] @diamond_test_2 : $@convention(thin) (@owned NativeObjectPair) -> @owned Klass { // Both branches load the same field; per CHECK the copies are hoisted to bb0 and the unused one destroyed per branch.
+bb0(%0 : @owned $NativeObjectPair):
+  %1 = alloc_stack $NativeObjectPair
+  store %0 to [init] %1 : $*NativeObjectPair
+  cond_br undef, bb1, bb2
+
+bb1:
+  %2 = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x
+  %3 = load [copy] %2 : $*Klass // Promotable.
+  br bb3(%3 : $Klass)
+
+bb2:
+  %4 = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x
+  %5 = load [copy] %4 : $*Klass // Promotable.
+  br bb3(%5 : $Klass)
+
+bb3(%6 : @owned $Klass):
+  destroy_addr %1 : $*NativeObjectPair
+  dealloc_stack %1 : $*NativeObjectPair
+  return %6 : $Klass
+}
+
+////////////////////
+// Negative Tests //
+////////////////////
+
+// CHECK-LABEL: sil [ossa] @simple_nontrivial_loadtake_no_promote : $@convention(thin) (@owned Klass) -> @owned Klass {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK: [[STACK:%.*]] = alloc_stack $Klass
+// CHECK: store [[ARG]] to [init] [[STACK]]
+// CHECK: [[RESULT:%.*]] = load [take] [[STACK]]
+// CHECK: dealloc_stack [[STACK]]
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'simple_nontrivial_loadtake_no_promote'
+sil [ossa] @simple_nontrivial_loadtake_no_promote : $@convention(thin) (@owned Klass) -> @owned Klass { // Negative test: the CHECK block keeps alloc_stack/store/load [take] untouched.
+bb0(%0 : @owned $Klass):
+  %1 = alloc_stack $Klass
+  store %0 to [init] %1 : $*Klass
+  %2 = load [take] %1 : $*Klass // Not promoted by this pass.
+  dealloc_stack %1 : $*Klass
+  return %2 : $Klass
+}
diff --git a/test/SILOptimizer/predictable_memopt_ownership.sil b/test/SILOptimizer/predictable_memopt_ownership.sil
new file mode 100644
index 0000000..e2509a6
--- /dev/null
+++ b/test/SILOptimizer/predictable_memopt_ownership.sil
@@ -0,0 +1,989 @@
+// RUN: %target-sil-opt -enable-sil-ownership -enable-sil-verify-all %s -predictable-memaccess-opts -predictable-deadalloc-elim | %FileCheck %s
+
+sil_stage raw
+
+import Swift
+import Builtin
+
+//////////////////
+// Declarations //
+//////////////////
+
+struct NativeObjectPair { // Two non-trivial fields.
+  var x: Builtin.NativeObject
+  var y: Builtin.NativeObject
+}
+
+struct ContainsNativeObject { // Mixed trivial/non-trivial fields.
+  var x : Builtin.NativeObject
+  var y : Int32
+  var z : Builtin.NativeObject
+}
+
+struct ComplexStruct { // Nested aggregate for multi-level projection tests.
+  var f1 : Builtin.NativeObject
+  var f2 : ContainsNativeObject
+  var f3 : Builtin.Int32
+}
+
+sil @inout_builtinobject_user : $@convention(thin) (@inout Builtin.NativeObject) -> ()
+sil @get_builtin_object : $@convention(thin) () -> @owned Builtin.NativeObject
+
+///////////
+// Tests //
+///////////
+
+// CHECK-LABEL: sil [ossa] @simple_reg_promotion
+// CHECK: bb0(%0 : $Int):
+// CHECK-NEXT: return %0 : $Int
+sil [ossa] @simple_reg_promotion : $@convention(thin) (Int) -> Int { // Two chained boxes collapse to returning %0 (see CHECK above).
+bb0(%0 : $Int):
+  %1 = alloc_box $<τ_0_0> { var τ_0_0 } <Int>
+  %1a = project_box %1 : $<τ_0_0> { var τ_0_0 } <Int>, 0
+  store %0 to [trivial] %1a : $*Int
+  %3 = alloc_box $<τ_0_0> { var τ_0_0 } <Int>
+  %3a = project_box %3 : $<τ_0_0> { var τ_0_0 } <Int>, 0
+  %4 = load [trivial] %1a : $*Int // Forwarded from the store of %0.
+  store %4 to [trivial] %3a : $*Int
+  %6 = load [trivial] %3a : $*Int // Forwarded again; both dead boxes are then eliminated.
+  destroy_value %3 : $<τ_0_0> { var τ_0_0 } <Int>
+  destroy_value %1 : $<τ_0_0> { var τ_0_0 } <Int>
+  return %6 : $Int
+}
+
+// Verify that promotion has promoted the tuple load away, and we know that
+// %0 is being returned through scalar instructions in SSA form.
+//
+// CHECK-LABEL: sil [ossa] @tuple_reg_promotion
+// CHECK: bb0(%0 : $Int):
+// CHECK-NEXT: [[TUPLE:%[0-9]+]] = tuple ({{.*}} : $Int, {{.*}} : $Int)
+// CHECK-NEXT: [[TUPLE_ELT:%[0-9]+]] = tuple_extract [[TUPLE]] : $(Int, Int), 0
+// CHECK-NEXT: return [[TUPLE_ELT]] : $Int
+sil [ossa] @tuple_reg_promotion : $@convention(thin) (Int) -> Int { // Tuple load rebuilt from element stores (CHECK: tuple + tuple_extract of %0).
+bb0(%0 : $Int):
+  %1 = alloc_box $<τ_0_0> { var τ_0_0 } <(Int, Int)>
+  %1a = project_box %1 : $<τ_0_0> { var τ_0_0 } <(Int, Int)>, 0
+  %a = tuple_element_addr %1a : $*(Int, Int), 0
+  %b = tuple_element_addr %1a : $*(Int, Int), 1
+  store %0 to [trivial] %a : $*Int
+  store %0 to [trivial] %b : $*Int
+  %c = load [trivial] %1a : $*(Int, Int) // Promotable: both elements available.
+  %d = tuple_extract %c : $(Int, Int), 0
+  destroy_value %1 : $<τ_0_0> { var τ_0_0 } <(Int, Int)>
+  return %d : $Int
+}
+
+sil @takes_Int_inout : $@convention(thin) (@inout Int) -> () // External callees used to force inout escapes below.
+sil @takes_NativeObject_inout : $@convention(thin) (@inout Builtin.NativeObject) -> ()
+
+// Verify that load promotion works properly with inout arguments.
+//
+// func used_by_inout(a : Int) -> (Int, Int) {
+// var t = a
+// takes_Int_inout(&a)
+// return (t, a)
+//}
+//
+// CHECK-LABEL: sil [ossa] @used_by_inout : $@convention(thin) (Int) -> (Int, Int) {
+// CHECK: bb0([[ARG:%.*]] : $Int):
+sil [ossa] @used_by_inout : $@convention(thin) (Int) -> (Int, Int) {
+bb0(%0 : $Int):
+  // This alloc_box can't be removed since its address is passed to an inout call.
+  // CHECK: [[BOX:%.*]] = alloc_box $<τ_0_0> { var τ_0_0 } <Int>
+  // CHECK: [[PB_BOX:%.*]] = project_box [[BOX]]
+  %1 = alloc_box $<τ_0_0> { var τ_0_0 } <Int>
+  %1a = project_box %1 : $<τ_0_0> { var τ_0_0 } <Int>, 0
+  store %0 to [trivial] %1a : $*Int
+
+  // This load should be eliminated.
+  // CHECK-NOT: load
+  // CHECK: [[FUNC:%.*]] = function_ref @takes_Int_inout : $@convention(thin) (@inout Int) -> ()
+  // CHECK: apply [[FUNC]]([[PB_BOX]])
+  %3 = load [trivial] %1a : $*Int // Pre-call value: forwards to %0.
+  %5 = function_ref @takes_Int_inout : $@convention(thin) (@inout Int) -> ()
+  %6 = apply %5(%1a) : $@convention(thin) (@inout Int) -> ()
+
+  // This load is needed in case the callee modifies the allocation.
+  // CHECK: [[RES:%[0-9]+]] = load [trivial] [[PB_BOX]]
+  %7 = load [trivial] %1a : $*Int
+
+  // This should use the incoming argument to the function.
+  // CHECK: tuple ([[ARG]] : $Int, [[RES]] : $Int)
+  %8 = tuple (%3 : $Int, %7 : $Int)
+  destroy_value %1 : $<τ_0_0> { var τ_0_0 } <Int>
+  return %8 : $(Int, Int)
+}
+
+
+struct AddressOnlyStruct { // Contains an existential (Any), so it is address-only.
+  var a : Any
+  var b : Int
+}
+
+/// returns_generic_struct - This returns a struct by reference.
+sil @returns_generic_struct : $@convention(thin) () -> @out AddressOnlyStruct
+
+sil @takes_closure : $@convention(thin) (@owned @callee_owned () -> ()) -> () // Closure helpers for the escape test below.
+sil @closure0 : $@convention(thin) (@owned <τ_0_0> { var τ_0_0 } <Int>) -> ()
+
+
+// CHECK-LABEL: sil [ossa] @closure_test2
+sil [ossa] @closure_test2 : $@convention(thin) (Int) -> Int { // The box is captured by partial_apply, so loads in the escape region stay.
+bb0(%1 : $Int):
+  %0 = alloc_box $<τ_0_0> { var τ_0_0 } <Int>
+  %0a = project_box %0 : $<τ_0_0> { var τ_0_0 } <Int>, 0
+  store %1 to [trivial] %0a : $*Int // CHECK: store
+
+  %5 = function_ref @takes_closure : $@convention(thin) (@owned @callee_owned () -> ()) -> ()
+  %6 = function_ref @closure0 : $@convention(thin) (@owned <τ_0_0> { var τ_0_0 } <Int>) -> ()
+  %0Copy = copy_value %0 : $<τ_0_0> { var τ_0_0 } <Int>
+  %8 = partial_apply %6(%0Copy) : $@convention(thin) (@owned <τ_0_0> { var τ_0_0 } <Int>) -> ()
+  %9 = apply %5(%8) : $@convention(thin) (@owned @callee_owned () -> ()) -> ()
+  destroy_value %0 : $<τ_0_0> { var τ_0_0 } <Int>
+
+  store %1 to [trivial] %0a : $*Int // CHECK: store
+
+  // In an escape region, we should not promote loads.
+  %r = load [trivial] %0a : $*Int // CHECK: load
+  return %r : $Int
+}
+
+class SomeClass {} // Opaque class type returned by the external helper below.
+
+sil @getSomeClass : $@convention(thin) () -> @owned SomeClass
+
+
+// CHECK-LABEL: sil [ossa] @assign_test_trivial
+//
+// Verify that the load got forwarded from an assign.
+// CHECK: return %0 : $Int
+sil [ossa] @assign_test_trivial : $@convention(thin) (Int) -> Int { // Repeated stores of the same trivial value; the load forwards %0 (CHECK: return %0).
+bb0(%0 : $Int):
+  %1 = alloc_box $<τ_0_0> { var τ_0_0 } <Int>
+  %1a = project_box %1 : $<τ_0_0> { var τ_0_0 } <Int>, 0
+
+  store %0 to [trivial] %1a : $*Int
+  store %0 to [trivial] %1a : $*Int
+  store %0 to [trivial] %1a : $*Int // Redundant re-assignments of the same value.
+
+  %2 = load [trivial] %1a : $*Int // Forwarded from the last store.
+  destroy_value %1 : $<τ_0_0> { var τ_0_0 } <Int>
+  return %2 : $Int
+}
+
+// CHECK-LABEL: sil [ossa] @multiple_level_extract_1 : $@convention(thin) (@owned ContainsNativeObject) -> Builtin.Int32 {
+// CHECK: bb0([[ARG:%.*]] : @owned $ContainsNativeObject):
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[FIELD1:%.*]] = struct_extract [[BORROWED_ARG]] : $ContainsNativeObject, #ContainsNativeObject.y
+// CHECK: [[FIELD2:%.*]] = struct_extract [[FIELD1]] : $Int32, #Int32._value
+// CHECK: end_borrow [[BORROWED_ARG]]
+// CHECK: destroy_value [[ARG]]
+// CHECK: return [[FIELD2]]
+// CHECK: } // end sil function 'multiple_level_extract_1'
+sil [ossa] @multiple_level_extract_1 : $@convention(thin) (@owned ContainsNativeObject) -> Builtin.Int32 { // Nested projection (.y._value) collapses into chained struct_extracts of %0 (see CHECK).
+bb0(%0 : @owned $ContainsNativeObject):
+  %1 = alloc_stack $ContainsNativeObject
+  store %0 to [init] %1 : $*ContainsNativeObject
+
+  %2 = struct_element_addr %1 : $*ContainsNativeObject, #ContainsNativeObject.y
+  %3 = struct_element_addr %2 : $*Int32, #Int32._value
+  %4 = load [trivial] %3 : $*Builtin.Int32 // Promotable two-level partial use.
+
+  destroy_addr %1 : $*ContainsNativeObject
+  dealloc_stack %1 : $*ContainsNativeObject
+  return %4 : $Builtin.Int32
+}
+
+// CHECK-LABEL: sil [ossa] @multiple_level_extract_2 : $@convention(thin) (@owned ComplexStruct) -> (@owned Builtin.NativeObject, @owned Builtin.NativeObject, Builtin.Int32) {
+// CHECK: bb0([[ARG:%.*]] : @owned $ComplexStruct):
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[f1:%.*]] = struct_extract [[BORROWED_ARG]] : $ComplexStruct, #ComplexStruct.f3
+// CHECK: end_borrow [[BORROWED_ARG]]
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[f2:%.*]] = struct_extract [[BORROWED_ARG]] : $ComplexStruct, #ComplexStruct.f2
+// CHECK: [[f2_x:%.*]] = struct_extract [[f2]] : $ContainsNativeObject, #ContainsNativeObject.x
+// CHECK: [[f2_x_copy:%.*]] = copy_value [[f2_x]]
+// CHECK: end_borrow [[BORROWED_ARG]]
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[f3:%.*]] = struct_extract [[BORROWED_ARG]] : $ComplexStruct, #ComplexStruct.f1
+// CHECK: [[f3_copy:%.*]] = copy_value [[f3]]
+// CHECK: end_borrow [[BORROWED_ARG]]
+// CHECK: destroy_value [[ARG]]
+// CHECK: [[RESULT:%.*]] = tuple ([[f3_copy]] : $Builtin.NativeObject, [[f2_x_copy]] : $Builtin.NativeObject, [[f1]] : $Builtin.Int32)
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'multiple_level_extract_2'
+sil [ossa] @multiple_level_extract_2 : $@convention(thin) (@owned ComplexStruct) -> (@owned Builtin.NativeObject, @owned Builtin.NativeObject, Builtin.Int32) { // Three sibling projections: each load becomes a borrow + (chained) struct_extract of %0, per CHECK.
+bb0(%0 : @owned $ComplexStruct):
+  %1 = alloc_stack $ComplexStruct
+  store %0 to [init] %1 : $*ComplexStruct
+
+  %2 = struct_element_addr %1 : $*ComplexStruct, #ComplexStruct.f1
+  %3 = struct_element_addr %1 : $*ComplexStruct, #ComplexStruct.f2
+  %4 = struct_element_addr %3 : $*ContainsNativeObject, #ContainsNativeObject.x
+  %5 = struct_element_addr %1 : $*ComplexStruct, #ComplexStruct.f3
+
+  %6 = load [copy] %2 : $*Builtin.NativeObject
+  %7 = load [copy] %4 : $*Builtin.NativeObject // Two-level projection: f2.x.
+  %8 = load [trivial] %5 : $*Builtin.Int32
+
+  destroy_addr %1 : $*ComplexStruct
+  dealloc_stack %1 : $*ComplexStruct
+
+  %9 = tuple(%6 : $Builtin.NativeObject, %7 : $Builtin.NativeObject, %8 : $Builtin.Int32)
+  return %9 : $(Builtin.NativeObject, Builtin.NativeObject, Builtin.Int32)
+}
+
+var int_global : Int // NOTE(review): declared but not referenced by any test visible in this chunk.
+
+// CHECK-LABEL: sil [ossa] @promote_alloc_stack
+sil [ossa] @promote_alloc_stack : $@convention(thin) (Int32) -> Builtin.Int32 { // The whole alloc_stack round-trip folds to returning the literal.
+bb0(%0 : $Int32): // %0 is intentionally unused.
+  %5 = integer_literal $Builtin.Int32, 1
+  // CHECK: [[IL:%[0-9]+]] = integer_literal
+
+  %18 = struct $Int32 (%5 : $Builtin.Int32)
+  %22 = alloc_stack $Int32
+
+  // CHECK-NOT: alloc_stack
+
+  store %18 to [trivial] %22 : $*Int32
+  %24 = struct_element_addr %22 : $*Int32, #Int32._value
+  %25 = load [trivial] %24 : $*Builtin.Int32 // Forwarded: extracts %5 back out of the stored struct.
+  dealloc_stack %22 : $*Int32
+  // CHECK-NEXT: return [[IL]]
+  return %25 : $Builtin.Int32
+}
+
+// CHECK-LABEL: sil [ossa] @copy_addr_to_load
+// CHECK: bb0(%0 : $Int):
+// CHECK-NEXT: return %0
+sil [ossa] @copy_addr_to_load : $@convention(thin) (Int) -> Int {
+bb0(%0 : $Int):
+ %1 = alloc_stack $Int
+ store %0 to [trivial] %1 : $*Int
+ %2 = alloc_stack $Int
+
+ copy_addr %1 to [initialization] %2 : $*Int
+
+ %3 = load [trivial] %2 : $*Int
+
+ dealloc_stack %2 : $*Int
+ dealloc_stack %1 : $*Int
+ return %3 : $Int
+}
+
+// rdar://15170149
+// CHECK-LABEL: sil [ossa] @store_to_copyaddr
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NEXT: return [[ARG]]
+sil [ossa] @store_to_copyaddr : $(Bool) -> Bool {
+bb0(%0 : $Bool):
+ %1 = alloc_stack $Bool
+ store %0 to [trivial] %1 : $*Bool
+ %3 = alloc_stack $Bool
+ copy_addr %1 to [initialization] %3 : $*Bool
+ %5 = load [trivial] %3 : $*Bool
+ copy_addr %3 to %1 : $*Bool
+ %12 = load [trivial] %1 : $*Bool
+ dealloc_stack %3 : $*Bool
+ dealloc_stack %1 : $*Bool
+ return %12 : $Bool
+}
+
+// CHECK-LABEL: sil [ossa] @cross_block_load_promotion
+sil [ossa] @cross_block_load_promotion : $@convention(thin) (Int) -> Int {
+bb0(%0 : $Int):
+ %1 = alloc_stack $Int
+ store %0 to [trivial] %1 : $*Int
+ %11 = integer_literal $Builtin.Int1, 1
+ cond_br %11, bb1, bb2
+
+bb1:
+ br bb5
+
+bb2:
+ br bb5
+
+bb5:
+ %15 = load [trivial] %1 : $*Int
+ dealloc_stack %1 : $*Int
+ return %15 : $Int
+
+// CHECK: return %0 : $Int
+}
+
+struct XYStruct { var x, y : Int }
+
+sil @init_xy_struct : $@convention(thin) () -> XYStruct
+
+
+// CHECK-LABEL: sil [ossa] @cross_block_load_promotion_struct
+sil [ossa] @cross_block_load_promotion_struct : $@convention(thin) (Int, Int) -> Int {
+bb0(%0 : $Int, %1 : $Int):
+ %stack = alloc_stack $XYStruct
+
+ %7 = function_ref @init_xy_struct : $@convention(thin) () -> XYStruct
+ %9 = apply %7() : $@convention(thin) () -> XYStruct
+ store %9 to [trivial] %stack : $*XYStruct
+
+ %11 = struct_element_addr %stack : $*XYStruct, #XYStruct.y
+ store %0 to [trivial] %11 : $*Int
+
+ %12 = integer_literal $Builtin.Int1, 1
+ cond_br %12, bb1, bb2
+
+bb1:
+ %13 = struct_element_addr %stack : $*XYStruct, #XYStruct.x
+ store %1 to [trivial] %13 : $*Int
+ br bb3
+
+bb2:
+ br bb3
+
+bb3:
+ %15 = load [trivial] %11 : $*Int
+ dealloc_stack %stack : $*XYStruct
+ return %15 : $Int
+
+// CHECK: return %0 : $Int
+}
+
+// CHECK-LABEL: sil [ossa] @cross_block_load_promotion_struct2
+sil [ossa] @cross_block_load_promotion_struct2 : $@convention(thin) (Int, Int) -> Int {
+bb0(%0 : $Int, %2 : $Int):
+ %1 = alloc_stack $XYStruct
+
+ %7 = function_ref @init_xy_struct : $@convention(thin) () -> XYStruct
+ %9 = apply %7() : $@convention(thin) () -> XYStruct
+ store %9 to [trivial] %1 : $*XYStruct
+
+ %11 = struct_element_addr %1 : $*XYStruct, #XYStruct.x
+ store %0 to [trivial] %11 : $*Int
+
+ %12 = integer_literal $Builtin.Int1, 1
+ cond_br %12, bb1, bb2
+
+bb1:
+ %13 = struct_element_addr %1 : $*XYStruct, #XYStruct.x
+ store %0 to [trivial] %13 : $*Int
+ br bb5
+
+bb2:
+ br bb5
+
+bb5:
+ %15 = load [trivial] %11 : $*Int
+ dealloc_stack %1 : $*XYStruct
+ return %15 : $Int
+
+// CHECK: return %0 : $Int
+}
+
+
+// CHECK-LABEL: sil [ossa] @destroy_addr_test
+sil [ossa] @destroy_addr_test : $@convention(method) (@owned SomeClass) -> @owned SomeClass {
+bb0(%0 : @owned $SomeClass):
+ %1 = alloc_stack $SomeClass
+ %2 = tuple ()
+ store %0 to [init] %1 : $*SomeClass
+ %7 = load [copy] %1 : $*SomeClass
+ destroy_value %7 : $SomeClass
+ %12 = load [copy] %1 : $*SomeClass
+ destroy_addr %1 : $*SomeClass
+ dealloc_stack %1 : $*SomeClass
+ return %12 : $SomeClass
+}
+
+
+protocol P {}
+class C : P {}
+
+sil [ossa] @use : $@convention(thin) (@in P) -> ()
+
+// rdar://15492647
+// CHECK-LABEL: sil [ossa] @destroy_addr_removed
+sil [ossa] @destroy_addr_removed : $@convention(thin) () -> () {
+bb0:
+ %3 = alloc_stack $SomeClass
+ %f = function_ref @getSomeClass : $@convention(thin) () -> @owned SomeClass
+ %9 = apply %f() : $@convention(thin) () -> @owned SomeClass
+ // CHECK: [[CVAL:%[0-9]+]] = apply
+
+ store %9 to [init] %3 : $*SomeClass
+ destroy_addr %3 : $*SomeClass
+ dealloc_stack %3 : $*SomeClass
+ %15 = tuple ()
+ return %15 : $()
+// CHECK-NEXT: destroy_value [[CVAL]]
+}
+
+// <rdar://problem/17755462> Predictable memory opts removes refcount operation
+// CHECK-LABEL: sil [ossa] @dead_allocation_1
+sil [ossa] @dead_allocation_1 : $@convention(thin) (@owned Optional<AnyObject>) -> () {
+bb0(%0 : @owned $Optional<AnyObject>):
+// CHECK: copy_value %0
+ %1 = alloc_stack $Optional<AnyObject>
+ %2 = alloc_stack $Optional<AnyObject>
+ store %0 to [init] %2 : $*Optional<AnyObject>
+// CHECK-NOT: copy_addr
+ copy_addr %2 to [initialization] %1 : $*Optional<AnyObject>
+ destroy_addr %2 : $*Optional<AnyObject>
+ dealloc_stack %2 : $*Optional<AnyObject>
+ destroy_addr %1 : $*Optional<AnyObject>
+ dealloc_stack %1 : $*Optional<AnyObject>
+ %3 = tuple ()
+ return %3 : $()
+}
+
+// CHECK-LABEL: sil [ossa] @dead_allocation_2
+sil [ossa] @dead_allocation_2 : $@convention(thin) (@owned Optional<AnyObject>) -> () {
+bb0(%0 : @owned $Optional<AnyObject>):
+// CHECK: copy_value %0
+// CHECK-NOT: alloc_stack
+ %1 = alloc_stack $Optional<AnyObject>
+ %2 = alloc_stack $Optional<AnyObject>
+ store %0 to [init] %1 : $*Optional<AnyObject>
+// CHECK-NOT: copy_addr
+ copy_addr %1 to [initialization] %2 : $*Optional<AnyObject>
+ destroy_addr %2 : $*Optional<AnyObject>
+ dealloc_stack %2 : $*Optional<AnyObject>
+ destroy_addr %1 : $*Optional<AnyObject>
+ dealloc_stack %1 : $*Optional<AnyObject>
+ %3 = tuple ()
+ return %3 : $()
+}
+
+enum IndirectCase {
+ indirect case X(Int)
+}
+
+// CHECK-LABEL: sil [ossa] @indirect_enum_box
+sil [ossa] @indirect_enum_box : $@convention(thin) (Int) -> @owned IndirectCase {
+// CHECK: bb0([[X:%.*]] : $Int):
+entry(%x : $Int):
+ // CHECK: [[BOX:%.*]] = alloc_box ${ var Int }
+ %b = alloc_box ${ var Int }
+ // CHECK: [[PB:%.*]] = project_box [[BOX]]
+ %ba = project_box %b : ${ var Int }, 0
+ // CHECK: store [[X]] to [trivial] [[PB]]
+ store %x to [trivial] %ba : $*Int
+ // CHECK: [[E:%.*]] = enum $IndirectCase, #IndirectCase.X!enumelt.1, [[BOX]] : ${ var Int }
+ %e = enum $IndirectCase, #IndirectCase.X!enumelt.1, %b : ${ var Int }
+ // CHECK: return [[E]]
+ return %e : $IndirectCase
+}
+
+sil [ossa] @write_to_bool : $@convention(c) (UnsafeMutablePointer<Bool>) -> Int32
+
+// CHECK-LABEL: sil [ossa] @escaping_address
+sil [ossa] @escaping_address : $@convention(thin) () -> Bool {
+bb0:
+ // CHECK: [[A:%[0-9]+]] = alloc_stack
+ %a = alloc_stack $Bool
+ %f = function_ref @write_to_bool : $@convention(c) (UnsafeMutablePointer<Bool>) -> Int32
+ %a2p = address_to_pointer %a : $*Bool to $Builtin.RawPointer
+ %ump = struct $UnsafeMutablePointer<Bool> (%a2p : $Builtin.RawPointer)
+
+ %0 = integer_literal $Builtin.Int1, 0
+ %b0 = struct $Bool (%0 : $Builtin.Int1)
+ // CHECK: [[BV:%[0-9]+]] = struct_element_addr [[A]]
+ %bv = struct_element_addr %a : $*Bool, #Bool._value
+ store %b0 to [trivial] %a : $*Bool
+
+ // CHECK: apply
+ %ap = apply %f(%ump) : $@convention(c) (UnsafeMutablePointer<Bool>) -> Int32
+
+ // CHECK: [[L:%[0-9]+]] = load [trivial] [[BV]]
+ %l = load [trivial] %bv : $*Builtin.Int1
+ // CHECK: [[R:%[0-9]+]] = struct $Bool ([[L]]
+ %r = struct $Bool (%l : $Builtin.Int1)
+ dealloc_stack %a : $*Bool
+ // CHECK: return [[R]]
+ return %r : $Bool
+}
+
+///////////////////
+// Diamond Tests //
+///////////////////
+
+// These tests ensure that we insert all gep operations, before the stores and
+// any new load operations at the location where the old load was. It also
+// ensures that we are able to handle values that are provided with multiple
+// available values from different stores. Today the tests use the exact same
+// value since pred mem opts is so conservative (it will not support having
+// different available values from different blocks due to the predicate it uses
+// while merging).
+
+// We should just remove the stores here.
+// CHECK-LABEL: sil [ossa] @diamond_test_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK-NOT: store
+// CHECK-NOT: load
+// CHECK: } // end sil function 'diamond_test_1'
+sil [ossa] @diamond_test_1 : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject):
+ %1 = alloc_stack $Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ store %0 to [init] %1 : $*Builtin.NativeObject
+ br bb3
+
+bb2:
+ store %0 to [init] %1 : $*Builtin.NativeObject
+ br bb3
+
+bb3:
+ %2 = load [copy] %1 : $*Builtin.NativeObject
+ destroy_value %2 : $Builtin.NativeObject
+ destroy_addr %1 : $*Builtin.NativeObject
+ dealloc_stack %1 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// This test makes sure that we insert the tuple_extracts that we need before
+// the store in bb0, not at the load block.
+// CHECK-LABEL: sil [ossa] @diamond_test_2 : $@convention(thin) (@owned NativeObjectPair) -> @owned Builtin.NativeObject {
+// CHECK: bb0([[ARG:%.*]] : @owned $NativeObjectPair):
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[LHS1:%.*]] = struct_extract [[BORROWED_ARG]] : $NativeObjectPair, #NativeObjectPair.x
+// CHECK: [[LHS1_COPY:%.*]] = copy_value [[LHS1]]
+// CHECK: [[BORROWED_ARG:%.*]] = begin_borrow [[ARG]]
+// CHECK: [[LHS2:%.*]] = struct_extract [[BORROWED_ARG]] : $NativeObjectPair, #NativeObjectPair.x
+// CHECK: [[LHS2_COPY:%.*]] = copy_value [[LHS2]]
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK: destroy_value [[LHS1_COPY]]
+// CHECK: br bb3([[LHS2_COPY]] :
+//
+// CHECK: bb2:
+// CHECK: destroy_value [[LHS2_COPY]] : $Builtin.NativeObject
+// CHECK: br bb3([[LHS1_COPY]] :
+//
+// CHECK: bb3([[PHI:%.*]] :
+// CHECK: destroy_value [[ARG]]
+// CHECK: return [[PHI]]
+// CHECK: } // end sil function 'diamond_test_2'
+sil [ossa] @diamond_test_2 : $@convention(thin) (@owned NativeObjectPair) -> @owned Builtin.NativeObject {
+bb0(%0 : @owned $NativeObjectPair):
+ %1 = alloc_stack $NativeObjectPair
+ store %0 to [init] %1 : $*NativeObjectPair
+ cond_br undef, bb1, bb2
+
+bb1:
+ %2 = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x
+ %3 = load [copy] %2 : $*Builtin.NativeObject
+ br bb3(%3 : $Builtin.NativeObject)
+
+bb2:
+ %4 = struct_element_addr %1 : $*NativeObjectPair, #NativeObjectPair.x
+ %5 = load [copy] %4 : $*Builtin.NativeObject
+ br bb3(%5 : $Builtin.NativeObject)
+
+bb3(%6 : @owned $Builtin.NativeObject):
+ destroy_addr %1 : $*NativeObjectPair
+ dealloc_stack %1 : $*NativeObjectPair
+ return %6 : $Builtin.NativeObject
+}
+
+// We should be able to promote all memory operations here.
+//
+// CHECK-LABEL: sil [ossa] @diamond_test_3 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> @owned Builtin.NativeObject {
+// CHECK-NOT: alloc_stack
+// CHECK-NOT: load
+// CHECK-NOT: store
+// CHECK: } // end sil function 'diamond_test_3'
+sil [ossa] @diamond_test_3 : $@convention(thin) (@owned Builtin.NativeObject, @owned Builtin.NativeObject) -> @owned Builtin.NativeObject {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $Builtin.NativeObject):
+ %2 = alloc_stack $NativeObjectPair
+ %3 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+ %4 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.y
+ store %0 to [init] %3 : $*Builtin.NativeObject
+ store %1 to [init] %4 : $*Builtin.NativeObject
+ cond_br undef, bb1, bb2
+
+bb1:
+ %tup_addr_1 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+ %tup_val_1 = load [copy] %tup_addr_1 : $*Builtin.NativeObject
+ br bb3(%tup_val_1 : $Builtin.NativeObject)
+
+bb2:
+ %tup_addr_2 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+ %tup_val_2 = load [copy] %tup_addr_2 : $*Builtin.NativeObject
+ br bb3(%tup_val_2 : $Builtin.NativeObject)
+
+bb3(%result : @owned $Builtin.NativeObject):
+ destroy_addr %2 : $*NativeObjectPair
+ dealloc_stack %2 : $*NativeObjectPair
+ return %result : $Builtin.NativeObject
+}
+
+struct NativeObjectTriple {
+ var f1: Builtin.NativeObject
+ var f2: NativeObjectPair
+}
+
+// Make sure we insert the struct_extracts in bb1, bb2.
+//
+// CHECK-LABEL: sil [ossa] @diamond_test_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair) -> @owned Builtin.NativeObject {
+// CHECK: bb0([[ARG0:%.*]] : @owned $Builtin.NativeObject, [[ARG1:%.*]] : @owned $NativeObjectPair):
+// CHECK: cond_br undef, bb1, bb2
+//
+// CHECK: bb1:
+// CHECK-NEXT: [[BORROWED_ARG1:%.*]] = begin_borrow [[ARG1]]
+// CHECK-NEXT: [[PAIR_LHS:%.*]] = struct_extract [[BORROWED_ARG1]]
+// CHECK-NEXT: [[PAIR_LHS_COPY:%.*]] = copy_value [[PAIR_LHS]]
+// CHECK-NEXT: end_borrow [[BORROWED_ARG1]]
+// CHECK-NEXT: br bb3([[PAIR_LHS_COPY]] :
+//
+// CHECK: bb2:
+// CHECK-NEXT: [[BORROWED_ARG1:%.*]] = begin_borrow [[ARG1]]
+// CHECK-NEXT: [[PAIR_LHS:%.*]] = struct_extract [[BORROWED_ARG1]]
+// CHECK-NEXT: [[PAIR_LHS_COPY:%.*]] = copy_value [[PAIR_LHS]]
+// CHECK-NEXT: end_borrow [[BORROWED_ARG1]]
+// CHECK-NEXT: br bb3([[PAIR_LHS_COPY]] :
+//
+// CHECK: bb3([[PHI:%.*]] : @owned $Builtin.NativeObject):
+// CHECK-NEXT: [[REFORMED:%.*]] = struct $NativeObjectTriple ([[ARG0]] : {{.*}}, [[ARG1]] : {{.*}})
+// CHECK-NEXT: destroy_value [[REFORMED]]
+// CHECK-NEXT: return [[PHI]]
+// CHECK: } // end sil function 'diamond_test_4'
+sil [ossa] @diamond_test_4 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair) -> @owned Builtin.NativeObject {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $NativeObjectPair):
+ %2 = alloc_stack $NativeObjectTriple
+ cond_br undef, bb1, bb2
+
+bb1:
+ %3 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f1
+ %4 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ store %0 to [init] %3 : $*Builtin.NativeObject
+ store %1 to [init] %4 : $*NativeObjectPair
+ br bb3
+
+bb2:
+ %5 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f1
+ %6 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ store %0 to [init] %5 : $*Builtin.NativeObject
+ store %1 to [init] %6 : $*NativeObjectPair
+ br bb3
+
+bb3:
+ %11 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ %12 = struct_element_addr %11 : $*NativeObjectPair, #NativeObjectPair.x
+ %13 = load [copy] %12 : $*Builtin.NativeObject
+ destroy_addr %2 : $*NativeObjectTriple
+ dealloc_stack %2 : $*NativeObjectTriple
+ return %13 : $Builtin.NativeObject
+}
+
+// Make sure that we do the right thing if our definite init value is partially
+// overridden along one path
+//
+// CHECK-LABEL: sil [ossa] @diamond_test_5 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair, @owned Builtin.NativeObject) -> @owned NativeObjectPair {
+// CHECK: bb0([[ARG0:%.*]] : @owned $Builtin.NativeObject, [[ARG1:%.*]] : @owned $NativeObjectPair, [[ARG2:%.*]] : @owned $Builtin.NativeObject):
+// CHECK: [[BOX:%.*]] = alloc_stack $NativeObjectTriple
+// CHECK: br bb1
+//
+// CHECK: bb1:
+// CHECK: [[TRIPLE_LHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f1
+// CHECK: [[TRIPLE_RHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f2
+// CHECK: store [[ARG0]] to [init] [[TRIPLE_LHS]]
+// CHECK: [[BORROWED_ARG1:%.*]] = begin_borrow [[ARG1]]
+// CHECK: [[BORROWED_TRIPLE_RHS_RHS_VAL:%.*]] = struct_extract [[BORROWED_ARG1]] : $NativeObjectPair, #NativeObjectPair.y
+// CHECK: [[TRIPLE_RHS_RHS_VAL:%.*]] = copy_value [[BORROWED_TRIPLE_RHS_RHS_VAL]]
+// CHECK: store [[ARG1]] to [init] [[TRIPLE_RHS]]
+// CHECK: cond_br undef, bb2, bb3
+//
+// CHECK: bb2:
+// CHECK: [[TRIPLE_RHS_LHS:%.*]] = struct_element_addr [[TRIPLE_RHS]]
+// CHECK: store [[ARG2]] to [assign] [[TRIPLE_RHS_LHS]]
+// CHECK: br bb4
+//
+// CHECK: bb3:
+// CHECK: br bb4
+//
+// CHECK: bb4:
+// CHECK: [[TRIPLE_RHS_LHS:%.*]] = struct_element_addr [[TRIPLE_RHS]] : $*NativeObjectPair, #NativeObjectPair.x
+// CHECK: [[TRIPLE_RHS_LHS_VAL:%.*]] = load [copy] [[TRIPLE_RHS_LHS]] : $*Builtin.NativeObject
+// CHECK: [[STRUCT:%.*]] = struct $NativeObjectPair ([[TRIPLE_RHS_LHS_VAL]] : {{.*}}, [[TRIPLE_RHS_RHS_VAL]] : {{.*}})
+// CHECK: destroy_addr [[BOX]]
+// CHECK: return [[STRUCT]]
+// CHECK: } // end sil function 'diamond_test_5'
+sil [ossa] @diamond_test_5 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair, @owned Builtin.NativeObject) -> @owned NativeObjectPair {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $NativeObjectPair, %arg2 : @owned $Builtin.NativeObject):
+ %2 = alloc_stack $NativeObjectTriple
+ br bb1
+
+bb1:
+ %5 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f1
+ %6 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ store %0 to [init] %5 : $*Builtin.NativeObject
+ store %1 to [init] %6 : $*NativeObjectPair
+ cond_br undef, bb2, bb3
+
+bb2:
+ %11 = struct_element_addr %6 : $*NativeObjectPair, #NativeObjectPair.x
+ store %arg2 to [assign] %11 : $*Builtin.NativeObject
+ br bb4
+
+bb3:
+ destroy_value %arg2 : $Builtin.NativeObject
+ br bb4
+
+bb4:
+ %13 = load [copy] %6 : $*NativeObjectPair
+ destroy_addr %2 : $*NativeObjectTriple
+ dealloc_stack %2 : $*NativeObjectTriple
+ return %13 : $NativeObjectPair
+}
+
+// CHECK-LABEL: sil [ossa] @diamond_test_6 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair, @owned Builtin.NativeObject) -> @owned NativeObjectPair {
+// CHECK: bb0([[ARG0:%.*]] : @owned $Builtin.NativeObject, [[ARG1:%.*]] : @owned $NativeObjectPair, [[ARG2:%.*]] : @owned $Builtin.NativeObject):
+// CHECK: [[BOX:%.*]] = alloc_stack $NativeObjectTriple
+// CHECK: cond_br undef, [[TRUE_BB:bb[0-9]+]], [[FALSE_BB:bb[0-9]+]]
+//
+// CHECK: [[TRUE_BB]]:
+// CHECK: [[TRIPLE_LHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f1
+// CHECK: [[TRIPLE_RHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f2
+// CHECK: store [[ARG0]] to [init] [[TRIPLE_LHS]]
+// CHECK: [[BORROWED_ARG1:%.*]] = begin_borrow [[ARG1]]
+// CHECK: [[BORROWED_TRIPLE_RHS_RHS_VAL:%.*]] = struct_extract [[BORROWED_ARG1]] : $NativeObjectPair, #NativeObjectPair.y
+// CHECK: [[TRIPLE_RHS_RHS_VAL:%.*]] = copy_value [[BORROWED_TRIPLE_RHS_RHS_VAL]]
+// CHECK: store [[ARG1]] to [init] [[TRIPLE_RHS]]
+// CHECK: cond_br undef, [[CRITEDGE_BREAK_BB_1:bb[0-9]+]], [[CRITEDGE_BREAK_BB_2:bb[0-9]+]]
+//
+// CHECK: [[CRITEDGE_BREAK_BB_1]]:
+// CHECK-NEXT: br [[SUCC_2:bb[0-9]+]]([[TRIPLE_RHS_RHS_VAL]] :
+//
+// CHECK: [[CRITEDGE_BREAK_BB_2]]:
+// CHECK-NEXT: br [[SUCC_1:bb[0-9]+]]([[TRIPLE_RHS_RHS_VAL]] :
+//
+// CHECK: [[FALSE_BB]]:
+// CHECK: [[TRIPLE_LHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f1
+// CHECK: [[TRIPLE_RHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f2
+// CHECK: store [[ARG0]] to [init] [[TRIPLE_LHS]]
+// CHECK: [[BORROWED_ARG1:%.*]] = begin_borrow [[ARG1]]
+// CHECK: [[BORROWED_TRIPLE_RHS_RHS_VAL:%.*]] = struct_extract [[BORROWED_ARG1]] : $NativeObjectPair, #NativeObjectPair.y
+// CHECK: [[TRIPLE_RHS_RHS_VAL:%.*]] = copy_value [[BORROWED_TRIPLE_RHS_RHS_VAL]]
+// CHECK: store [[ARG1]] to [init] [[TRIPLE_RHS]]
+// CHECK: cond_br undef, [[CRITEDGE_BREAK_BB_1:bb[0-9]+]], [[CRITEDGE_BREAK_BB_2:bb[0-9]+]]
+//
+// CHECK: [[CRITEDGE_BREAK_BB_1]]:
+// CHECK-NEXT: br [[SUCC_2]]([[TRIPLE_RHS_RHS_VAL]] :
+//
+// CHECK: [[CRITEDGE_BREAK_BB_2]]:
+// CHECK-NEXT: br [[SUCC_1]]([[TRIPLE_RHS_RHS_VAL]] :
+//
+// CHECK: [[SUCC_2]]([[PHI1:%.*]] : @owned $Builtin.NativeObject):
+// CHECK: [[TRIPLE_RHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f2
+// CHECK: [[TRIPLE_RHS_LHS:%.*]] = struct_element_addr [[TRIPLE_RHS]]
+// CHECK: store [[ARG2]] to [assign] [[TRIPLE_RHS_LHS]]
+// CHECK: br [[EXIT_BB:bb[0-9]+]]([[PHI1:%.*]] : $Builtin.NativeObject)
+//
+// CHECK: [[SUCC_1]]([[PHI:%.*]] : @owned $Builtin.NativeObject):
+// CHECK: br [[EXIT_BB]]([[PHI]] : {{.*}})
+//
+// CHECK: [[EXIT_BB]]([[PHI:%.*]] : @owned $Builtin.NativeObject):
+// CHECK: [[TRIPLE_RHS:%.*]] = struct_element_addr [[BOX]] : $*NativeObjectTriple, #NativeObjectTriple.f2
+// CHECK: [[TRIPLE_RHS_LHS:%.*]] = struct_element_addr [[TRIPLE_RHS]] : $*NativeObjectPair, #NativeObjectPair.x
+// CHECK: [[TRIPLE_RHS_LHS_VAL:%.*]] = load [copy] [[TRIPLE_RHS_LHS]] : $*Builtin.NativeObject
+// CHECK: [[STRUCT:%.*]] = struct $NativeObjectPair ([[TRIPLE_RHS_LHS_VAL]] : {{.*}}, [[PHI]] : {{.*}})
+// CHECK: destroy_addr [[BOX]]
+// CHECK: return [[STRUCT]]
+// CHECK: } // end sil function 'diamond_test_6'
+sil [ossa] @diamond_test_6 : $@convention(thin) (@owned Builtin.NativeObject, @owned NativeObjectPair, @owned Builtin.NativeObject) -> @owned NativeObjectPair {
+bb0(%0 : @owned $Builtin.NativeObject, %1 : @owned $NativeObjectPair, %arg2 : @owned $Builtin.NativeObject):
+ %2 = alloc_stack $NativeObjectTriple
+ cond_br undef, bb1, bb2
+
+bb1:
+ %5 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f1
+ %6 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ store %0 to [init] %5 : $*Builtin.NativeObject
+ store %1 to [init] %6 : $*NativeObjectPair
+ cond_br undef, bb3, bb4
+
+bb3:
+ br bb7
+
+bb4:
+ br bb8
+
+bb2:
+ %7 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f1
+ %8 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ store %0 to [init] %7 : $*Builtin.NativeObject
+ store %1 to [init] %8 : $*NativeObjectPair
+ cond_br undef, bb5, bb6
+
+bb5:
+ br bb7
+
+bb6:
+ br bb8
+
+bb7:
+ %11 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ %12 = struct_element_addr %11 : $*NativeObjectPair, #NativeObjectPair.x
+ store %arg2 to [assign] %12 : $*Builtin.NativeObject
+ br bb9
+
+bb8:
+ destroy_value %arg2 : $Builtin.NativeObject
+ br bb9
+
+bb9:
+ %13 = struct_element_addr %2 : $*NativeObjectTriple, #NativeObjectTriple.f2
+ %14 = load [copy] %13 : $*NativeObjectPair
+ destroy_addr %2 : $*NativeObjectTriple
+ dealloc_stack %2 : $*NativeObjectTriple
+ return %14 : $NativeObjectPair
+}
+
+///////////////////////
+// Unreachable Tests //
+///////////////////////
+
+// Make sure that we can handle a dead allocation with a destroy_addr in an
+// unreachable block.
+//
+// TODO: We can support this with trivial changes to canPromoteDestroyAddr. We
+// just need to distinguish a promotion failure around lack of availability vs
+// promotion failure for other reasons.
+//
+//
+// CHECK-LABEL: sil [ossa] @dead_allocation_with_unreachable_destroy_addr : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] : @owned $Builtin.NativeObject):
+// CHECK-NEXT: alloc_stack
+// CHECK-NEXT: store
+// CHECK-NEXT: br bb1
+//
+// CHECK: bb1:
+// CHECK-NEXT: destroy_addr
+// CHECK-NEXT: dealloc_stack
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+//
+// CHECK: bb2:
+// CHECK-NEXT: destroy_addr
+// CHECK-NEXT: unreachable
+// CHECK: } // end sil function 'dead_allocation_with_unreachable_destroy_addr'
+sil [ossa] @dead_allocation_with_unreachable_destroy_addr : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $Builtin.NativeObject):
+ %1 = alloc_stack $Builtin.NativeObject
+ store %0 to [init] %1 : $*Builtin.NativeObject
+ br bb1
+
+bb1:
+ destroy_addr %1 : $*Builtin.NativeObject
+ dealloc_stack %1 : $*Builtin.NativeObject
+ %9999 = tuple()
+ return %9999 : $()
+
+bb2:
+ destroy_addr %1 : $*Builtin.NativeObject
+ unreachable
+}
+
+
+class K {
+ init()
+}
+
+sil [ossa] @init_k : $@convention(thin) () -> @out K
+
+struct S {
+ var k: K
+}
+
+// CHECK-LABEL: sil [ossa] @recursive_struct_destroy_with_apply : $@convention(thin) () -> @owned S {
+// CHECK: alloc_stack
+// CHECK: } // end sil function 'recursive_struct_destroy_with_apply'
+sil [ossa] @recursive_struct_destroy_with_apply : $@convention(thin) () -> @owned S {
+bb0:
+ %0 = alloc_stack $S
+ %1 = struct_element_addr %0 : $*S, #S.k
+ %2 = function_ref @init_k : $@convention(thin) () -> @out K
+ %3 = apply %2(%1) : $@convention(thin) () -> @out K
+ %4 = load [take] %0 : $*S
+ dealloc_stack %0 : $*S
+ return %4 : $S
+}
+
+struct SWithOpt {
+ var k: Optional<K>
+}
+
+// CHECK-LABEL: sil [ossa] @recursive_struct_destroy_with_enum_init : $@convention(thin) (@owned K) -> @owned SWithOpt {
+// CHECK: alloc_stack
+// CHECK: } // end sil function 'recursive_struct_destroy_with_enum_init'
+sil [ossa] @recursive_struct_destroy_with_enum_init : $@convention(thin) (@owned K) -> @owned SWithOpt {
+bb0(%arg : @owned $K):
+ %0 = alloc_stack $SWithOpt
+ %1 = struct_element_addr %0 : $*SWithOpt, #SWithOpt.k
+ %2 = init_enum_data_addr %1 : $*Optional<K>, #Optional.some!enumelt.1
+ store %arg to [init] %2 : $*K
+ inject_enum_addr %1 : $*Optional<K>, #Optional.some!enumelt.1
+ %4 = load [take] %0 : $*SWithOpt
+ dealloc_stack %0 : $*SWithOpt
+ return %4 : $SWithOpt
+}
+
+// We do not support this now, so make sure we do not do anything.
+//
+// CHECK-LABEL: sil [ossa] @promote_init_enum_data_addr : $@convention(thin)
+// CHECK: alloc_stack
+// CHECK: load
+// CHECK: [[RESULT:%.*]] = load
+// CHECK: return [[RESULT]]
+// CHECK: } // end sil function 'promote_init_enum_data_addr'
+sil [ossa] @promote_init_enum_data_addr : $@convention(thin) (@in Int) -> Int {
+bb0(%0 : $*Int):
+ %1 = alloc_stack $Optional<Int>
+ %2 = load [trivial] %0 : $*Int
+ %3 = init_enum_data_addr %1 : $*Optional<Int>, #Optional.some!enumelt.1
+ store %2 to [trivial] %3 : $*Int
+ inject_enum_addr %1 : $*Optional<Int>, #Optional.some!enumelt.1
+ %4 = load [trivial] %3 : $*Int
+ dealloc_stack %1 : $*Optional<Int>
+ return %4 : $Int
+}
+
+// We should do nothing here since we do not have a fully available value.
+//
+// CHECK-LABEL: sil [ossa] @promote_partial_store_assign : $@convention(thin) (@owned NativeObjectPair, @owned Builtin.NativeObject) -> () {
+sil [ossa] @promote_partial_store_assign : $@convention(thin) (@owned NativeObjectPair, @owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $NativeObjectPair, %1 : @owned $Builtin.NativeObject):
+ %2 = alloc_stack $NativeObjectPair
+ store %0 to [init] %2 : $*NativeObjectPair
+ %3 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+ store %1 to [assign] %3 : $*Builtin.NativeObject
+ destroy_addr %2 : $*NativeObjectPair
+ dealloc_stack %2 : $*NativeObjectPair
+ %9999 = tuple()
+ return %9999 : $()
+}
+
+// We shouldn't promote this as well since we do not support this load [take]
+// version of store [assign]. With time, we could.
+//
+// CHECK-LABEL: sil [ossa] @promote_partial_store_split_assign : $@convention(thin) (@owned NativeObjectPair, @owned Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: load [take]
+// CHECK: } // end sil function 'promote_partial_store_split_assign'
+sil [ossa] @promote_partial_store_split_assign : $@convention(thin) (@owned NativeObjectPair, @owned Builtin.NativeObject) -> () {
+bb0(%0 : @owned $NativeObjectPair, %1 : @owned $Builtin.NativeObject):
+ %2 = alloc_stack $NativeObjectPair
+ store %0 to [init] %2 : $*NativeObjectPair
+ %3 = struct_element_addr %2 : $*NativeObjectPair, #NativeObjectPair.x
+ // Take the old, init the new, destroy the old.
+ %4 = load [take] %3 : $*Builtin.NativeObject
+ store %1 to [init] %3 : $*Builtin.NativeObject
+ destroy_value %4 : $Builtin.NativeObject
+ destroy_addr %2 : $*NativeObjectPair
+ dealloc_stack %2 : $*NativeObjectPair
+ %9999 = tuple()
+ return %9999 : $()
+}
diff --git a/test/SourceKit/CompileNotifications/type-context-info.swift b/test/SourceKit/CompileNotifications/type-context-info.swift
new file mode 100644
index 0000000..66ba8dc
--- /dev/null
+++ b/test/SourceKit/CompileNotifications/type-context-info.swift
@@ -0,0 +1,12 @@
+// RUN: %sourcekitd-test -req=track-compiles == -req=typecontextinfo %s -offset=0 -- %s | %FileCheck %s -check-prefix=COMPILE_1
+// COMPILE_1: {
+// COMPILE_1: key.notification: source.notification.compile-will-start,
+// COMPILE_1: key.filepath: "SOURCE_DIR{{.*}}type-context-info.swift",
+// COMPILE_1: key.compileid: [[CID1:".*"]]
+// COMPILE_1: }
+// COMPILE_1: {
+// COMPILE_1: key.notification: source.notification.compile-did-finish,
+// COMPILE_1: key.compileid: [[CID1]]
+// COMPILE_1: }
+// COMPILE_1-NOT: compile-will-start
+// COMPILE_1-NOT: compile-did-finish
diff --git a/test/api-digester/Outputs/Cake-abi.txt b/test/api-digester/Outputs/Cake-abi.txt
index 3814e72..1aaa9f2 100644
--- a/test/api-digester/Outputs/Cake-abi.txt
+++ b/test/api-digester/Outputs/Cake-abi.txt
@@ -62,8 +62,8 @@
cake1: Var fixedLayoutStruct2.BecomeFixedBinaryOrder is now a stored property
cake1: Var fixedLayoutStruct2.NoLongerWithFixedBinaryOrder is no longer a stored property
cake2: EnumElement FrozenKind.AddedCase is added to a non-resilient type
+cake2: Var fixedLayoutStruct.$__lazy_storage_$_lazy_d is added to a non-resilient type
cake2: Var fixedLayoutStruct.c is added to a non-resilient type
-cake2: Var fixedLayoutStruct.lazy_d.storage is added to a non-resilient type
/* Conformance changes */
cake1: Func ObjCProtocol.addOptional() is now an optional requirement
diff --git a/test/attr/attr_autoclosure.swift b/test/attr/attr_autoclosure.swift
index 4ab1bf1..e843355 100644
--- a/test/attr/attr_autoclosure.swift
+++ b/test/attr/attr_autoclosure.swift
@@ -234,3 +234,14 @@
arr.lazy.filter { $0 >= escapableF() }.isEmpty
}
}
+
+// SR-2688
+class Foo {
+ typealias FooClosure = () -> String
+ func fooFunction(closure: @autoclosure FooClosure) {} // ok
+}
+
+class Bar {
+ typealias BarClosure = (String) -> String
+ func barFunction(closure: @autoclosure BarClosure) {} // expected-error {{argument type of @autoclosure parameter must be '()'}}
+}
diff --git a/test/attr/attr_objc.swift b/test/attr/attr_objc.swift
index ebfe523..5bbccc1 100644
--- a/test/attr/attr_objc.swift
+++ b/test/attr/attr_objc.swift
@@ -323,7 +323,6 @@
func dynamicSelf1() -> Self { return self }
@objc func dynamicSelf1_() -> Self { return self }
- // expected-error@-1{{method cannot be marked @objc because its result type cannot be represented in Objective-C}}
@objc func genericParams<T: NSObject>() -> [T] { return [] }
// expected-error@-1{{method cannot be marked @objc because it has generic parameters}}
diff --git a/tools/SourceKit/lib/SwiftLang/SwiftTypeContextInfo.cpp b/tools/SourceKit/lib/SwiftLang/SwiftTypeContextInfo.cpp
index fb8afbc..e62d277 100644
--- a/tools/SourceKit/lib/SwiftLang/SwiftTypeContextInfo.cpp
+++ b/tools/SourceKit/lib/SwiftLang/SwiftTypeContextInfo.cpp
@@ -12,6 +12,7 @@
#include "SwiftASTManager.h"
#include "SwiftLangSupport.h"
+#include "SwiftEditorDiagConsumer.h"
#include "swift/Frontend/Frontend.h"
#include "swift/Frontend/PrintingDiagnosticConsumer.h"
#include "swift/IDE/TypeContextInfo.h"
@@ -54,6 +55,7 @@
auto bufferIdentifier =
Lang.resolvePathSymlinks(UnresolvedInputFile->getBufferIdentifier());
+ auto origOffset = Offset;
auto newBuffer = makeCodeCompletionMemoryBuffer(UnresolvedInputFile, Offset,
bufferIdentifier);
@@ -61,6 +63,22 @@
PrintingDiagnosticConsumer PrintDiags;
CI.addDiagnosticConsumer(&PrintDiags);
+ EditorDiagConsumer TraceDiags;
+ trace::TracedOperation TracedOp(trace::OperationKind::CodeCompletion);
+ if (TracedOp.enabled()) {
+ CI.addDiagnosticConsumer(&TraceDiags);
+ trace::SwiftInvocation SwiftArgs;
+ trace::initTraceInfo(SwiftArgs, bufferIdentifier, Args);
+ TracedOp.setDiagnosticProvider(
+ [&TraceDiags](SmallVectorImpl<DiagnosticEntryInfo> &diags) {
+ TraceDiags.getAllDiagnostics(diags);
+ });
+ TracedOp.start(
+ SwiftArgs,
+ {std::make_pair("OriginalOffset", std::to_string(origOffset)),
+ std::make_pair("Offset", std::to_string(Offset))});
+ }
+
CompilerInvocation Invocation;
bool Failed = Lang.getASTManager()->initCompilerInvocation(
Invocation, Args, CI.getDiags(), bufferIdentifier, Error);
diff --git a/unittests/Parse/LexerTests.cpp b/unittests/Parse/LexerTests.cpp
index 4233b89..a1428e4 100644
--- a/unittests/Parse/LexerTests.cpp
+++ b/unittests/Parse/LexerTests.cpp
@@ -29,7 +29,7 @@
std::vector<Token> tokenizeAndKeepEOF(unsigned BufferID) {
Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr,
- /*InSILMode=*/false);
+ LexerMode::Swift);
std::vector<Token> Tokens;
do {
Tokens.emplace_back();
@@ -131,7 +131,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -148,7 +148,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -165,7 +165,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -182,7 +182,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -205,7 +205,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -228,7 +228,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -244,7 +244,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -262,7 +262,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -280,7 +280,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::None,
TriviaRetentionMode::WithoutTrivia);
@@ -312,7 +312,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::ReturnAsTokens,
TriviaRetentionMode::WithoutTrivia);
@@ -371,7 +371,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithoutTrivia);
@@ -403,7 +403,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::None,
TriviaRetentionMode::WithTrivia);
@@ -445,7 +445,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithTrivia);
@@ -487,7 +487,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source, 14));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -534,7 +534,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source, 16));
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -582,7 +582,7 @@
unsigned BufferID = SourceMgr.addMemBufferCopy(StringRef(Source, 16));
SourceMgr.setCodeCompletionPoint(BufferID, 6);
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false);
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift);
Token Tok;
@@ -675,7 +675,7 @@
unsigned BufferID = SourceMgr.addMemBufferCopy(Source);
Lexer Primary(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr,
- /*InSILMode=*/false);
+ LexerMode::Swift);
std::vector<Token> TokensPrimary;
do {
TokensPrimary.emplace_back();
@@ -786,7 +786,7 @@
Diags.addConsumer(DiagConsumer);
Lexer L(LangOpts, SourceMgr, BufferID, &Diags,
- /*InSILMode=*/false, HashbangMode::Disallowed,
+ LexerMode::Swift, HashbangMode::Disallowed,
CommentRetentionMode::None, TriviaRetentionMode::WithTrivia);
ASSERT_TRUE(containsPrefix(DiagConsumer.messages,
@@ -808,7 +808,7 @@
Diags.addConsumer(DiagConsumer);
Lexer L(LangOpts, SourceMgr, BufferID, &Diags,
- /*InSILMode=*/false, HashbangMode::Disallowed,
+ LexerMode::Swift, HashbangMode::Disallowed,
CommentRetentionMode::None, TriviaRetentionMode::WithTrivia,
/*Offset=*/5, /*EndOffset=*/SourceLen);
diff --git a/unittests/Parse/LexerTriviaTests.cpp b/unittests/Parse/LexerTriviaTests.cpp
index 33fcb16..38ab2ab 100644
--- a/unittests/Parse/LexerTriviaTests.cpp
+++ b/unittests/Parse/LexerTriviaTests.cpp
@@ -20,7 +20,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(SourceStr);
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithTrivia);
@@ -75,7 +75,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(SourceStr);
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithTrivia);
@@ -98,7 +98,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(SourceStr);
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithTrivia);
@@ -131,7 +131,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(SourceStr);
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithTrivia);
@@ -166,7 +166,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(SourceStr);
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithTrivia);
@@ -208,7 +208,7 @@
SourceManager SourceMgr;
unsigned BufferID = SourceMgr.addMemBufferCopy(SourceStr);
- Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, /*InSILMode=*/false,
+ Lexer L(LangOpts, SourceMgr, BufferID, /*Diags=*/nullptr, LexerMode::Swift,
HashbangMode::Disallowed, CommentRetentionMode::AttachToNextToken,
TriviaRetentionMode::WithTrivia);
diff --git a/unittests/SourceKit/SwiftLang/CMakeLists.txt b/unittests/SourceKit/SwiftLang/CMakeLists.txt
index ca018cc..a94360b 100644
--- a/unittests/SourceKit/SwiftLang/CMakeLists.txt
+++ b/unittests/SourceKit/SwiftLang/CMakeLists.txt
@@ -1,16 +1,9 @@
if(NOT SWIFT_HOST_VARIANT MATCHES "${SWIFT_DARWIN_EMBEDDED_VARIANTS}")
-
add_swift_unittest(SourceKitSwiftLangTests
CursorInfoTest.cpp
EditingTest.cpp
)
-
- target_link_libraries(SourceKitSwiftLangTests
- PRIVATE
- SourceKitSwiftLang
- )
-
- set_property(TARGET SourceKitSwiftLangTests APPEND_STRING PROPERTY COMPILE_FLAGS
- " '-DSWIFTLIB_DIR=\"${SWIFTLIB_DIR}\"'")
-
+ target_link_libraries(SourceKitSwiftLangTests PRIVATE SourceKitSwiftLang)
+ target_compile_definitions(SourceKitSwiftLangTests PRIVATE
+ SWIFTLIB_DIR=\"${SWIFTLIB_DIR}\")
endif()
diff --git a/unittests/SourceKit/SwiftLang/EditingTest.cpp b/unittests/SourceKit/SwiftLang/EditingTest.cpp
index 2294f17..8599ba7 100644
--- a/unittests/SourceKit/SwiftLang/EditingTest.cpp
+++ b/unittests/SourceKit/SwiftLang/EditingTest.cpp
@@ -19,8 +19,11 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetSelect.h"
#include "gtest/gtest.h"
-#include <mutex>
+
+#include <chrono>
#include <condition_variable>
+#include <mutex>
+#include <thread>
using namespace SourceKit;
using namespace llvm;
@@ -177,7 +180,7 @@
DocUpdState->HasUpdate = false;
}
- void doubleOpenWithDelay(useconds_t delay, bool close);
+ void doubleOpenWithDelay(std::chrono::microseconds delay, bool close);
private:
std::vector<const char *> makeArgs(const char *DocName,
@@ -229,7 +232,8 @@
EXPECT_EQ(SemaDiagStage, Consumer.DiagStage);
}
-void EditTest::doubleOpenWithDelay(useconds_t delay, bool closeDoc) {
+void EditTest::doubleOpenWithDelay(std::chrono::microseconds delay,
+ bool closeDoc) {
const char *DocName = "/test.swift";
const char *Contents =
"func foo() { _ = unknown_name }\n";
@@ -239,8 +243,8 @@
open(DocName, Contents, Args, Consumer);
ASSERT_EQ(0u, Consumer.Diags.size());
// Open again without closing; this reinitializes the semantic info on the doc
- if (delay)
- usleep(delay);
+ if (delay > std::chrono::microseconds(0))
+ std::this_thread::sleep_for(delay);
if (closeDoc)
close(DocName);
reset(Consumer);
@@ -277,10 +281,10 @@
// The middle case in particular verifies the ASTManager is only calling the
// correct ASTConsumers.
- doubleOpenWithDelay(0, true);
- doubleOpenWithDelay(1000, true); // 1 ms
- doubleOpenWithDelay(10000, true); // 10 ms
- doubleOpenWithDelay(100000, true); // 100 ms
+ doubleOpenWithDelay(std::chrono::microseconds(0), true);
+ doubleOpenWithDelay(std::chrono::milliseconds(1), true);
+ doubleOpenWithDelay(std::chrono::milliseconds(10), true);
+ doubleOpenWithDelay(std::chrono::milliseconds(100), true);
}
TEST_F(EditTest, DiagsAfterReopen) {
@@ -288,8 +292,8 @@
// close the original document, causing it to reinitialize instead of create
// a fresh document.
- doubleOpenWithDelay(0, false);
- doubleOpenWithDelay(1000, false); // 1 ms
- doubleOpenWithDelay(10000, false); // 10 ms
- doubleOpenWithDelay(100000, false); // 100 ms
+ doubleOpenWithDelay(std::chrono::microseconds(0), false);
+ doubleOpenWithDelay(std::chrono::milliseconds(1), false);
+ doubleOpenWithDelay(std::chrono::milliseconds(10), false);
+ doubleOpenWithDelay(std::chrono::milliseconds(100), false);
}
diff --git a/utils/build-presets.ini b/utils/build-presets.ini
index c5e093a..e115668 100644
--- a/utils/build-presets.ini
+++ b/utils/build-presets.ini
@@ -1775,6 +1775,8 @@
[preset: build_swiftsyntax_release]
release
+lto
no-assertions
build-libparser-only
swiftsyntax
+verbose-build
diff --git a/utils/build-script b/utils/build-script
index 5b58f38..0e28e54 100755
--- a/utils/build-script
+++ b/utils/build-script
@@ -993,8 +993,13 @@
help="Print the expanded build-script invocation generated "
"by the preset, but do not run the preset",
action=arguments.action.optional_bool)
+ parser.add_argument(
+ "--swiftsyntax-install-prefix",
+ help="specify the directory to where SwiftSyntax should be installed")
+ parser.add_argument(
+ "--build-dir",
+ help="specify the directory where build artifact should be stored")
args = parser.parse_args()
-
if len(args.preset_file_names) == 0:
args.preset_file_names = [
os.path.join(
@@ -1044,6 +1049,12 @@
build_script_args += ["--distcc"]
if args.build_jobs:
build_script_args += ["--jobs", str(args.build_jobs)]
+ if args.swiftsyntax_install_prefix:
+ build_script_args += ["--install-swiftsyntax",
+ "--install-destdir",
+ args.swiftsyntax_install_prefix]
+ if args.build_dir:
+ build_script_args += ["--build-dir", args.build_dir]
diagnostics.note('using preset "{}", which expands to \n\n{}\n'.format(
args.preset, shell.quote_command(build_script_args)))
diff --git a/utils/build-script-impl b/utils/build-script-impl
index 7411576..26a0db8 100755
--- a/utils/build-script-impl
+++ b/utils/build-script-impl
@@ -266,7 +266,8 @@
user-config-args "" "**Renamed to --extra-cmake-options**: User-supplied arguments to cmake when used to do configuration."
only-execute "all" "Only execute the named action (see implementation)"
llvm-lit-args "" "If set, override the lit args passed to LLVM"
- clang-profile-instr-use "" "If set, profile file to use for clang PGO"
+ clang-profile-instr-use "" "If set, profile file to use for clang PGO while building llvm/clang"
+ swift-profile-instr-use "" "If set, profile file to use for clang PGO while building swift"
coverage-db "" "If set, coverage database to use when prioritizing testing"
build-toolchain-only "" "If set, only build the necessary tools to build an external toolchain"
skip-local-host-install "" "If we are cross-compiling multiple targets, skip an install pass locally if the hosts match"
@@ -759,6 +760,11 @@
)
fi
+ if [[ "${SWIFT_PROFILE_INSTR_USE}" ]]; then
+ swift_cmake_options+=(
+ -DSWIFT_PROFDATA_FILE="${SWIFT_PROFILE_INSTR_USE}"
+ )
+ fi
swift_cmake_options+=(
-DCOVERAGE_DB="${COVERAGE_DB}"
)
@@ -2302,6 +2308,7 @@
-DSWIFT_BUILD_EXTERNAL_PERF_TESTSUITE:BOOL=$(true_false "${build_external_perf_testsuite_this_time}")
-DSWIFT_BUILD_EXAMPLES:BOOL=$(true_false "${BUILD_SWIFT_EXAMPLES}")
-DSWIFT_INCLUDE_TESTS:BOOL=$(true_false "${build_tests_this_time}")
+ -DSWIFT_INSTALL_COMPONENTS:STRING="${SWIFT_INSTALL_COMPONENTS}"
-DSWIFT_EMBED_BITCODE_SECTION:BOOL=$(true_false "${EMBED_BITCODE_SECTION}")
-DSWIFT_TOOLS_ENABLE_LTO:STRING="${SWIFT_TOOLS_ENABLE_LTO}"
-DSWIFT_BUILD_RUNTIME_WITH_HOST_COMPILER:BOOL=$(true_false "${BUILD_RUNTIME_WITH_HOST_COMPILER}")
@@ -2372,13 +2379,6 @@
)
fi
- if [ "${SWIFT_INSTALL_COMPONENTS}" ] ; then
- cmake_options=(
- "${cmake_options[@]}"
- -DSWIFT_INSTALL_COMPONENTS:STRING="${SWIFT_INSTALL_COMPONENTS}"
- )
- fi
-
if contains_product "lldb" ; then
lldb_build_dir=$(build_directory ${host} lldb)
cmake_options=(
@@ -3566,8 +3566,20 @@
exit 1
fi
echo "--- Installing ${product} ---"
- DYLIB_DIR="${host_install_destdir}${host_install_prefix}/lib/swift/${SWIFT_HOST_VARIANT}"
- MODULE_DIR="${DYLIB_DIR}/${SWIFT_HOST_VARIANT_ARCH}"
+ if [ "${BUILD_LIBPARSER_ONLY}" ]; then
+ # We don't have a toolchain so we should install to the specified dir
+ DYLIB_DIR="${INSTALL_DESTDIR}"
+ MODULE_DIR="${INSTALL_DESTDIR}"
+ # Install libParser if necessary
+ rsync -a "$(build_directory ${host} swift)/lib/lib_InternalSwiftSyntaxParser.dylib" "${INSTALL_DESTDIR}"
+ # Install module map of libParser so clients can import SwiftSyntax
+ rsync -a "${SWIFT_SOURCE_DIR}/include/swift-c/SyntaxParser" "${INSTALL_DESTDIR}"
+ else
+ # We have a toolchain so install to the toolchain
+ DYLIB_DIR="${host_install_destdir}${host_install_prefix}/lib/swift/${SWIFT_HOST_VARIANT}"
+ MODULE_DIR="${DYLIB_DIR}/${SWIFT_HOST_VARIANT_ARCH}"
+ fi
+
if [[ -z "${SKIP_INSTALL_SWIFTSYNTAX_MODULE}" ]] ; then
call "${swiftsyntax_build_command[@]}" --dylib-dir="${DYLIB_DIR}" --swiftmodule-dir "${MODULE_DIR}" --install
else
diff --git a/validation-test/Sema/Inputs/rdar47334176_types.swift b/validation-test/Sema/Inputs/rdar47334176_types.swift
new file mode 100644
index 0000000..f5266f1
--- /dev/null
+++ b/validation-test/Sema/Inputs/rdar47334176_types.swift
@@ -0,0 +1,10 @@
+public protocol P : class {
+ associatedtype V
+}
+
+public protocol R {
+ associatedtype V
+}
+
+public enum E<V> : R {}
+public class C<V> : R {}
diff --git a/validation-test/Sema/large-switch-rdar47365349.swift b/validation-test/Sema/large-switch-rdar47365349.swift
new file mode 100644
index 0000000..bbc1981
--- /dev/null
+++ b/validation-test/Sema/large-switch-rdar47365349.swift
@@ -0,0 +1,212 @@
+// RUN: %target-typecheck-verify-swift
+
+enum NumericBase {
+ case binary
+ case ternary
+ case quaternary
+ case quinary
+ case senary
+ case septary
+ case octal
+ case nonary
+ case decimal
+ case undecimal
+ case duodecimal
+}
+
+enum Direction {
+ case left
+ case right
+}
+
+enum WritingSystem {
+ case logographic
+ case alphabet(kind: Alphabet)
+ case abjad
+ case abugida
+ case syllabary
+ case other
+}
+
+enum Alphabet {
+ case roman
+ case greek
+ case cyrillic
+}
+
+func test(base: NumericBase, direction: Direction, writingSystem: WritingSystem) {
+ switch (base, direction, writingSystem) {
+ case (.binary, .left, .logographic),
+ (.binary, .left, .alphabet),
+ (.binary, .left, .abugida):
+ break
+
+ case (.binary, .right, .logographic),
+ (.binary, .right, .alphabet),
+ (.binary, .right, .abugida):
+ break
+
+ case (.binary, _, .abjad):
+ break
+
+ case (.binary, _, .syllabary):
+ break
+
+ case (.ternary, .left, .logographic):
+ break
+
+ case (.ternary, .left, .alphabet),
+ (.ternary, .left, .abugida):
+ break
+
+ case (.ternary, .right, .logographic),
+ (.ternary, .right, .abugida):
+ break
+
+ case (.ternary, .right, .alphabet):
+ break
+
+ case (.ternary, _, .abjad):
+ break
+
+ case (.ternary, _, .syllabary):
+ break
+
+ case (.quaternary, .left, .logographic):
+ break
+
+ case (.quaternary, .left, .alphabet),
+ (.quaternary, .left, .abugida):
+ break
+
+ case (.quaternary, .right, .logographic),
+ (.quaternary, .right, .abugida):
+ break
+
+ case (.quaternary, .right, .alphabet):
+ break
+
+ case (.quaternary, _, .abjad):
+ break
+
+ case (.quaternary, _, .syllabary):
+ break
+
+ case (.quinary, .left, .logographic),
+ (.senary, .left, .logographic):
+ break
+
+ case (.quinary, .left, .alphabet),
+ (.senary, .left, .alphabet),
+ (.quinary, .left, .abugida),
+ (.senary, .left, .abugida):
+ break
+
+ case (.quinary, .right, .logographic),
+ (.senary, .right, .logographic):
+ break
+
+ case (.quinary, .right, .alphabet),
+ (.senary, .right, .alphabet),
+ (.quinary, .right, .abugida),
+ (.senary, .right, .abugida):
+ break
+
+ case (.quinary, _, .abjad),
+ (.senary, _, .abjad):
+ break
+
+ case (.quinary, _, .syllabary),
+ (.senary, _, .syllabary):
+ break
+
+ case (.septary, .left, .logographic):
+ break
+
+ case (.septary, .left, .alphabet),
+ (.septary, .left, .abugida):
+ break
+
+ case (.septary, .right, .logographic):
+ break
+
+ case (.septary, .right, .alphabet),
+ (.septary, .right, .abugida):
+ break
+
+ case (.septary, _, .abjad):
+ break
+
+ case (.septary, _, .syllabary):
+ break
+
+ case (.decimal, .left, .logographic):
+ break
+
+ case (.decimal, .left, .alphabet),
+ (.decimal, .left, .abugida):
+ break
+
+ case (.decimal, .right, .logographic):
+ break
+
+ case (.decimal, .right, .alphabet),
+ (.decimal, .right, .abugida):
+ break
+
+ case (.octal, .left, .logographic),
+ (.nonary, .left, .logographic):
+ break
+
+ case (.octal, .left, .alphabet),
+ (.nonary, .left, .alphabet),
+ (.octal, .left, .abugida),
+ (.nonary, .left, .abugida):
+ break
+
+ case (.octal, .right, .logographic),
+ (.nonary, .right, .logographic):
+ break
+
+ case (.octal, .right, .alphabet),
+ (.nonary, .right, .alphabet),
+ (.octal, .right, .abugida),
+ (.nonary, .right, .abugida):
+ break
+
+ case (.octal, _, .abjad),
+ (.nonary, _, .abjad),
+ (.decimal, _, .abjad):
+ break
+
+ case (.octal, _, .syllabary),
+ (.nonary, _, .syllabary),
+ (.decimal, _, .syllabary):
+ break
+
+ case (.undecimal, .left, .logographic):
+ break
+
+ case (.undecimal, .left, .alphabet),
+ (.undecimal, .left, .abugida):
+ break
+
+ case (.undecimal, .right, .logographic):
+ break
+
+ case (.undecimal, .right, .alphabet),
+ (.undecimal, .right, .abugida):
+ break
+
+ case (.undecimal, _, .abjad):
+ break
+
+ case (.undecimal, _, .syllabary):
+ break
+
+ case (.duodecimal, _, _):
+ break
+ case (_, _, .other):
+ break
+ }
+}
diff --git a/validation-test/Sema/rdar47334176.swift b/validation-test/Sema/rdar47334176.swift
new file mode 100644
index 0000000..cca1c66
--- /dev/null
+++ b/validation-test/Sema/rdar47334176.swift
@@ -0,0 +1,13 @@
+// RUN: %empty-directory(%t)
+// RUN: %target-swift-frontend -emit-module -o %t/rdar47334176_types.swiftmodule %S/Inputs/rdar47334176_types.swift
+// RUN: %target-swift-frontend -I %t -typecheck %s
+
+import rdar47334176_types
+
+// To test all possibilities let's declare one of the types
+// in the same module as function declaration which uses it.
+struct S<V> : R {}
+
+func foo<T : P, U>(_: T?, _: (T.V.V) -> Void) where T.V == E<U> {} // Ok
+func bar<T : P, U>(_: T?, _: (T.V.V) -> Void) where T.V == S<U> {} // Ok
+func baz<T : P, U>(_: T?, _: (T.V.V) -> Void) where T.V == C<U> {} // Ok