// RUN: %target-sil-opt -enforce-exclusivity=none -enable-sil-verify-all %s -licm | %FileCheck %s
// Declare this SIL to be canonical because some tests break raw SIL
// conventions, e.g. address-type block args. -enforce-exclusivity=none is also
// required to allow address-type block args in canonical SIL.
sil_stage canonical
import Builtin
import Swift
// CHECK-LABEL: @memset
// CHECK: bb0
// CHECK: load %0
// CHECK: br bb2
// CHECK: bb2({{.*}}):
// CHECK-NOT: load
// CHECK-NOT: fix_lifetime
// CHECK: cond_br
sil @memset : $@convention(thin) (@inout Builtin.NativeObject, Int) -> () {
bb0(%0 : $*Builtin.NativeObject, %1 : $Int):
%5 = integer_literal $Builtin.Int1, -1
%46 = integer_literal $Builtin.Word, 0
br bb2(%46 : $Builtin.Word)
bb1:
%52 = tuple ()
return %52 : $()
bb2(%54 : $Builtin.Word):
%55 = integer_literal $Builtin.Word, 1
%57 = builtin "sadd_with_overflow_Word"(%54 : $Builtin.Word, %55 : $Builtin.Word, %5 : $Builtin.Int1) : $(Builtin.Word, Builtin.Int1)
%58 = tuple_extract %57 : $(Builtin.Word, Builtin.Int1), 0
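// The load of %0 below is loop-invariant; LICM is expected to hoist it into
// the preheader, and neither the load nor the fix_lifetime should remain in
// the loop body (see the CHECK lines above).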
%59 = load %0 : $*Builtin.NativeObject
%60 = integer_literal $Builtin.Word, 100
%96 = ref_to_raw_pointer %59 : $Builtin.NativeObject to $Builtin.RawPointer
%97 = index_raw_pointer %96 : $Builtin.RawPointer, %58 : $Builtin.Word
%98 = pointer_to_address %97 : $Builtin.RawPointer to [strict] $*Int
%99 = index_addr %98 : $*Int, %54 : $Builtin.Word
fix_lifetime %59 : $Builtin.NativeObject
store %1 to %99 : $*Int
%101 = builtin "cmp_eq_Word"(%58 : $Builtin.Word, %60 : $Builtin.Word) : $Builtin.Int1
cond_br %101, bb1, bb2(%58 : $Builtin.Word)
}
// CHECK-LABEL: @must_move_condfail
// CHECK: bb0
// CHECK: load %0
// CHECK: cond_fail
// CHECK: [[INVARIANTADDR:%.*]] = pointer_to_address
// CHECK: load [[INVARIANTADDR]]
// CHECK: br bb2
// CHECK: bb2({{.*}}):
// The address computation of the load is guarded by the cond_fail. If we hoist
// the load, we must also hoist the cond_fail.
// CHECK-NOT: cond_fail
// CHECK-NOT: load
// CHECK: cond_br
sil @must_move_condfail : $@convention(thin) (@inout Builtin.NativeObject, Int, Builtin.Word) -> () {
bb0(%0 : $*Builtin.NativeObject, %1 : $Int, %2 : $Builtin.Word):
%5 = integer_literal $Builtin.Int1, -1
%6 = load %0 : $*Builtin.NativeObject
%46 = integer_literal $Builtin.Word, 0
br bb2(%46 : $Builtin.Word)
bb1:
%102 = tuple ()
return %102 : $()
bb2(%48 : $Builtin.Word):
%51 = builtin "sadd_with_overflow_Word"(%2 : $Builtin.Word, %46 : $Builtin.Word, %5 : $Builtin.Int1) : $(Builtin.Word, Builtin.Int1)
%52 = tuple_extract %51 : $(Builtin.Word, Builtin.Int1), 0
%53 = tuple_extract %51 : $(Builtin.Word, Builtin.Int1), 1
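// This cond_fail guards %52, which feeds the loop-invariant address
// computation below; when the load is hoisted, the cond_fail must be hoisted
// with it (see the CHECK lines above).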
cond_fail %53 : $Builtin.Int1
%55 = integer_literal $Builtin.Word, 1
%57 = builtin "sadd_with_overflow_Word"(%48 : $Builtin.Word, %55 : $Builtin.Word, %5 : $Builtin.Int1) : $(Builtin.Word, Builtin.Int1)
%58 = tuple_extract %57 : $(Builtin.Word, Builtin.Int1), 0
%60 = integer_literal $Builtin.Word, 100
%61 = ref_to_raw_pointer %6 : $Builtin.NativeObject to $Builtin.RawPointer
%62 = index_raw_pointer %61 : $Builtin.RawPointer, %52 : $Builtin.Word
%63 = pointer_to_address %62 : $Builtin.RawPointer to [strict] $*Builtin.NativeObject
%64 = load %63 : $*Builtin.NativeObject
%96 = ref_to_raw_pointer %64 : $Builtin.NativeObject to $Builtin.RawPointer
%97 = index_raw_pointer %96 : $Builtin.RawPointer, %58 : $Builtin.Word
%98 = pointer_to_address %97 : $Builtin.RawPointer to [strict] $*Int
%99 = index_addr %98 : $*Int, %48 : $Builtin.Word
store %1 to %99 : $*Int
%101 = builtin "cmp_eq_Word"(%58 : $Builtin.Word, %60 : $Builtin.Word) : $Builtin.Int1
cond_br %101, bb1, bb2(%58 : $Builtin.Word)
}
// CHECK-LABEL: sil @hoist_outer_loop
// CHECK: bb0([[ADDR:%.*]] : $*Builtin.Int1
// CHECK: load [[ADDR]]
// CHECK: integer_literal $Builtin.Word, 101
// CHECK: br bb1
// CHECK: return
sil @hoist_outer_loop : $@convention(thin) (@inout Builtin.Int1, Int) -> () {
bb0(%0 : $*Builtin.Int1, %1 : $Int):
%2 = integer_literal $Builtin.Int1, -1
%3 = integer_literal $Builtin.Word, 0
br bb1
// Outer loop.
bb1:
%5 = load %0 : $*Builtin.Int1
%6 = integer_literal $Builtin.Word, 101
cond_br %5, bb2, bb3
bb2:
cond_br %5, bb4, bb1
// Inner loop.
bb3:
cond_br %5, bb2, bb3
bb4:
%10 = tuple ()
return %10 : $()
}
// CHECK-LABEL: sil @dont_hoist_outer_loop
// CHECK: bb0([[ADDR:%.*]] : $*Builtin.Int1
// CHECK: integer_literal $Builtin.Word, 101
// CHECK: br bb1
// CHECK: bb1:
// CHECK: load [[ADDR]]
// CHECK: return
sil @dont_hoist_outer_loop : $@convention(thin) (@inout Builtin.Int1, Int) -> () {
bb0(%0 : $*Builtin.Int1, %1 : $Int):
%2 = integer_literal $Builtin.Int1, -1
%3 = integer_literal $Builtin.Word, 0
br bb1
// Outer loop.
bb1:
%5 = load %0 : $*Builtin.Int1
%6 = integer_literal $Builtin.Word, 101
cond_br %5, bb2, bb3
bb2:
cond_br %5, bb4, bb1
// Inner loop.
bb3:
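// This store clobbers the location loaded in bb1, so that load cannot be
// hoisted out of the outer loop.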
store %2 to %0 : $*Builtin.Int1
cond_br %5, bb2, bb3
bb4:
%10 = tuple ()
return %10 : $()
}
sil [_semantics "array.get_count"] @getCount : $@convention(method) (@guaranteed Array<Int>) -> Int
sil @user : $@convention(thin) (Int) -> ()
// CHECK-LABEL: sil @dont_hoist_get_count_on_low_level_sil
// CHECK: {{^}}bb1:
// CHECK: apply
// CHECK: apply
// CHECK: {{^}}bb2:
// CHECK: return
sil @dont_hoist_get_count_on_low_level_sil : $@convention(thin) (@guaranteed Array<Int>) -> () {
bb0(%0 : $Array<Int>):
br bb1
bb1:
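// The array.get_count semantic call is not hoisted on low-level SIL; both
// applies stay inside the loop.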
%f1 = function_ref @getCount : $@convention(method) (@guaranteed Array<Int>) -> Int
%f2 = function_ref @user : $@convention(thin) (Int) -> ()
%c1 = apply %f1(%0) : $@convention(method) (@guaranteed Array<Int>) -> Int
%c2 = apply %f2(%c1) : $@convention(thin) (Int) -> ()
cond_br undef, bb1, bb2
bb2:
%r1 = tuple ()
return %r1 : $()
}
sil @use_addr : $@convention(thin) (@inout Int32) -> ()
// CHECK-LABEL: sil @dont_hoist_aliased_stack_location
// CHECK: {{^}}bb0
// CHECK-NOT: load
// CHECK: {{^}}bb1:
// CHECK: store
// CHECK: apply
// CHECK: {{^}}bb2:
// CHECK: return
sil @dont_hoist_aliased_stack_location : $@convention(thin) (Int32) -> () {
bb0(%0 : $Int32):
%313 = alloc_stack $Int32
br bb1
bb1:
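// The stack location is passed @inout to @use_addr, so the apply may read and
// write it and the store cannot be hoisted out of the loop.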
store %0 to %313 : $*Int32
%f = function_ref @use_addr : $@convention(thin) (@inout Int32) -> ()
%a = apply %f(%313) : $@convention(thin) (@inout Int32) -> ()
cond_br undef, bb1, bb2
bb2:
dealloc_stack %313 : $*Int32
%52 = tuple ()
return %52 : $()
}
public protocol P : class {
func foo() -> Int32
func boo() -> Int32
}
// Check that LICM does not hoist a metatype instruction before
// the open_existential instruction that creates the archetype,
// because this would break the dominance relation between them.
// CHECK-LABEL: sil @dont_hoist_metatype
// CHECK-NOT: metatype
// CHECK-NOT: witness_method
// CHECK: bb1({{%.*}} : $P)
// CHECK-NOT: metatype
// CHECK-NOT: witness_method
// CHECK: open_existential_ref
// CHECK: metatype
// CHECK: witness_method
// CHECK: cond_br
sil @dont_hoist_metatype : $@convention(thin) (@inout Builtin.Int1, @owned P) -> () {
bb0(%0 : $*Builtin.Int1, %1 : $P):
br bb1(%1 : $P)
// Loop
bb1(%existential : $P):
%2 = open_existential_ref %existential : $P to $@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60") P
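// %3 and %4 use the archetype opened by %2, so hoisting them above the
// open_existential_ref would break dominance (see the comment above).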
%3 = metatype $@thick (@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60") P).Type
%4 = witness_method $@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60") P, #P.foo, %2 : $@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60") P : $@convention(witness_method: P) <Ï„_0_0 where Ï„_0_0 : P> (@guaranteed Ï„_0_0) -> Int32
%5 = apply %4<@opened("C4960DBA-02C5-11E6-BE1B-B8E856428C60") P>(%2) : $@convention(witness_method: P) <Ï„_0_0 where Ï„_0_0 : P> (@guaranteed Ï„_0_0) -> Int32
%6 = load %0 : $*Builtin.Int1
cond_br %6, bb3, bb1(%existential : $P)
bb3:
br bb4
bb4:
strong_release %1 : $P
%10 = tuple ()
return %10 : $()
}
// CHECK-LABEL: sil @dont_hoist_existential_meta_type
// CHECK: bb0({{.*}}:
// CHECK-NOT: existential_metatype
// CHECK: bb1:
// CHECK: existential_metatype
// CHECK: cond_br
// CHECK: bb2:
sil @dont_hoist_existential_meta_type : $@convention(thin) (@in P) -> () {
bb0(%0 : $*P):
%1 = alloc_stack $P
br bb1
bb1:
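// The existential_metatype reads memory that is written by the copy_addr
// inside the loop, so it is not hoisted.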
copy_addr %0 to [initialization] %1 : $*P
%2 = existential_metatype $@thick P.Type, %1 : $*P
cond_br undef, bb1, bb2
bb2:
dealloc_stack %1 : $*P
destroy_addr %0 : $*P
%52 = tuple ()
return %52 : $()
}
sil @get_unknown_value : $@convention(thin) () -> Builtin.Int32
sil @get_unknown_value2 : $@convention(thin) () -> Builtin.Int32
sil @callee : $@convention(thin) (@inout Builtin.Int32) -> () {
bb0(%0 : $*Builtin.Int32):
%1 = function_ref @get_unknown_value : $@convention(thin) () -> Builtin.Int32
%2 = apply %1() : $@convention(thin) () -> Builtin.Int32
store %2 to %0 : $*Builtin.Int32
%9999 = tuple()
return %9999 : $()
}
sil @use_value : $@convention(thin) (Builtin.Int32) -> ()
// Check that escape analysis figures out that the alloc_stack escapes into @callee.
//
// CHECK-LABEL: sil @dont_hoist_aliased_load
// CHECK: bb2:
// CHECK-NEXT: apply
// CHECK-NEXT: load
// CHECK-NEXT: apply
sil @dont_hoist_aliased_load : $@convention(thin) () -> () {
bb0:
%0 = alloc_stack $Builtin.Int32
%1 = integer_literal $Builtin.Int32, 0
%3 = function_ref @callee : $@convention(thin) (@inout Builtin.Int32) -> ()
%5 = function_ref @use_value : $@convention(thin) (Builtin.Int32) -> ()
%unknown_value_fn = function_ref @get_unknown_value2 : $@convention(thin) () -> Builtin.Int32
store %1 to %0 : $*Builtin.Int32
br bb1(%0 : $*Builtin.Int32)
bb1(%phi1 : $*Builtin.Int32):
br bb2
bb2:
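// @callee stores through the escaped alloc_stack, so the load of %phi1 (which
// aliases %0) must stay below the apply.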
apply %3(%0) : $@convention(thin) (@inout Builtin.Int32) -> ()
%4 = load %phi1 : $*Builtin.Int32
%6 = apply %unknown_value_fn() : $@convention(thin) () -> Builtin.Int32
%33 = builtin "cmp_eq_Int32"(%4 : $Builtin.Int32, %6 : $Builtin.Int32) : $Builtin.Int1
cond_br %33, bb2, bb3
bb3:
%9999 = tuple()
dealloc_stack %0 : $*Builtin.Int32
return %9999 : $()
}
class RefElemClass {
var x : Int32
init()
}
// Check hoisting of ref_element_addr in conditional control flow (for exclusivity)
//
// CHECK-LABEL: sil @hoist_ref_elem
// CHECK: bb0(%0 : $RefElemClass):
// CHECK-NEXT: ref_element_addr %0 : $RefElemClass, #RefElemClass.x
// CHECK-NEXT: br bb1
sil @hoist_ref_elem : $@convention(thin) (RefElemClass) -> () {
bb0(%0 : $RefElemClass):
br bb1
// loop.
bb1:
cond_br undef, bb2, bb3
bb2:
cond_br undef, bb4, bb1
bb3:
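// ref_element_addr has no side effects and its operand dominates the loop, so
// LICM hoists it into the preheader even though bb3 is only conditionally
// executed.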
%x = ref_element_addr %0 : $RefElemClass, #RefElemClass.x
br bb1
bb4:
%10 = tuple ()
return %10 : $()
}
sil @potential_escape : $@convention(thin) (@guaranteed RefElemClass) -> ()
// CHECK-LABEL: sil @dont_hoist_begin_cow_mutation
// CHECK: bb1:
// CHECK-NEXT: begin_cow_mutation
// CHECK-NEXT: end_cow_mutation
// CHECK-NEXT: apply
sil @dont_hoist_begin_cow_mutation : $@convention(thin) (@owned RefElemClass) -> @owned RefElemClass {
bb0(%0 : $RefElemClass):
br bb1
bb1:
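// The apply of @potential_escape may change the uniqueness of the buffer, so
// the begin_cow_mutation/end_cow_mutation pair must stay inside the loop.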
(%u, %m) = begin_cow_mutation %0 : $RefElemClass
%b = end_cow_mutation %m : $RefElemClass
%f = function_ref @potential_escape : $@convention(thin) (@guaranteed RefElemClass) -> ()
%a = apply %f(%b) : $@convention(thin) (@guaranteed RefElemClass) -> ()
cond_br undef, bb1, bb2
bb2:
return %b : $RefElemClass
}
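// Test combined load/store hoisting and sinking: the load is hoisted into the
// preheader, the loop-carried value becomes a bb1 argument, and the store is
// sunk to the loop exit (see the CHECK lines below).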
// CHECK-LABEL: sil @hoist_load_and_store
// CHECK: [[V1:%[0-9]+]] = load %0
// CHECK: br bb1([[V1]] : $Int32)
// CHECK: bb1([[V2:%[0-9]+]] : $Int32):
// CHECK-NOT: load
// CHECK: [[E:%[0-9]+]] = struct_extract [[V2]]
// CHECK: "sadd_with_overflow_Int64"([[E]]
// CHECK: [[V3:%[0-9]+]] = struct $Int32
// CHECK-NOT: store
// CHECK: bb2:
// CHECK: br bb1([[V3]] : $Int32)
// CHECK: bb3:
// CHECK: store [[V3]] to %0
// CHECK: } // end sil function 'hoist_load_and_store'
sil @hoist_load_and_store : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
store %20 to %0 : $*Int32
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%12 = tuple ()
return %12 : $()
}
// Just make sure the optimizer does not crash when the operand of the
// store is the load itself.
sil @hoist_load_and_redundant_store : $@convention(thin) (@inout Int32) -> () {
bb0(%0 : $*Int32):
br bb1
bb1:
%1 = load %0 : $*Int32
store %1 to %0 : $*Int32
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%12 = tuple ()
return %12 : $()
}
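// Like hoist_load_and_store, but the value is stored on both sides of a
// diamond; the stores are replaced by a block argument of bb4 and a single
// store sunk to the loop exit.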
// CHECK-LABEL: sil @hoist_load_and_two_stores
// CHECK: [[V1:%[0-9]+]] = load %0
// CHECK: br bb1([[V1]] : $Int32)
// CHECK: bb1([[V2:%[0-9]+]] : $Int32):
// CHECK-NOT: load
// CHECK: [[E:%[0-9]+]] = struct_extract [[V2]]
// CHECK: "sadd_with_overflow_Int64"([[E]]
// CHECK: [[V3:%[0-9]+]] = struct $Int32
// CHECK: bb2:
// CHECK-NOT: store
// CHECK: br bb4([[V3]] : $Int32)
// CHECK: bb3:
// CHECK-NOT: store
// CHECK: br bb4([[V3]] : $Int32)
// CHECK: bb4([[V4:%[0-9]+]] : $Int32):
// CHECK: cond_br
// CHECK: bb5:
// CHECK: br bb1([[V4]] : $Int32)
// CHECK: bb6:
// CHECK: store [[V4]] to %0
// CHECK: } // end sil function 'hoist_load_and_two_stores'
sil @hoist_load_and_two_stores : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
cond_br undef, bb2, bb3
bb2:
store %20 to %0 : $*Int32
br bb4
bb3:
store %20 to %0 : $*Int32
br bb4
bb4:
cond_br undef, bb5, bb6
bb5:
br bb1
bb6:
%12 = tuple ()
return %12 : $()
}
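// The store in bb3 is bypassed on the bb2 path, so the in-loop stores do not
// jointly dominate the loop exit; neither the load nor the store can be moved
// out of the loop.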
// CHECK-LABEL: sil @dont_hoist_stores_not_dominating_exit
// CHECK: bb0(%0 : $*Int32, %1 : $Int32):
// CHECK-NOT: load
// CHECK: bb1:
// CHECK: load
// CHECK: bb3:
// CHECK: store
// CHECK: bb4:
// CHECK: bb6:
// CHECK-NOT: store
// CHECK: } // end sil function 'dont_hoist_stores_not_dominating_exit'
sil @dont_hoist_stores_not_dominating_exit : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
cond_br undef, bb2, bb3
bb2:
br bb4
bb3:
store %20 to %0 : $*Int32
br bb4
bb4:
cond_br undef, bb5, bb6
bb5:
br bb1
bb6:
%12 = tuple ()
return %12 : $()
}
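// Unlike the previous test, the store in the preheader appears to be what
// enables the transformation here: the load is hoisted, the conditional
// in-loop store is sunk to the exit, and the loop body keeps only the
// arithmetic (see the CHECK lines below).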
// CHECK-LABEL: sil @hoist_when_store_is_in_preheader
// CHECK: bb0(%0 : $*Int32, %1 : $Int32):
// CHECK: store
// CHECK: load
// CHECK: bb1(%{{[0-9]+}} : $Int32):
// CHECK-NOT: load
// CHECK-NOT: store
// CHECK: bb4([[P:%[0-9]+]] : $Int32):
// CHECK: bb6:
// CHECK: store [[P]] to %0
// CHECK: } // end sil function 'hoist_when_store_is_in_preheader'
sil @hoist_when_store_is_in_preheader : $@convention(thin) (@inout Int32, Int32) -> () {
bb0(%0 : $*Int32, %1 : $Int32):
%8 = struct_element_addr %0 : $*Int32, #Int32._value
%9 = struct_extract %1 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, 0
store %1 to %0 : $*Int32
br bb1
bb1:
%17 = load %8 : $*Builtin.Int32
%18 = builtin "sadd_with_overflow_Int64"(%17 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%19 = tuple_extract %18 : $(Builtin.Int32, Builtin.Int1), 0
%20 = struct $Int32 (%19 : $Builtin.Int32)
cond_br undef, bb2, bb3
bb2:
br bb4
bb3:
store %20 to %0 : $*Int32
br bb4
bb4:
cond_br undef, bb5, bb6
bb5:
br bb1
bb6:
%12 = tuple ()
return %12 : $()
}
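// Combined hoisting/sinking with two exiting edges: the load is hoisted to the
// preheader and a store is sunk into each exit block (bb3 and bb5).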
// CHECK-LABEL: sil @hoist_loads_and_stores_multiple_exits
// CHECK: [[V1:%[0-9]+]] = load %0
// CHECK: br bb1([[V1]] : $Int32)
// CHECK: bb1([[V2:%[0-9]+]] : $Int32):
// CHECK-NOT: load
// CHECK: [[E:%[0-9]+]] = struct_extract [[V2]]
// CHECK: "sadd_with_overflow_Int64"([[E]]
// CHECK: [[V3:%[0-9]+]] = struct $Int32
// CHECK: bb2:
// CHECK-NOT: store
// CHECK: cond_br undef, bb1([[V3]] : $Int32), bb3
// CHECK: bb3:
// CHECK: store [[V3]] to %0
// CHECK: br bb6
// CHECK: bb4:
// CHECK-NOT: store
// CHECK: cond_br undef, bb1([[V3]] : $Int32), bb5
// CHECK: bb5:
// CHECK: store [[V3]] to %0
// CHECK: br bb6
// CHECK: bb6:
// CHECK: } // end sil function 'hoist_loads_and_stores_multiple_exits'
sil @hoist_loads_and_stores_multiple_exits : $@convention(thin) (@inout Int32, Int32) -> () {
// %0 // users: %14, %17, %5, %2
// %1 // user: %3
bb0(%0 : $*Int32, %1 : $Int32):
%2 = struct_element_addr %0 : $*Int32, #Int32._value
%3 = struct_extract %1 : $Int32, #Int32._value // user: %9
%4 = integer_literal $Builtin.Int1, 0 // user: %9
%5 = load %0 : $*Int32 // user: %6
br bb1(%5 : $Int32) // id: %6
// %7 // user: %8
bb1(%7 : $Int32): // Preds: bb0 bb2 bb4
%8 = struct_extract %7 : $Int32, #Int32._value // user: %9
%9 = builtin "sadd_with_overflow_Int64"(%8 : $Builtin.Int32, %3 : $Builtin.Int32, %4 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1) // user: %10
%10 = tuple_extract %9 : $(Builtin.Int32, Builtin.Int1), 0 // user: %11
%11 = struct $Int32 (%10 : $Builtin.Int32) // users: %14, %17, %13, %16
cond_br undef, bb2, bb4 // id: %12
bb2: // Preds: bb1
cond_br undef, bb1(%11 : $Int32), bb3 // id: %13
bb3: // Preds: bb2
store %11 to %0 : $*Int32 // id: %14
br bb6 // id: %15
bb4: // Preds: bb1
cond_br undef, bb1(%11 : $Int32), bb5 // id: %16
bb5: // Preds: bb4
store %11 to %0 : $*Int32 // id: %17
br bb6 // id: %18
bb6: // Preds: bb3 bb5
%19 = tuple () // user: %20
return %19 : $() // id: %20
} // end sil function 'hoist_loads_and_stores_multiple_exits'
// ==================================================================
// Test combined load/store hoisting/sinking with aliases
struct Index {
@_hasStorage var value: Int64 { get set }
}
// -----------------------------------------------------------------------------
// Test combined load/store hoisting/sinking with obvious aliasing loads.
// The loop contains loads and stores to the same access path: alloc_stack -> #0 -> #0.
// However, they don't share the same projection instructions.
// LICM should still hoist the loads and sink the stores in a combined transformation.
//
// CHECK-LABEL: sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 {
// CHECK: bb0(%0 : $Int64):
// CHECK: store {{.*}} to %{{.*}} : $*Int64
// CHECK: load %{{.*}} : $*Int64
// CHECK: br bb1
// CHECK-NOT: {{(load|store)}}
// CHECK: bb3:
// CHECK-NOT: {{(load|store)}}
// CHECK: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testCombinedLdStAliasingLoad'
sil shared @testCombinedLdStAliasingLoad : $@convention(method) (Int64) -> Int64 {
bb0(%0 : $Int64):
%zero = integer_literal $Builtin.Int64, 0
%intz = struct $Int64(%zero : $Builtin.Int64)
%stackAddr = alloc_stack $Index
%outerAddr1 = struct_element_addr %stackAddr : $*Index, #Index.value
store %intz to %outerAddr1 : $*Int64
%innerAddr1 = struct_element_addr %outerAddr1 : $*Int64, #Int64._value
%outerAddr2 = struct_element_addr %stackAddr : $*Index, #Index.value
%innerAddr2 = struct_element_addr %outerAddr2 : $*Int64, #Int64._value
br bb1
bb1:
%val = load %innerAddr2 : $*Builtin.Int64
%intv = struct $Int64(%zero : $Builtin.Int64)
store %intv to %outerAddr2 : $*Int64
%val2 = load %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
dealloc_stack %stackAddr : $*Index
%result = struct $Int64(%val2 : $Builtin.Int64)
return %result : $Int64
}
// -----------------------------------------------------------------------------
// Test combined load/store hoisting/sinking with obvious aliasing stores.
// The two stores in the loop write overlapping locations, so nothing is hoisted or sunk here.
// CHECK-LABEL: sil shared @testCombinedLdStAliasingStore : $@convention(method) (Int64) -> Int64 {
// CHECK: bb0(%0 : $Int64):
// CHECK: store
// CHECK-NOT: {{(load|store)}}
// CHECK: bb1:
// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: cond_br
// CHECK-NOT: {{(load|store)}}
// CHECK: load
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testCombinedLdStAliasingStore'
sil shared @testCombinedLdStAliasingStore : $@convention(method) (Int64) -> Int64 {
bb0(%0 : $Int64):
%zero = integer_literal $Builtin.Int64, 0
%intz = struct $Int64(%zero : $Builtin.Int64)
%stackAddr = alloc_stack $Index
%outerAddr1 = struct_element_addr %stackAddr : $*Index, #Index.value
store %intz to %outerAddr1 : $*Int64
%innerAddr1 = struct_element_addr %outerAddr1 : $*Int64, #Int64._value
%outerAddr2 = struct_element_addr %stackAddr : $*Index, #Index.value
%innerAddr2 = struct_element_addr %outerAddr2 : $*Int64, #Int64._value
br bb1
bb1:
%val = load %innerAddr2 : $*Builtin.Int64
%intv = struct $Int64(%zero : $Builtin.Int64)
store %intv to %outerAddr2 : $*Int64
store %val to %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
dealloc_stack %stackAddr : $*Index
%final = load %innerAddr2 : $*Builtin.Int64
%result = struct $Int64(%final : $Builtin.Int64)
return %result : $Int64
}
// -----------------------------------------------------------------------------
// Test combined load/store hoisting/sinking with unknown aliasing loads.
// The two addresses are derived from unrelated raw pointers and may alias, so
// the loads and the store stay in the loop.
// CHECK-LABEL: sil shared @testCombinedLdStUnknownLoad : $@convention(method) (Int64, Builtin.RawPointer, Builtin.RawPointer) -> Int64 {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer, %2 : $Builtin.RawPointer):
// CHECK-NOT: {{(load|store)}}
// CHECK: bb1:
// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NEXT: load %{{.*}} : $*Builtin.Int64
// CHECK-NEXT: cond_br
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testCombinedLdStUnknownLoad'
sil shared @testCombinedLdStUnknownLoad : $@convention(method) (Int64, Builtin.RawPointer, Builtin.RawPointer) -> Int64 {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer, %2 : $Builtin.RawPointer):
%addr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%addr2 = pointer_to_address %2 : $Builtin.RawPointer to $*Index
%outerAddr1 = struct_element_addr %addr1 : $*Index, #Index.value
%outerAddr2 = struct_element_addr %addr2 : $*Index, #Index.value
%innerAddr1 = struct_element_addr %outerAddr1 : $*Int64, #Int64._value
%innerAddr2 = struct_element_addr %outerAddr2 : $*Int64, #Int64._value
br bb1
bb1:
%val = load %innerAddr2 : $*Builtin.Int64
store %0 to %outerAddr2 : $*Int64
%val2 = load %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = struct $Int64(%val2 : $Builtin.Int64)
return %result : $Int64
}
// -----------------------------------------------------------------------------
// Reduced test case from rdar://61246061
//
// Test miscompilation of BidirectionalCollection<IndexSet>._distance with
// combined load/store hoisting/sinking with multiple loads from
// aliasing addresses.
// getRange
sil @getRange : $@convention(thin) () -> Range<Int64>
// CHECK-LABEL: sil shared @testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 {
// CHECK: bb0(%0 : $Int64):
// CHECK: store %0 to %{{.*}} : $*Int64
// CHECK: load %{{.*}} : $*Int64
// CHECK-NOT: {{(load|store)}}
// CHECK: bb7:
// CHECK-NEXT: store %{{.*}} to %{{.*}} : $*Int64
// CHECK-NOT: {{(load|store)}}
// CHECK-LABEL: } // end sil function 'testLICMReducedCombinedLdStExtraProjection'
sil shared @testLICMReducedCombinedLdStExtraProjection : $@convention(method) (Int64) -> Int64 {
// %0 // users: %5, %1
bb0(%0 : $Int64):
%1 = struct_extract %0 : $Int64, #Int64._value // users: %35, %20
%2 = integer_literal $Builtin.Int64, 0 // user: %9
%3 = alloc_stack $Index // users: %41, %13, %4
%4 = struct_element_addr %3 : $*Index, #Index.value // users: %8, %5
store %0 to %4 : $*Int64 // id: %5
%6 = integer_literal $Builtin.Int64, 1 // user: %11
%7 = integer_literal $Builtin.Int1, -1 // user: %11
%8 = struct_element_addr %4 : $*Int64, #Int64._value // user: %34
br bb1(%2 : $Builtin.Int64) // id: %9
// %10 // user: %11
bb1(%10 : $Builtin.Int64): // Preds: bb8 bb0
%11 = builtin "sadd_with_overflow_Int64"(%10 : $Builtin.Int64, %6 : $Builtin.Int64, %7 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) // user: %12
%12 = tuple_extract %11 : $(Builtin.Int64, Builtin.Int1), 0 // users: %38, %37
%13 = struct_element_addr %3 : $*Index, #Index.value // users: %32, %27, %24, %14
%14 = struct_element_addr %13 : $*Int64, #Int64._value // user: %15
%15 = load %14 : $*Builtin.Int64 // user: %18
%16 = integer_literal $Builtin.Int64, 1 // user: %18
%17 = integer_literal $Builtin.Int1, -1 // user: %18
%18 = builtin "sadd_with_overflow_Int64"(%15 : $Builtin.Int64, %16 : $Builtin.Int64, %17 : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1) // user: %19
%19 = tuple_extract %18 : $(Builtin.Int64, Builtin.Int1), 0 // users: %26, %23, %20
%20 = builtin "cmp_eq_Int64"(%19 : $Builtin.Int64, %1 : $Builtin.Int64) : $Builtin.Int1 // user: %21
cond_br %20, bb2, bb3 // id: %21
bb2: // Preds: bb1
cond_br undef, bb4, bb5 // id: %22
bb3: // Preds: bb1
%23 = struct $Int64 (%19 : $Builtin.Int64) // user: %24
store %23 to %13 : $*Int64 // id: %24
br bb6 // id: %25
bb4: // Preds: bb2
%26 = struct $Int64 (%19 : $Builtin.Int64) // user: %27
store %26 to %13 : $*Int64 // id: %27
br bb6 // id: %28
bb5: // Preds: bb2
// function_ref getRange
%29 = function_ref @getRange : $@convention(thin) () -> Range<Int64> // user: %30
%30 = apply %29() : $@convention(thin) () -> Range<Int64> // user: %31
%31 = struct_extract %30 : $Range<Int64>, #Range.lowerBound // user: %32
store %31 to %13 : $*Int64 // id: %32
br bb6 // id: %33
bb6: // Preds: bb5 bb4 bb3
%34 = load %8 : $*Builtin.Int64 // user: %35
%35 = builtin "cmp_eq_Int64"(%34 : $Builtin.Int64, %1 : $Builtin.Int64) : $Builtin.Int1 // user: %36
cond_br %35, bb7, bb8 // id: %36
bb7: // Preds: bb6
br bb9(%12 : $Builtin.Int64) // id: %37
bb8: // Preds: bb6
br bb1(%12 : $Builtin.Int64) // id: %38
// %39 // user: %40
bb9(%39 : $Builtin.Int64): // Preds: bb7
%40 = struct $Int64 (%39 : $Builtin.Int64) // user: %42
dealloc_stack %3 : $*Index // id: %41
return %40 : $Int64 // id: %42
}
// testConditionalTrapInInfiniteSyncLoop and
// testConditionalTrapDominatingSyncLoopExit
//
// It's legal for the optimizer to consider code after the loop as
// always reachable, but when a loop has no exits, or when the loop's
// exits are dominated by a conditional statement, we should not
// consider conditional statements within the loop as dominating all
// possible execution paths through the loop. At least not when there
// is at least one path through the loop that contains a
// "synchronization point", such as a function that may contain a
// memory barrier, perform I/O, or exit the program.
sil @mayExit : $@convention(thin) () -> ()
// CHECK-LABEL: sil @testConditionalTrapInInfiniteSyncLoop : $@convention(thin) (Builtin.Int1, Builtin.Int1) -> () {
// CHECK: bb0
// CHECK-NOT: cond_fail
// CHECK: br bb1
// CHECK: bb1:
// CHECK: cond_br %0, bb2, bb3
// CHECK: bb2:
// CHECK: cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
// CHECK-LABEL: } // end sil function 'testConditionalTrapInInfiniteSyncLoop'
sil @testConditionalTrapInInfiniteSyncLoop : $@convention(thin) (Builtin.Int1, Builtin.Int1) -> () {
bb0(%0 : $Builtin.Int1, %1 : $Builtin.Int1):
br bb1
bb1: // loop head
cond_br %0, bb2, bb3
bb2: // maybe never executed
cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
br bb4
bb3:
// synchronization point: has "real" side-effects that we can't
// reorder with traps
%f = function_ref @mayExit : $@convention(thin) () -> ()
apply %f() : $@convention(thin) () -> ()
br bb4
bb4: // latch
br bb1
}
// CHECK-LABEL: sil @testConditionalTrapDominatingSyncLoopExit : $@convention(thin) (Builtin.Int1, Builtin.Int1, Builtin.Int1) -> () {
// CHECK: bb0
// CHECK-NOT: cond_fail
// CHECK: br bb1
// CHECK: bb1:
// CHECK: cond_br %0, bb2, bb4
// CHECK: bb2:
// CHECK: cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
// CHECK-LABEL: } // end sil function 'testConditionalTrapDominatingSyncLoopExit'
sil @testConditionalTrapDominatingSyncLoopExit : $@convention(thin) (Builtin.Int1, Builtin.Int1, Builtin.Int1) -> () {
bb0(%0 : $Builtin.Int1, %1 : $Builtin.Int1, %2 : $Builtin.Int1):
br bb1
bb1: // loop head
cond_br %0, bb2, bb4
bb2: // maybe never executed
cond_fail %1 : $Builtin.Int1, "arithmetic overflow"
cond_br %2, bb3, bb5
bb3: // tail
br bb1
bb4:
// synchronization point: has "real" side-effects that we can't
// reorder with traps
%f = function_ref @mayExit : $@convention(thin) () -> ()
apply %f() : $@convention(thin) () -> ()
br bb1
bb5:
%99 = tuple ()
return %99 : $()
}
// Test load splitting with a loop-invariant stored value. The loop
// will be empty after combined load/store hoisting/sinking.
//
// TODO: sink a struct_extract (or other side-effect-free instructions)
// with no uses in the loop.
//
// CHECK-LABEL: sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64
// CHECK: [[STOREDVAL:%.*]] = struct_extract %0 : $Int64, #Int64._value
// CHECK: br bb1([[PRELOAD]] : $Int64)
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64)
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1(%0 : $Int64)
// CHECK: bb3:
// CHECK-NEXT: store %0 to %{{.*}} : $*Int64
// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[PHI]] : $Int64, [[STOREDVAL]] : $Builtin.Int64)
// CHECK-LABEL: } // end sil function 'testLoadSplit'
sil shared @testLoadSplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value
br bb1
bb1:
%val1 = load %outerAddr1 : $*Index
%val2 = load %middleAddr1 : $*Int64
%outerAddr2 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%middleAddr2 = struct_element_addr %outerAddr2 : $*Index, #Index.value
store %0 to %middleAddr2 : $*Int64
%innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value
%val3 = load %innerAddr1 : $*Builtin.Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = tuple (%val1 : $Index, %val2 : $Int64, %val3 : $Builtin.Int64)
return %result : $(Index, Int64, Builtin.Int64)
}
// Test load splitting with a loop-varying stored value.
// CHECK-LABEL: sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
// CHECK: [[PRELOAD:%.*]] = load %{{.*}} : $*Int64
// CHECK: br bb1(%4 : $Int64)
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK-NEXT: [[OUTERVAL:%.*]] = struct $Index ([[PHI]] : $Int64)
// CHECK-NEXT: [[EXTRACT:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value
// CHECK-NEXT: builtin "uadd_with_overflow_Int32"([[EXTRACT]] : $Builtin.Int64
// CHECK-NEXT: tuple_extract
// CHECK-NEXT: [[ADD:%.*]] = struct $Int64
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1([[ADD]] : $Int64)
// CHECK: bb3:
// CHECK-NEXT: store [[ADD]] to %{{.*}} : $*Int64
// CHECK-NEXT: tuple ([[OUTERVAL]] : $Index, [[ADD]] : $Int64, [[EXTRACT]] : $Builtin.Int64)
// CHECK-LABEL: } // end sil function 'testLoadSplitPhi'
sil shared @testLoadSplitPhi : $@convention(method) (Int64, Builtin.RawPointer) -> (Index, Int64, Builtin.Int64) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%middleAddr1 = struct_element_addr %outerAddr1 : $*Index, #Index.value
%innerAddr1 = struct_element_addr %middleAddr1 : $*Int64, #Int64._value
br bb1
bb1:
%outerVal = load %outerAddr1 : $*Index
%innerVal = load %innerAddr1 : $*Builtin.Int64
%one = integer_literal $Builtin.Int64, 1
%zero = integer_literal $Builtin.Int1, 0
%add = builtin "uadd_with_overflow_Int32"(%innerVal : $Builtin.Int64, %one : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1)
%inc = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0
%outerAddr2 = pointer_to_address %1 : $Builtin.RawPointer to $*Index
%middleAddr2 = struct_element_addr %outerAddr2 : $*Index, #Index.value
%newVal = struct $Int64 (%inc : $Builtin.Int64)
store %newVal to %middleAddr2 : $*Int64
%middleVal = load %middleAddr1 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = tuple (%outerVal : $Index, %middleVal : $Int64, %innerVal : $Builtin.Int64)
return %result : $(Index, Int64, Builtin.Int64)
}
struct State {
@_hasStorage var valueSet: (Int64, Int64, Int64) { get set }
@_hasStorage var singleValue: Int64 { get set }
}
// Test that we can remove a store to an individual tuple element when
// the struct containing the tuple is used within the loop.
// The optimized loop should only contain the add operation and a phi, with no memory access.
//
// CHECK-LABEL: sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State {
// CHECK: bb0(%0 : $Builtin.RawPointer):
// CHECK: [[HOISTADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// ...Preload stored element #1
// CHECK: [[PRELOADADR:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1
// CHECK: [[PRELOAD:%.*]] = load [[PRELOADADR]] : $*Int64
// ...Split element 0
// CHECK: [[SPLIT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// CHECK: [[ELT0:%.*]] = load [[SPLIT0]] : $*Int64
// ...Split element 2
// CHECK: [[SPLIT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2
// CHECK: [[ELT2:%.*]] = load [[SPLIT2]] : $*Int64
// ...Split State.singleValue
// CHECK: [[SINGLEADR:%.*]] = struct_element_addr %{{.*}} : $*State, #State.singleValue
// CHECK: [[SINGLEVAL:%.*]] = load [[SINGLEADR]] : $*Int64
// ...Hoisted element 0
// CHECK: [[HOISTLOAD:%.*]] = load [[HOISTADR]] : $*Int64
// CHECK: [[HOISTVAL:%.*]] = struct_extract [[HOISTLOAD]] : $Int64, #Int64._value
// CHECK: br bb1([[PRELOAD]] : $Int64)
// ...Loop
// CHECK: bb1([[PHI:%.*]] : $Int64):
// CHECK-NEXT: [[TUPLE:%.*]] = tuple ([[ELT0]] : $Int64, [[PHI]] : $Int64, [[ELT2]] : $Int64)
// CHECK-NEXT: [[STRUCT:%.*]] = struct $State ([[TUPLE]] : $(Int64, Int64, Int64), [[SINGLEVAL]] : $Int64)
// CHECK-NEXT: [[ADDEND:%.*]] = struct_extract [[PHI]] : $Int64, #Int64._value
// CHECK-NEXT: [[UADD:%.*]] = builtin "uadd_with_overflow_Int32"([[HOISTVAL]] : $Builtin.Int64, [[ADDEND]] : $Builtin.Int64, %{{.*}} : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1)
// CHECK-NEXT: [[ADDVAL:%.*]] = tuple_extract [[UADD]] : $(Builtin.Int64, Builtin.Int1), 0
// CHECK-NEXT: [[ADDINT:%.*]] = struct $Int64 ([[ADDVAL]] : $Builtin.Int64)
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1([[ADDINT]] : $Int64)
// CHECK: bb3:
// CHECK-NEXT: store [[ADDINT]] to [[PRELOADADR]] : $*Int64
// CHECK-NEXT: return [[STRUCT]] : $State
// CHECK-LABEL: } // end sil function 'testTupleSplit'
sil shared @testTupleSplit : $@convention(method) (Builtin.RawPointer) -> State {
bb0(%0 : $Builtin.RawPointer):
%stateAddr = pointer_to_address %0 : $Builtin.RawPointer to $*State
%tupleAddr0 = struct_element_addr %stateAddr : $*State, #State.valueSet
%elementAddr0 = tuple_element_addr %tupleAddr0 : $*(Int64, Int64, Int64), 0
%tupleAddr1 = struct_element_addr %stateAddr : $*State, #State.valueSet
%elementAddr1 = tuple_element_addr %tupleAddr1 : $*(Int64, Int64, Int64), 1
%tupleAddr11 = struct_element_addr %stateAddr : $*State, #State.valueSet
%elementAddr11 = tuple_element_addr %tupleAddr11 : $*(Int64, Int64, Int64), 1
br bb1
bb1:
%state = load %stateAddr : $*State
%element0 = load %elementAddr0 : $*Int64
%val0 = struct_extract %element0 : $Int64, #Int64._value
%element1 = load %elementAddr1 : $*Int64
%val1 = struct_extract %element1 : $Int64, #Int64._value
%zero = integer_literal $Builtin.Int1, 0
%add = builtin "uadd_with_overflow_Int32"(%val0 : $Builtin.Int64, %val1 : $Builtin.Int64, %zero : $Builtin.Int1) : $(Builtin.Int64, Builtin.Int1)
%addVal = tuple_extract %add : $(Builtin.Int64, Builtin.Int1), 0
%addInt = struct $Int64 (%addVal : $Builtin.Int64)
store %addInt to %elementAddr11 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %state : $State
}
// Test multiple stores to disjoint access paths with a single load
// that spans both of them. The load should be split and hoisted, and
// the stores sunk.
// testCommonSplitLoad
// CHECK-LABEL: sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
// CHECK: [[ELT0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 0
// CHECK: [[V0:%.*]] = load [[ELT0]] : $*Int64
// CHECK: [[ELT2:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 2
// CHECK: [[V2:%.*]] = load [[ELT2]] : $*Int64
// CHECK: [[ELT1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64, Int64), 1
// CHECK: [[V1:%.*]] = load [[ELT1]] : $*Int64
// CHECK: br bb1([[V0]] : $Int64, [[V2]] : $Int64)
//
// Nothing in this loop except phis...
// CHECK: bb1([[PHI0:%.*]] : $Int64, [[PHI2:%.*]] : $Int64):
// CHECK-NEXT: [[RESULT:%.*]] = tuple ([[PHI0]] : $Int64, [[V1]] : $Int64, [[PHI2]] : $Int64)
// CHECK-NEXT: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK-NEXT: br bb1(%0 : $Int64, %0 : $Int64)
//
// Stores are all sunk...
// CHECK: bb3:
// CHECK: store %0 to [[ELT2]] : $*Int64
// CHECK: store %0 to [[ELT0]] : $*Int64
// CHECK: return [[RESULT]] : $(Int64, Int64, Int64)
// CHECK-LABEL: } // end sil function 'testCommonSplitLoad'
sil shared @testCommonSplitLoad : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, Int64, Int64) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, Int64, Int64)
br bb1
bb1:
%val1 = load %outerAddr1 : $*(Int64, Int64, Int64)
%elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 0
store %0 to %elementAddr0 : $*Int64
%elementAddr2 = tuple_element_addr %outerAddr1 : $*(Int64, Int64, Int64), 2
store %0 to %elementAddr2 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %val1 : $(Int64, Int64, Int64)
}
// Two stores, one to the outer tuple and one to the inner tuple. This
// results in two access paths that are only loaded/stored to. First
// split the outer tuple when processing the outer access path, then
// the inner tuple when processing the inner access path. All loads
// should be hoisted and all stores should be sunk.
//
// CHECK-LABEL: sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
// CHECK: [[ELT_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 0
// CHECK: [[V0:%.*]] = load [[ELT_0]] : $*Int64
// CHECK: [[ELT_1a:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1
// CHECK: [[ELT_1_0:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 0
// CHECK: [[V_1_0:%.*]] = load [[ELT_1_0]] : $*Int64
// CHECK: [[ELT_1b:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, (Int64, Int64)), 1
// CHECK: [[ELT_1_1:%.*]] = tuple_element_addr %{{.*}} : $*(Int64, Int64), 1
// CHECK: [[V_1_1:%.*]] = load [[ELT_1_1]] : $*Int64
// CHECK: br bb1([[V_0:%.*]] : $Int64, [[V_1_0]] : $Int64)
//
// Nothing in this loop except phis and tuple reconstruction...
// CHECK: bb1([[PHI_0:%.*]] : $Int64, [[PHI_1_0:%.*]] : $Int64):
// CHECK: [[INNER:%.*]] = tuple ([[PHI_1_0]] : $Int64, [[V_1_1]] : $Int64)
// CHECK: [[OUTER:%.*]] = tuple ([[PHI_0]] : $Int64, [[INNER]] : $(Int64, Int64))
// CHECK: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK: br bb1(%0 : $Int64, %0 : $Int64)
//
// The two stores are sunk...
// CHECK: bb3:
// CHECK: store %0 to [[ELT_1_0]] : $*Int64
// CHECK: store %0 to [[ELT_0]] : $*Int64
// CHECK: return [[OUTER]] : $(Int64, (Int64, Int64))
// CHECK-LABEL: } // end sil function 'testResplit'
sil shared @testResplit : $@convention(method) (Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%val1 = load %outerAddr1 : $*(Int64, (Int64, Int64))
%elementAddr0 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 0
store %0 to %elementAddr0 : $*Int64
%elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0
store %0 to %elementAddr10 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %val1 : $(Int64, (Int64, Int64))
}
// Two stores to overlapping access paths. Combined load/store hoisting
// cannot currently handle stores to overlapping access paths, so
// nothing is optimized.
// CHECK-LABEL: sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
// CHECK-LABEL: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
// CHECK-NOT: load
// CHECK: br bb1
// CHECK: bb1:
// CHECK: load %{{.*}} : $*(Int64, (Int64, Int64))
// CHECK: store {{.*}} : $*(Int64, Int64)
// CHECK: store {{.*}} : $*Int64
// CHECK: cond_br undef, bb2, bb3
// CHECK-NOT: store
// CHECK-LABEL: } // end sil function 'testTwoStores'
sil shared @testTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%val1 = load %outerAddr1 : $*(Int64, (Int64, Int64))
%elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%tuple = tuple (%0 : $Int64, %1 : $Int64)
store %tuple to %elementAddr1 : $*(Int64, Int64)
%elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0
store %1 to %elementAddr10 : $*Int64
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
return %val1 : $(Int64, (Int64, Int64))
}
// Two wide loads. The first can be successfully split and the second
// half of it hoisted. The second load cannot be split because of a pointer
// cast. Make sure the two remaining loads and the store are still in the loop.
//
// CHECK-LABEL: sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) {
// CHECK: bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
//
// The first load was split, so one half is hoisted.
// CHECK: [[V1:%.*]] = load %{{.*}} : $*Int64
// CHECK: br bb1
// CHECK: bb1:
// CHECK: [[V0:%.*]] = load %{{.*}} : $*Int64
// CHECK: [[INNER:%.*]] = tuple ([[V0]] : $Int64, [[V1]] : $Int64)
// CHECK: store %0 to %{{.*}} : $*Int64
// CHECK: [[OUTER:%.*]] = load %{{.*}} : $*(Int64, (Int64, Int64))
// CHECK: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK: br bb1
// CHECK: bb3:
// CHECK: [[RESULT:%.*]] = tuple ([[OUTER]] : $(Int64, (Int64, Int64)), [[INNER]] : $(Int64, Int64))
// CHECK: return [[RESULT]] : $((Int64, (Int64, Int64)), (Int64, Int64))
// CHECK-LABEL: } // end sil function 'testSplitNonStandardProjection'
sil hidden @testSplitNonStandardProjection : $@convention(method) (Int64, Builtin.RawPointer) -> ((Int64, (Int64, Int64)), (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %1 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%elt1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%ptr = address_to_pointer %elt1 : $*(Int64, Int64) to $Builtin.RawPointer
%ptrAdr = pointer_to_address %ptr : $Builtin.RawPointer to [strict] $*(Int64, Int64)
%val2 = load %ptrAdr : $*(Int64, Int64)
%eltptr0 = tuple_element_addr %ptrAdr : $*(Int64, Int64), 0
store %0 to %eltptr0 : $*Int64
// Process the outermost load after splitting the inner load
%val1 = load %outerAddr1 : $*(Int64, (Int64, Int64))
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%result = tuple (%val1 : $(Int64, (Int64, Int64)), %val2 : $(Int64, Int64))
return %result : $((Int64, (Int64, Int64)), (Int64, Int64))
}
// CHECK-LABEL: sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
// CHECK: bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
// CHECK: [[ELT_1:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 1
// CHECK: [[V1:%.*]] = load %4 : $*(Int64, Int64)
// CHECK: [[ELT_0:%.*]] = tuple_element_addr %3 : $*(Int64, (Int64, Int64)), 0
// CHECK: [[V0:%.*]] = load %6 : $*Int64
// CHECK: [[ARG0:%.*]] = tuple (%0 : $Int64, %0 : $Int64)
// CHECK: [[ARG0_0:%.*]] = tuple_extract %8 : $(Int64, Int64), 0
// CHECK: [[ARG1:%.*]] = tuple (%1 : $Int64, %1 : $Int64)
// CHECK: br bb1([[V1]] : $(Int64, Int64))
// CHECK: bb1([[PHI:%.*]] : $(Int64, Int64)):
// CHECK: [[LOOPVAL:%.*]] = tuple ([[V0]] : $Int64, [[PHI]] : $(Int64, Int64))
// CHECK: cond_br undef, bb2, bb3
// CHECK: bb2:
// CHECK: br bb1([[ARG1]] : $(Int64, Int64))
// CHECK: bb3:
// CHECK: store [[ARG1]] to [[ELT_1]] : $*(Int64, Int64)
// CHECK: [[EXTRACT0:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 0
// CHECK: [[EXTRACT1:%.*]] = tuple_extract [[LOOPVAL]] : $(Int64, (Int64, Int64)), 1
// CHECK: [[EXTRACT1_1:%.*]] = tuple_extract [[EXTRACT1]] : $(Int64, Int64), 1
// CHECK: [[TUPLE1:%.*]] = tuple ([[ARG0_0]] : $Int64, [[EXTRACT1_1]] : $Int64)
// CHECK: [[RESULT:%.*]] = tuple ([[EXTRACT0]] : $Int64, [[TUPLE1]] : $(Int64, Int64))
// CHECK: return [[RESULT]] : $(Int64, (Int64, Int64))
// CHECK-LABEL: } // end sil function 'testSameTwoStores'
sil shared @testSameTwoStores : $@convention(method) (Int64, Int64, Builtin.RawPointer) -> (Int64, (Int64, Int64)) {
bb0(%0 : $Int64, %1 : $Int64, %2 : $Builtin.RawPointer):
%outerAddr1 = pointer_to_address %2 : $Builtin.RawPointer to $*(Int64, (Int64, Int64))
br bb1
bb1:
%val = load %outerAddr1 : $*(Int64, (Int64, Int64))
%elementAddr1 = tuple_element_addr %outerAddr1 : $*(Int64, (Int64, Int64)), 1
%tupleA = tuple (%0 : $Int64, %0 : $Int64)
store %tupleA to %elementAddr1 : $*(Int64, Int64)
%elementAddr10 = tuple_element_addr %elementAddr1 : $*(Int64, Int64), 0
%val10 = load %elementAddr10 : $*Int64
%tupleB = tuple (%1 : $Int64, %1 : $Int64)
store %tupleB to %elementAddr1 : $*(Int64, Int64)
cond_br undef, bb2, bb3
bb2:
br bb1
bb3:
%extract0 = tuple_extract %val : $(Int64, (Int64, Int64)), 0
%extract1 = tuple_extract %val : $(Int64, (Int64, Int64)), 1
%extract11 = tuple_extract %extract1 : $(Int64, Int64), 1
%inner = tuple (%val10 : $Int64, %extract11 : $Int64)
%outer = tuple (%extract0 : $Int64, %inner : $(Int64, Int64))
return %outer : $(Int64, (Int64, Int64))
}