// RUN: %target-sil-opt -enable-sil-verify-all -inline -generic-specializer -inline %s > %t.sil
// RUN: %target-sil-opt -sil-full-demangle -enable-sil-verify-all -specdevirt %t.sil | FileCheck %s
// Check that the @_semantics("optimize.sil.never") annotation prevents
// any optimization of the annotated function:
// - No functions are inlined into the annotated function.
// - No calls are devirtualized in the body of the annotated function.
// - No generic specializations of the annotated function are generated.
// - The annotated function is not inlined into any other function.
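//
// For orientation, here is a rough Swift-source sketch of how the annotation is
// spelled at the language level. This is a hypothetical reconstruction that
// mirrors the foo1/boo1 declarations below, not the exact source this SIL was
// generated from:
//
//   @_semantics("optimize.sil.never")
//   public func foo1<T>(x: T, _ c: C) -> Int32 {
//     return transform1(x: x) + c.foo()   // neither call may be optimized away
//   }
//
//   public func boo1(c: C) -> Int32 {
//     return foo1(x: 1 as Int32, c)       // must stay an un-inlined call to foo1
//   }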
sil_stage canonical
import Builtin
import Swift
import SwiftShims
public class C {
@inline(never) func foo() -> Int32
deinit
init()
}
public class D : C {
@inline(never) override func foo() -> Int32
deinit
override init()
}
public class Base {
@_semantics("optimize.sil.never") func foo() -> Int32
func boo() -> Int32
deinit
init()
}
public class Derived1 : Base {
override func foo() -> Int32
@_semantics("optimize.sil.never") override func boo() -> Int32
deinit
override init()
}
public class Derived2 : Base {
override func foo() -> Int32
override func boo() -> Int32
deinit
override init()
}
public func transform1<T>(x: T) -> Int32
@_semantics("optimize.sil.never") public func foo1<T>(x: T, _ c: C) -> Int32
public func boo1(c: C) -> Int32
public func transform2(x: Int32) -> Int32
@_semantics("optimize.sil.never") public func foo2(x: Int32, _ c: C) -> Int32
public func boo2(c: C) -> Int32
public func transform3<T>(x: T) -> Int32
public func foo3<T>(x: T, _ c: C) -> Int32
public func boo3(c: C) -> Int32
public func transform4(x: Int32) -> Int32
public func foo4(x: Int32, _ c: C) -> Int32
public func boo4(c: C) -> Int32
sil_global [fragile] @_TZvOs7Process5_argcVs5Int32 : $Int32
sil_global private_external [fragile] @globalinit_33_1BDF70FFC18749BAB495A73B459ED2F0_token5 : $Builtin.Word
sil_global [fragile] @_TZvOs7Process11_unsafeArgvGSpGSpVs4Int8__ : $UnsafeMutablePointer<UnsafeMutablePointer<Int8>>
sil hidden [noinline] @_TFC14optimize_never1C3foofT_Vs5Int32 : $@convention(method) (@guaranteed C) -> Int32 {
bb0(%0 : $C):
%2 = integer_literal $Builtin.Int32, 1
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil [transparent] [fragile] @_TFVs5Int32CfT22_builtinIntegerLiteralBi2048__S_ : $@convention(thin) (Builtin.Int2048, @thin Int32.Type) -> Int32
sil @_TFC14optimize_never1CD : $@convention(method) (@owned C) -> () {
bb0(%0 : $C):
%2 = function_ref @_TFC14optimize_never1Cd : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
%3 = apply %2(%0) : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
%4 = unchecked_ref_cast %3 : $Builtin.NativeObject to $C
dealloc_ref %4 : $C
%6 = tuple ()
return %6 : $()
}
sil @_TFC14optimize_never1Cd : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject {
bb0(%0 : $C):
%2 = unchecked_ref_cast %0 : $C to $Builtin.NativeObject
return %2 : $Builtin.NativeObject
}
sil hidden @_TFC14optimize_never1CcfT_S0_ : $@convention(method) (@owned C) -> @owned C {
bb0(%0 : $C):
return %0 : $C
}
sil hidden [noinline] @_TFC14optimize_never1D3foofT_Vs5Int32 : $@convention(method) (@guaranteed D) -> Int32 {
bb0(%0 : $D):
%2 = integer_literal $Builtin.Int32, 11
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil @_TFC14optimize_never1DD : $@convention(method) (@owned D) -> () {
bb0(%0 : $D):
%2 = function_ref @_TFC14optimize_never1Dd : $@convention(method) (@guaranteed D) -> @owned Builtin.NativeObject
%3 = apply %2(%0) : $@convention(method) (@guaranteed D) -> @owned Builtin.NativeObject
%4 = unchecked_ref_cast %3 : $Builtin.NativeObject to $D
dealloc_ref %4 : $D
%6 = tuple ()
return %6 : $()
}
sil @_TFC14optimize_never1Dd : $@convention(method) (@guaranteed D) -> @owned Builtin.NativeObject {
bb0(%0 : $D):
%2 = upcast %0 : $D to $C
%3 = function_ref @_TFC14optimize_never1Cd : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
%4 = apply %3(%2) : $@convention(method) (@guaranteed C) -> @owned Builtin.NativeObject
return %4 : $Builtin.NativeObject
}
sil hidden @_TFC14optimize_never1DcfT_S0_ : $@convention(method) (@owned D) -> @owned D {
bb0(%0 : $D):
%1 = alloc_stack $D
store %0 to %1 : $*D
%3 = upcast %0 : $D to $C
%4 = function_ref @_TFC14optimize_never1CcfT_S0_ : $@convention(method) (@owned C) -> @owned C
%5 = apply %4(%3) : $@convention(method) (@owned C) -> @owned C
%6 = unchecked_ref_cast %5 : $C to $D
store %6 to %1 : $*D
dealloc_stack %1 : $*D
return %6 : $D
}
sil @_TF14optimize_never10transform1urFxVs5Int32 : $@convention(thin) <T> (@in T) -> Int32 {
bb0(%0 : $*T):
%2 = integer_literal $Builtin.Int32, 2
%3 = struct $Int32 (%2 : $Builtin.Int32)
destroy_addr %0 : $*T
return %3 : $Int32
}
// Check that neither call is inlined or devirtualized.
// CHECK-LABEL: sil [_semantics "optimize.sil.never"] @_TF14optimize_never4foo1urFTxCS_1C_Vs5Int32
// CHECK-NOT: checked_cast_br
// CHECK: function_ref @_TF14optimize_never10transform1urFxVs5Int32
// CHECK: apply
// CHECK-NOT: checked_cast_br
// CHECK: class_method {{%.*}} : $C, #C.foo!1 : (C) -> () -> Int32 , $@convention(method) (@guaranteed C) -> Int32
// CHECK: apply
// CHECK: return
sil [_semantics "optimize.sil.never"] @_TF14optimize_never4foo1urFTxCS_1C_Vs5Int32 : $@convention(thin) <T> (@in T, @owned C) -> Int32 {
bb0(%0 : $*T, %1 : $C):
%4 = function_ref @_TF14optimize_never10transform1urFxVs5Int32 : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%5 = alloc_stack $T
copy_addr %0 to [initialization] %5 : $*T
%7 = apply %4<T>(%5) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%8 = class_method %1 : $C, #C.foo!1 : (C) -> () -> Int32 , $@convention(method) (@guaranteed C) -> Int32
%9 = apply %8(%1) : $@convention(method) (@guaranteed C) -> Int32
%10 = struct_extract %7 : $Int32, #Int32._value
%11 = struct_extract %9 : $Int32, #Int32._value
%12 = integer_literal $Builtin.Int1, -1
%13 = builtin "sadd_with_overflow_Int32"(%10 : $Builtin.Int32, %11 : $Builtin.Int32, %12 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%14 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 0
%15 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %15 : $Builtin.Int1
%17 = struct $Int32 (%14 : $Builtin.Int32)
dealloc_stack %5 : $*T
strong_release %1 : $C
destroy_addr %0 : $*T
return %17 : $Int32
}
sil [transparent] [fragile] @_TZFsoi1pFTVs5Int32S__S_ : $@convention(thin) (Int32, Int32) -> Int32
// CHECK-LABEL: sil @_TF14optimize_never4boo1FCS_1CVs5Int32
// CHECK-NOT: return
// This call should not be inlined, because the callee is annotated with @_semantics("optimize.sil.never")
// CHECK: function_ref @_TF14optimize_never4foo1urFTxCS_1C_Vs5Int32
// CHECK: apply
// CHECK: return
sil @_TF14optimize_never4boo1FCS_1CVs5Int32 : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @_TF14optimize_never4foo1urFTxCS_1C_Vs5Int32 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
%5 = alloc_stack $Int32
store %4 to %5 : $*Int32
strong_retain %0 : $C
%8 = apply %2<Int32>(%5, %0) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
dealloc_stack %5 : $*Int32
strong_release %0 : $C
return %8 : $Int32
}
sil [transparent] [fragile] @_TFSiCfT22_builtinIntegerLiteralBi2048__Si : $@convention(thin) (Builtin.Int2048, @thin Int.Type) -> Int
sil @_TF14optimize_never10transform2FVs5Int32S0_ : $@convention(thin) (Int32) -> Int32 {
bb0(%0 : $Int32):
return %0 : $Int32
}
// Check that neither call is inlined or devirtualized.
// CHECK-LABEL: sil [_semantics "optimize.sil.never"] @_TF14optimize_never4foo2FTVs5Int32CS_1C_S0_
// CHECK-NOT: checked_cast_br
// CHECK: function_ref @_TF14optimize_never10transform2FVs5Int32S0_
// CHECK: apply
// CHECK-NOT: checked_cast_br
// CHECK: class_method {{%.*}} : $C, #C.foo!1 : (C) -> () -> Int32 , $@convention(method) (@guaranteed C) -> Int32
// CHECK: apply
// CHECK: return
sil [_semantics "optimize.sil.never"] @_TF14optimize_never4foo2FTVs5Int32CS_1C_S0_ : $@convention(thin) (Int32, @owned C) -> Int32 {
bb0(%0 : $Int32, %1 : $C):
%4 = function_ref @_TF14optimize_never10transform2FVs5Int32S0_ : $@convention(thin) (Int32) -> Int32
%5 = apply %4(%0) : $@convention(thin) (Int32) -> Int32
%6 = class_method %1 : $C, #C.foo!1 : (C) -> () -> Int32 , $@convention(method) (@guaranteed C) -> Int32
%7 = apply %6(%1) : $@convention(method) (@guaranteed C) -> Int32
%8 = struct_extract %5 : $Int32, #Int32._value
%9 = struct_extract %7 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, -1
%11 = builtin "sadd_with_overflow_Int32"(%8 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%12 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 0
%13 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %13 : $Builtin.Int1
%15 = struct $Int32 (%12 : $Builtin.Int32)
strong_release %1 : $C
return %15 : $Int32
}
// CHECK-LABEL: sil @_TF14optimize_never4boo2FCS_1CVs5Int32
// CHECK-NOT: return
// This call should not be inlined, because the callee is annotated with @_semantics("optimize.sil.never")
// CHECK: function_ref @_TF14optimize_never4foo2FTVs5Int32CS_1C_S0_
// CHECK: apply
// CHECK: return
sil @_TF14optimize_never4boo2FCS_1CVs5Int32 : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @_TF14optimize_never4foo2FTVs5Int32CS_1C_S0_ : $@convention(thin) (Int32, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
strong_retain %0 : $C
%6 = apply %2(%4, %0) : $@convention(thin) (Int32, @owned C) -> Int32
strong_release %0 : $C
return %6 : $Int32
}
// Check that no generic specialization was produced for foo1, because it is excluded from optimization.
// CHECK-NOT: generic specialization <Swift.Int> of {{.*}}.foo1
// But foo3 was specialized, because it is not annotated with @_semantics("optimize.sil.never")
// CHECK: generic specialization <Swift.Int32> of {{.*}}.foo3
sil @_TF14optimize_never10transform3urFxVs5Int32 : $@convention(thin) <T> (@in T) -> Int32 {
bb0(%0 : $*T):
%2 = integer_literal $Builtin.Int32, 2
%3 = struct $Int32 (%2 : $Builtin.Int32)
destroy_addr %0 : $*T
return %3 : $Int32
}
sil @_TF14optimize_never4foo3urFTxCS_1C_Vs5Int32 : $@convention(thin) <T> (@in T, @owned C) -> Int32 {
bb0(%0 : $*T, %1 : $C):
%4 = function_ref @_TF14optimize_never10transform3urFxVs5Int32 : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%5 = alloc_stack $T
copy_addr %0 to [initialization] %5 : $*T
%7 = apply %4<T>(%5) : $@convention(thin) <τ_0_0> (@in τ_0_0) -> Int32
%8 = class_method %1 : $C, #C.foo!1 : (C) -> () -> Int32 , $@convention(method) (@guaranteed C) -> Int32
%9 = apply %8(%1) : $@convention(method) (@guaranteed C) -> Int32
%10 = struct_extract %7 : $Int32, #Int32._value
%11 = struct_extract %9 : $Int32, #Int32._value
%12 = integer_literal $Builtin.Int1, -1
%13 = builtin "sadd_with_overflow_Int32"(%10 : $Builtin.Int32, %11 : $Builtin.Int32, %12 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%14 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 0
%15 = tuple_extract %13 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %15 : $Builtin.Int1
%17 = struct $Int32 (%14 : $Builtin.Int32)
dealloc_stack %5 : $*T
strong_release %1 : $C
destroy_addr %0 : $*T
return %17 : $Int32
}
// Everything in this function should get devirtualized, inlined, and folded together,
// because neither this function nor any of its callees is annotated to be
// excluded from optimization.
// CHECK-LABEL: sil @_TF14optimize_never4boo3FCS_1CVs5Int32
// CHECK: bb0
// CHECK-NOT: function_ref @_TF14optimize_never4foo3urFTxCS_1C_Vs5Int32
// The presence of checked_cast_br indicates that speculative devirtualization
// was performed, which is only possible if the original call was inlined
// (see the schematic sketch after this CHECK block).
// CHECK: checked_cast_br [exact] {{%.*}} : $C to $C, bb2, bb3
// CHECK: return
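// A schematic of the expected result for boo3 (illustrative only, not exact
// pass output): once foo3 is inlined here, the class_method call to C.foo that
// it contained becomes visible, and -specdevirt expands it roughly into
//
//   checked_cast_br [exact] %c : $C to $C, bb2, bb3
//   // bb2: direct (or constant-folded) call to C.foo
//   // bb3: handle the remaining cases, e.g. a cast to $D or the original
//   //      class_method dispatch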
sil @_TF14optimize_never4boo3FCS_1CVs5Int32 : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @_TF14optimize_never4foo3urFTxCS_1C_Vs5Int32 : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
%5 = alloc_stack $Int32
store %4 to %5 : $*Int32
strong_retain %0 : $C
%8 = apply %2<Int32>(%5, %0) : $@convention(thin) <τ_0_0> (@in τ_0_0, @owned C) -> Int32
dealloc_stack %5 : $*Int32
strong_release %0 : $C
return %8 : $Int32
}
sil @_TF14optimize_never10transform4FVs5Int32S0_ : $@convention(thin) (Int32) -> Int32 {
bb0(%0 : $Int32):
return %0 : $Int32
}
sil @_TF14optimize_never4foo4FTVs5Int32CS_1C_S0_ : $@convention(thin) (Int32, @owned C) -> Int32 {
bb0(%0 : $Int32, %1 : $C):
%4 = function_ref @_TF14optimize_never10transform4FVs5Int32S0_ : $@convention(thin) (Int32) -> Int32
%5 = apply %4(%0) : $@convention(thin) (Int32) -> Int32
%6 = class_method %1 : $C, #C.foo!1 : (C) -> () -> Int32 , $@convention(method) (@guaranteed C) -> Int32
%7 = apply %6(%1) : $@convention(method) (@guaranteed C) -> Int32
%8 = struct_extract %5 : $Int32, #Int32._value
%9 = struct_extract %7 : $Int32, #Int32._value
%10 = integer_literal $Builtin.Int1, -1
%11 = builtin "sadd_with_overflow_Int32"(%8 : $Builtin.Int32, %9 : $Builtin.Int32, %10 : $Builtin.Int1) : $(Builtin.Int32, Builtin.Int1)
%12 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 0
%13 = tuple_extract %11 : $(Builtin.Int32, Builtin.Int1), 1
cond_fail %13 : $Builtin.Int1
%15 = struct $Int32 (%12 : $Builtin.Int32)
strong_release %1 : $C
return %15 : $Int32
}
// Everything in this function should get devirtualized and inlined,
// because neither this function nor any of its callees is annotated to be
// excluded from optimization.
// CHECK-LABEL: sil @_TF14optimize_never4boo4FCS_1CVs5Int32
// CHECK: bb0
// CHECK-NOT: function_ref @_TF14optimize_never4foo4FTVs5Int32CS_1C_S0_
// The presence of checked_cast_br indicates that speculative devirtualization
// was performed, which is only possible if the original call was inlined.
// CHECK: checked_cast_br [exact] {{%.*}} : $C to $C, bb2, bb3
// CHECK: return
sil @_TF14optimize_never4boo4FCS_1CVs5Int32 : $@convention(thin) (@owned C) -> Int32 {
bb0(%0 : $C):
%2 = function_ref @_TF14optimize_never4foo4FTVs5Int32CS_1C_S0_ : $@convention(thin) (Int32, @owned C) -> Int32
%3 = integer_literal $Builtin.Int32, 1
%4 = struct $Int32 (%3 : $Builtin.Int32)
strong_retain %0 : $C
%6 = apply %2(%4, %0) : $@convention(thin) (Int32, @owned C) -> Int32
strong_release %0 : $C
return %6 : $Int32
}
sil hidden [_semantics "optimize.sil.never"] @_TFC14optimize_never4Base3foofT_Vs5Int32 : $@convention(method) (@guaranteed Base) -> Int32 {
bb0(%0 : $Base):
%2 = integer_literal $Builtin.Int32, 1
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @_TFC14optimize_never4Base3boofT_Vs5Int32 : $@convention(method) (@guaranteed Base) -> Int32 {
bb0(%0 : $Base):
%2 = integer_literal $Builtin.Int32, 2
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @_TFC14optimize_never8Derived13foofT_Vs5Int32 : $@convention(method) (@guaranteed Derived1) -> Int32 {
bb0(%0 : $Derived1):
%2 = integer_literal $Builtin.Int32, 3
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden [_semantics "optimize.sil.never"] @_TFC14optimize_never8Derived13boofT_Vs5Int32 : $@convention(method) (@guaranteed Derived1) -> Int32 {
bb0(%0 : $Derived1):
%2 = integer_literal $Builtin.Int32, 4
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @_TFC14optimize_never8Derived23foofT_Vs5Int32 : $@convention(method) (@guaranteed Derived2) -> Int32 {
bb0(%0 : $Derived2):
%2 = integer_literal $Builtin.Int32, 5
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
sil hidden @_TFC14optimize_never8Derived23boofT_Vs5Int32 : $@convention(method) (@guaranteed Derived2) -> Int32 {
bb0(%0 : $Derived2):
%2 = integer_literal $Builtin.Int32, 6
%3 = struct $Int32 (%2 : $Builtin.Int32)
return %3 : $Int32
}
// Check that the class_method call is not speculatively devirtualized, because
// the method foo in class Base, which is referenced by the class_method
// instruction, is marked with @_semantics("optimize.sil.never"). (A sketch of
// the rewriting that is being suppressed follows the CHECK lines below.)
//
// CHECK-LABEL: sil @_TF14optimize_never19testDoNotDevirtBaseFT1oCS_4Base_Vs5Int32
// CHECK: bb0
// CHECK-NOT: checked_cast_br
// CHECK: class_method {{%[0-9]+}} : $Base, #Base.foo!1 : (Base) -> () -> Int32
// CHECK-NOT: checked_cast_br
// CHECK: return
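// For contrast, if Base.foo were not annotated, -specdevirt would normally
// replace the class_method dispatch below with a speculative cast chain over
// the known subclasses, roughly of the form (schematic only):
//
//   checked_cast_br [exact] %0 : $Base to $Base, bbBase, bbTry1
//   // bbTry1: checked_cast_br [exact] %0 : $Base to $Derived1, ...
//   // ...with direct function_refs to the concrete foo implementations and a
//   //    class_method fallback in the final default block.
//
// The CHECK-NOT lines above verify that none of this rewriting happens here.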
sil @_TF14optimize_never19testDoNotDevirtBaseFT1oCS_4Base_Vs5Int32 : $@convention(thin) (@owned Base) -> Int32 {
bb0(%0 : $Base):
%2 = class_method %0 : $Base, #Base.foo!1 : (Base) -> () -> Int32, $@convention(method) (@guaranteed Base) -> Int32
%3 = apply %2(%0) : $@convention(method) (@guaranteed Base) -> Int32
strong_release %0 : $Base
return %3 : $Int32
}
// Check that Derived1.boo is not devirtualized, because this method
// is marked with @_semantics("optimize.sil.never") and thus should not
// participate in speculative devirtualization.
//
// CHECK-LABEL: sil @_TF14optimize_never23testDoNotDevirtDerived1FT1oCS_4Base_Vs5Int32
// CHECK: bb0
// CHECK: class_method {{%[0-9]+}} : $Base, #Base.boo!1 : (Base) -> () -> Int32
// CHECK: checked_cast_br [exact] {{%[0-9]+}} : $Base to $Base
// CHECK: checked_cast_br [exact] {{%[0-9]+}} : $Base to $Derived2
// CHECK-NOT: class_method
// CHECK: }
sil @_TF14optimize_never23testDoNotDevirtDerived1FT1oCS_4Base_Vs5Int32 : $@convention(thin) (@owned Base) -> Int32 {
bb0(%0 : $Base):
%2 = class_method %0 : $Base, #Base.boo!1 : (Base) -> () -> Int32 , $@convention(method) (@guaranteed Base) -> Int32
%3 = apply %2(%0) : $@convention(method) (@guaranteed Base) -> Int32
strong_release %0 : $Base
return %3 : $Int32
}
sil_vtable C {
#C.foo!1: _TFC14optimize_never1C3foofT_Vs5Int32
#C.deinit!deallocator: _TFC14optimize_never1CD
#C.init!initializer.1: _TFC14optimize_never1CcfT_S0_
}
sil_vtable D {
#C.foo!1: _TFC14optimize_never1D3foofT_Vs5Int32
#C.init!initializer.1: _TFC14optimize_never1DcfT_S0_
#D.deinit!deallocator: _TFC14optimize_never1DD
}
sil_vtable Base {
#Base.foo!1: _TFC14optimize_never4Base3foofT_Vs5Int32
#Base.boo!1: _TFC14optimize_never4Base3boofT_Vs5Int32
}
sil_vtable Derived1 {
#Base.foo!1: _TFC14optimize_never8Derived13foofT_Vs5Int32
#Base.boo!1: _TFC14optimize_never8Derived13boofT_Vs5Int32
}
sil_vtable Derived2 {
#Base.foo!1: _TFC14optimize_never8Derived23foofT_Vs5Int32
#Base.boo!1: _TFC14optimize_never8Derived23boofT_Vs5Int32
}