| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes |
| ; RUN: opt -attributor -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=2 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM |
| ; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=2 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM |
| ; RUN: opt -attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_NPM,IS__CGSCC____,IS________OPM,IS__CGSCC_OPM |
| ; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM |
| |
| ; Determine dereference-ability before unused loads get deleted: |
| ; https://bugs.llvm.org/show_bug.cgi?id=21780 |
| |
| ; Four consecutive 8-byte loads at offsets 0..3 are all guaranteed to execute, |
| ; so the Attributor infers align 8 and dereferenceable(32) on %ptr (see CHECK-SAME). |
| define <4 x double> @PR21780(double* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@PR21780 |
| ; CHECK-SAME: (double* nocapture nofree nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) |
| ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 1 |
| ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 2 |
| ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 3 |
| ; CHECK-NEXT: [[T0:%.*]] = load double, double* [[PTR]], align 8 |
| ; CHECK-NEXT: [[T1:%.*]] = load double, double* [[ARRAYIDX1]], align 8 |
| ; CHECK-NEXT: [[T2:%.*]] = load double, double* [[ARRAYIDX2]], align 8 |
| ; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8 |
| ; CHECK-NEXT: [[VECINIT0:%.*]] = insertelement <4 x double> undef, double [[T0]], i32 0 |
| ; CHECK-NEXT: [[VECINIT1:%.*]] = insertelement <4 x double> [[VECINIT0]], double [[T1]], i32 1 |
| ; CHECK-NEXT: [[VECINIT2:%.*]] = insertelement <4 x double> [[VECINIT1]], double [[T2]], i32 2 |
| ; CHECK-NEXT: [[VECINIT3:%.*]] = insertelement <4 x double> [[VECINIT2]], double [[T3]], i32 3 |
| ; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <4 x double> [[VECINIT3]], <4 x double> [[VECINIT3]], <4 x i32> <i32 0, i32 0, i32 2, i32 2> |
| ; CHECK-NEXT: ret <4 x double> [[SHUFFLE]] |
| ; |
| |
| ; GEP of index 0 is simplified away. |
| %arrayidx1 = getelementptr inbounds double, double* %ptr, i64 1 |
| %arrayidx2 = getelementptr inbounds double, double* %ptr, i64 2 |
| %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 3 |
| |
| %t0 = load double, double* %ptr, align 8 |
| %t1 = load double, double* %arrayidx1, align 8 |
| %t2 = load double, double* %arrayidx2, align 8 |
| %t3 = load double, double* %arrayidx3, align 8 |
| |
| %vecinit0 = insertelement <4 x double> undef, double %t0, i32 0 |
| %vecinit1 = insertelement <4 x double> %vecinit0, double %t1, i32 1 |
| %vecinit2 = insertelement <4 x double> %vecinit1, double %t2, i32 2 |
| %vecinit3 = insertelement <4 x double> %vecinit2, double %t3, i32 3 |
| %shuffle = shufflevector <4 x double> %vecinit3, <4 x double> %vecinit3, <4 x i32> <i32 0, i32 0, i32 2, i32 2> |
| ret <4 x double> %shuffle |
| } |
| |
| |
| ; Only offset 3 is loaded, but the GEP is inbounds, so the whole range |
| ; up to and including that access — 32 bytes — is inferred dereferenceable. |
| define double @PR21780_only_access3_with_inbounds(double* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@PR21780_only_access3_with_inbounds |
| ; CHECK-SAME: (double* nocapture nofree nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) |
| ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[PTR]], i64 3 |
| ; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8 |
| ; CHECK-NEXT: ret double [[T3]] |
| ; |
| |
| %arrayidx3 = getelementptr inbounds double, double* %ptr, i64 3 |
| %t3 = load double, double* %arrayidx3, align 8 |
| ret double %t3 |
| } |
| |
| ; Same as above but without inbounds: the lone access at offset 3 proves |
| ; nothing about the bytes before it, so no dereferenceable/nonnull is inferred. |
| define double @PR21780_only_access3_without_inbounds(double* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@PR21780_only_access3_without_inbounds |
| ; CHECK-SAME: (double* nocapture nofree readonly align 8 [[PTR:%.*]]) |
| ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, double* [[PTR]], i64 3 |
| ; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8 |
| ; CHECK-NEXT: ret double [[T3]] |
| ; |
| %arrayidx3 = getelementptr double, double* %ptr, i64 3 |
| %t3 = load double, double* %arrayidx3, align 8 |
| ret double %t3 |
| } |
| |
| ; Non-inbounds GEPs, but all four consecutive offsets 0..3 are actually |
| ; loaded, so dereferenceable(32) is still inferred from the accesses. |
| define double @PR21780_without_inbounds(double* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@PR21780_without_inbounds |
| ; CHECK-SAME: (double* nocapture nofree nonnull readonly align 8 dereferenceable(32) [[PTR:%.*]]) |
| ; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr double, double* [[PTR]], i64 3 |
| ; CHECK-NEXT: [[T3:%.*]] = load double, double* [[ARRAYIDX3]], align 8 |
| ; CHECK-NEXT: ret double [[T3]] |
| ; |
| |
| %arrayidx1 = getelementptr double, double* %ptr, i64 1 |
| %arrayidx2 = getelementptr double, double* %ptr, i64 2 |
| %arrayidx3 = getelementptr double, double* %ptr, i64 3 |
| |
| %t0 = load double, double* %ptr, align 8 |
| %t1 = load double, double* %arrayidx1, align 8 |
| %t2 = load double, double* %arrayidx2, align 8 |
| %t3 = load double, double* %arrayidx3, align 8 |
| |
| ret double %t3 |
| } |
| |
| ; Unsimplified, but still valid. Also, throw in some bogus arguments. |
| |
| ; %unused is never touched (readnone); the three i8 loads make %ptr |
| ; dereferenceable(3); the store makes %other writeonly dereferenceable(1). |
| define void @gep0(i8* %unused, i8* %other, i8* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@gep0 |
| ; CHECK-SAME: (i8* nocapture nofree readnone [[UNUSED:%.*]], i8* nocapture nofree nonnull writeonly dereferenceable(1) [[OTHER:%.*]], i8* nocapture nofree nonnull readonly dereferenceable(3) [[PTR:%.*]]) |
| ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr i8, i8* [[PTR]], i64 2 |
| ; CHECK-NEXT: [[T2:%.*]] = load i8, i8* [[ARRAYIDX2]], align 1 |
| ; CHECK-NEXT: store i8 [[T2]], i8* [[OTHER]], align 1 |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx0 = getelementptr i8, i8* %ptr, i64 0 |
| %arrayidx1 = getelementptr i8, i8* %ptr, i64 1 |
| %arrayidx2 = getelementptr i8, i8* %ptr, i64 2 |
| %t0 = load i8, i8* %arrayidx0 |
| %t1 = load i8, i8* %arrayidx1 |
| %t2 = load i8, i8* %arrayidx2 |
| store i8 %t2, i8* %other |
| ret void |
| } |
| |
| ; Order of accesses does not change computation. |
| ; Multiple arguments may be dereferenceable. |
| |
| ; Loads on the two pointers are interleaved and out of order; both arguments |
| ; still get their full dereferenceable ranges (3 bytes and 8 bytes). |
| define void @ordering(i8* %ptr1, i32* %ptr2) { |
| ; CHECK-LABEL: define {{[^@]+}}@ordering |
| ; CHECK-SAME: (i8* nocapture nofree nonnull readnone dereferenceable(3) [[PTR1:%.*]], i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR2:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %a20 = getelementptr i32, i32* %ptr2, i64 0 |
| %a12 = getelementptr i8, i8* %ptr1, i64 2 |
| %t12 = load i8, i8* %a12 |
| %a11 = getelementptr i8, i8* %ptr1, i64 1 |
| %t20 = load i32, i32* %a20 |
| %a10 = getelementptr i8, i8* %ptr1, i64 0 |
| %t10 = load i8, i8* %a10 |
| %t11 = load i8, i8* %a11 |
| %a21 = getelementptr i32, i32* %ptr2, i64 1 |
| %t21 = load i32, i32* %a21 |
| ret void |
| } |
| |
| ; Not in entry block. |
| |
| ; The loads are not in the entry block, but %exit is reached unconditionally, |
| ; so they are guaranteed to execute and dereferenceable(3) is still inferred. |
| define void @not_entry_but_guaranteed_to_execute(i8* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@not_entry_but_guaranteed_to_execute |
| ; CHECK-SAME: (i8* nocapture nofree nonnull readnone dereferenceable(3) [[PTR:%.*]]) |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br label [[EXIT:%.*]] |
| ; CHECK: exit: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| br label %exit |
| exit: |
| %arrayidx0 = getelementptr i8, i8* %ptr, i64 0 |
| %arrayidx1 = getelementptr i8, i8* %ptr, i64 1 |
| %arrayidx2 = getelementptr i8, i8* %ptr, i64 2 |
| %t0 = load i8, i8* %arrayidx0 |
| %t1 = load i8, i8* %arrayidx1 |
| %t2 = load i8, i8* %arrayidx2 |
| ret void |
| } |
| |
| ; Not in entry block and not guaranteed to execute. |
| |
| ; The loads sit behind a conditional branch and may never execute, |
| ; so no nonnull/dereferenceable information is inferred for %ptr. |
| define void @not_entry_not_guaranteed_to_execute(i8* %ptr, i1 %cond) { |
| ; CHECK-LABEL: define {{[^@]+}}@not_entry_not_guaranteed_to_execute |
| ; CHECK-SAME: (i8* nocapture nofree readnone [[PTR:%.*]], i1 [[COND:%.*]]) |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br i1 [[COND]], label [[LOADS:%.*]], label [[EXIT:%.*]] |
| ; CHECK: loads: |
| ; CHECK-NEXT: ret void |
| ; CHECK: exit: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| br i1 %cond, label %loads, label %exit |
| loads: |
| %arrayidx0 = getelementptr i8, i8* %ptr, i64 0 |
| %arrayidx1 = getelementptr i8, i8* %ptr, i64 1 |
| %arrayidx2 = getelementptr i8, i8* %ptr, i64 2 |
| %t0 = load i8, i8* %arrayidx0 |
| %t1 = load i8, i8* %arrayidx1 |
| %t2 = load i8, i8* %arrayidx2 |
| ret void |
| exit: |
| ret void |
| } |
| |
| ; The last load may not execute, so the dereferenceable byte count only covers the first two loads. |
| |
| ; Only the two i16 loads in the entry block are guaranteed to execute, |
| ; so the inferred range is dereferenceable(4), not 6. |
| define void @partial_in_entry(i16* %ptr, i1 %cond) { |
| ; CHECK-LABEL: define {{[^@]+}}@partial_in_entry |
| ; CHECK-SAME: (i16* nocapture nofree nonnull readnone align 2 dereferenceable(4) [[PTR:%.*]], i1 [[COND:%.*]]) |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br i1 [[COND]], label [[LOADS:%.*]], label [[EXIT:%.*]] |
| ; CHECK: loads: |
| ; CHECK-NEXT: ret void |
| ; CHECK: exit: |
| ; CHECK-NEXT: ret void |
| ; |
| entry: |
| %arrayidx0 = getelementptr i16, i16* %ptr, i64 0 |
| %arrayidx1 = getelementptr i16, i16* %ptr, i64 1 |
| %arrayidx2 = getelementptr i16, i16* %ptr, i64 2 |
| %t0 = load i16, i16* %arrayidx0 |
| %t1 = load i16, i16* %arrayidx1 |
| br i1 %cond, label %loads, label %exit |
| loads: |
| %t2 = load i16, i16* %arrayidx2 |
| ret void |
| exit: |
| ret void |
| } |
| |
| ; The volatile load can't be used to prove a non-volatile access is allowed. |
| ; The 2nd and 3rd loads may never execute. |
| |
| ; The volatile load must be kept and does not justify speculation, so no |
| ; dereferenceable bytes are inferred; only nofree and align 2 survive. |
| define void @volatile_is_not_dereferenceable(i16* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@volatile_is_not_dereferenceable |
| ; CHECK-SAME: (i16* nofree align 2 [[PTR:%.*]]) |
| ; CHECK-NEXT: [[ARRAYIDX0:%.*]] = getelementptr i16, i16* [[PTR]], i64 0 |
| ; CHECK-NEXT: [[T0:%.*]] = load volatile i16, i16* [[ARRAYIDX0]], align 2 |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx0 = getelementptr i16, i16* %ptr, i64 0 |
| %arrayidx1 = getelementptr i16, i16* %ptr, i64 1 |
| %arrayidx2 = getelementptr i16, i16* %ptr, i64 2 |
| %t0 = load volatile i16, i16* %arrayidx0 |
| %t1 = load i16, i16* %arrayidx1 |
| %t2 = load i16, i16* %arrayidx2 |
| ret void |
| } |
| |
| ; TODO: We should allow inference for atomic (but not volatile) ops. |
| |
| ; NOTE(review): the CHECK line below already shows dereferenceable(6) being |
| ; inferred through the unordered atomic load, so the TODO above may be stale |
| ; — confirm against the current Attributor behavior. |
| define void @atomic_is_alright(i16* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@atomic_is_alright |
| ; CHECK-SAME: (i16* nocapture nofree nonnull readnone align 2 dereferenceable(6) [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx0 = getelementptr i16, i16* %ptr, i64 0 |
| %arrayidx1 = getelementptr i16, i16* %ptr, i64 1 |
| %arrayidx2 = getelementptr i16, i16* %ptr, i64 2 |
| %t0 = load atomic i16, i16* %arrayidx0 unordered, align 2 |
| %t1 = load i16, i16* %arrayidx1 |
| %t2 = load i16, i16* %arrayidx2 |
| ret void |
| } |
| |
| ; Opaque external function with no attributes: it is not known to return, |
| ; so execution of instructions after a call to it is not guaranteed. |
| declare void @may_not_return() |
| |
| ; Only the load before the call is guaranteed to execute; the call may not |
| ; return, so the inferred range stops at dereferenceable(2). |
| define void @not_guaranteed_to_transfer_execution(i16* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@not_guaranteed_to_transfer_execution |
| ; CHECK-SAME: (i16* nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) |
| ; CHECK-NEXT: call void @may_not_return() |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx0 = getelementptr i16, i16* %ptr, i64 0 |
| %arrayidx1 = getelementptr i16, i16* %ptr, i64 1 |
| %arrayidx2 = getelementptr i16, i16* %ptr, i64 2 |
| %t0 = load i16, i16* %arrayidx0 |
| call void @may_not_return() |
| %t1 = load i16, i16* %arrayidx1 |
| %t2 = load i16, i16* %arrayidx2 |
| ret void |
| } |
| |
| ; We must have consecutive accesses. |
| |
| ; The variable index breaks the chain of consecutive accesses; only the |
| ; direct load at offset 0 counts, giving dereferenceable(1). |
| define void @variable_gep_index(i8* %unused, i8* %ptr, i64 %variable_index) { |
| ; CHECK-LABEL: define {{[^@]+}}@variable_gep_index |
| ; CHECK-SAME: (i8* nocapture nofree readnone [[UNUSED:%.*]], i8* nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]], i64 [[VARIABLE_INDEX:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx1 = getelementptr i8, i8* %ptr, i64 %variable_index |
| %arrayidx2 = getelementptr i8, i8* %ptr, i64 2 |
| %t0 = load i8, i8* %ptr |
| %t1 = load i8, i8* %arrayidx1 |
| %t2 = load i8, i8* %arrayidx2 |
| ret void |
| } |
| |
| ; Deal with >1 GEP index. |
| |
| ; GEP with two indices reaching the first byte of the vector; currently |
| ; only that one byte is accounted for (see FIXME below). |
| define void @multi_index_gep(<4 x i8>* %ptr) { |
| ; FIXME: %ptr should be dereferenceable(4) |
| ; CHECK-LABEL: define {{[^@]+}}@multi_index_gep |
| ; CHECK-SAME: (<4 x i8>* nocapture nofree nonnull readnone dereferenceable(1) [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx00 = getelementptr <4 x i8>, <4 x i8>* %ptr, i64 0, i64 0 |
| %t0 = load i8, i8* %arrayidx00 |
| ret void |
| } |
| |
| ; Could round weird bitwidths down? |
| |
| ; An i9 load occupies two storage bytes, so the inference yields |
| ; dereferenceable(2) rather than rounding the odd bit-width down. |
| define void @not_byte_multiple(i9* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@not_byte_multiple |
| ; CHECK-SAME: (i9* nocapture nofree nonnull readnone align 2 dereferenceable(2) [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx0 = getelementptr i9, i9* %ptr, i64 0 |
| %t0 = load i9, i9* %arrayidx0 |
| ret void |
| } |
| |
| ; Missing direct access from the pointer. |
| |
| ; There is no access at offset 0, so no dereferenceability (and no nonnull) |
| ; is inferred for %ptr itself. |
| define void @no_pointer_deref(i16* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@no_pointer_deref |
| ; CHECK-SAME: (i16* nocapture nofree readnone align 2 [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx1 = getelementptr i16, i16* %ptr, i64 1 |
| %arrayidx2 = getelementptr i16, i16* %ptr, i64 2 |
| %t1 = load i16, i16* %arrayidx1 |
| %t2 = load i16, i16* %arrayidx2 |
| ret void |
| } |
| |
| ; Out-of-order accesses are fine, but a missing access ends the provable dereferenceable range. |
| |
| ; Accesses at indices 0, 1, and 3 (index 2 missing): the provable range |
| ; stops at the gap, giving dereferenceable(8) instead of 16. |
| define void @non_consecutive(i32* %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@non_consecutive |
| ; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx1 = getelementptr i32, i32* %ptr, i64 1 |
| %arrayidx0 = getelementptr i32, i32* %ptr, i64 0 |
| %arrayidx3 = getelementptr i32, i32* %ptr, i64 3 |
| %t1 = load i32, i32* %arrayidx1 |
| %t0 = load i32, i32* %arrayidx0 |
| %t3 = load i32, i32* %arrayidx3 |
| ret void |
| } |
| |
| ; Improve on existing dereferenceable attribute. |
| |
| ; All of indices 0..3 are loaded, so the incoming dereferenceable(8) |
| ; attribute is improved to dereferenceable(16). |
| define void @more_bytes(i32* dereferenceable(8) %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@more_bytes |
| ; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx3 = getelementptr i32, i32* %ptr, i64 3 |
| %arrayidx1 = getelementptr i32, i32* %ptr, i64 1 |
| %arrayidx0 = getelementptr i32, i32* %ptr, i64 0 |
| %arrayidx2 = getelementptr i32, i32* %ptr, i64 2 |
| %t3 = load i32, i32* %arrayidx3 |
| %t1 = load i32, i32* %arrayidx1 |
| %t2 = load i32, i32* %arrayidx2 |
| %t0 = load i32, i32* %arrayidx0 |
| ret void |
| } |
| |
| ; Improve on existing dereferenceable_or_null attribute. |
| |
| ; dereferenceable_or_null(8) is promoted to nonnull dereferenceable(16): |
| ; the unconditional loads rule out null and extend the range. |
| define void @more_bytes_and_not_null(i32* dereferenceable_or_null(8) %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@more_bytes_and_not_null |
| ; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx3 = getelementptr i32, i32* %ptr, i64 3 |
| %arrayidx1 = getelementptr i32, i32* %ptr, i64 1 |
| %arrayidx0 = getelementptr i32, i32* %ptr, i64 0 |
| %arrayidx2 = getelementptr i32, i32* %ptr, i64 2 |
| %t3 = load i32, i32* %arrayidx3 |
| %t1 = load i32, i32* %arrayidx1 |
| %t2 = load i32, i32* %arrayidx2 |
| %t0 = load i32, i32* %arrayidx0 |
| ret void |
| } |
| |
| ; But don't pessimize existing dereferenceable attribute. |
| |
| ; The loads only prove 16 bytes, but the existing dereferenceable(100) |
| ; attribute is larger and must be kept, not shrunk. |
| define void @better_bytes(i32* dereferenceable(100) %ptr) { |
| ; CHECK-LABEL: define {{[^@]+}}@better_bytes |
| ; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(100) [[PTR:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %arrayidx3 = getelementptr i32, i32* %ptr, i64 3 |
| %arrayidx1 = getelementptr i32, i32* %ptr, i64 1 |
| %arrayidx0 = getelementptr i32, i32* %ptr, i64 0 |
| %arrayidx2 = getelementptr i32, i32* %ptr, i64 2 |
| %t3 = load i32, i32* %arrayidx3 |
| %t1 = load i32, i32* %arrayidx1 |
| %t2 = load i32, i32* %arrayidx2 |
| %t0 = load i32, i32* %arrayidx0 |
| ret void |
| } |
| |
| ; Accesses through the bitcasted float* still count toward the original |
| ; i32* argument: two 4-byte loads give dereferenceable(8). |
| define void @bitcast(i32* %arg) { |
| ; CHECK-LABEL: define {{[^@]+}}@bitcast |
| ; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(8) [[ARG:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %ptr = bitcast i32* %arg to float* |
| %arrayidx0 = getelementptr float, float* %ptr, i64 0 |
| %arrayidx1 = getelementptr float, float* %ptr, i64 1 |
| %t0 = load float, float* %arrayidx0 |
| %t1 = load float, float* %arrayidx1 |
| ret void |
| } |
| |
| ; Byte counts follow the access type, not the argument type: three float |
| ; loads give %arg1 dereferenceable(12); two i64 loads give %arg2 dereferenceable(16). |
| define void @bitcast_different_sizes(double* %arg1, i8* %arg2) { |
| ; CHECK-LABEL: define {{[^@]+}}@bitcast_different_sizes |
| ; CHECK-SAME: (double* nocapture nofree nonnull readnone align 4 dereferenceable(12) [[ARG1:%.*]], i8* nocapture nofree nonnull readnone align 4 dereferenceable(16) [[ARG2:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %ptr1 = bitcast double* %arg1 to float* |
| %a10 = getelementptr float, float* %ptr1, i64 0 |
| %a11 = getelementptr float, float* %ptr1, i64 1 |
| %a12 = getelementptr float, float* %ptr1, i64 2 |
| %ld10 = load float, float* %a10 |
| %ld11 = load float, float* %a11 |
| %ld12 = load float, float* %a12 |
| |
| %ptr2 = bitcast i8* %arg2 to i64* |
| %a20 = getelementptr i64, i64* %ptr2, i64 0 |
| %a21 = getelementptr i64, i64* %ptr2, i64 1 |
| %ld20 = load i64, i64* %a20 |
| %ld21 = load i64, i64* %a21 |
| ret void |
| } |
| |
| ; The load at offset -1 cannot extend the range forward; only the access |
| ; at offset 0 counts, so just dereferenceable(4) is inferred. |
| define void @negative_offset(i32* %arg) { |
| ; CHECK-LABEL: define {{[^@]+}}@negative_offset |
| ; CHECK-SAME: (i32* nocapture nofree nonnull readnone align 4 dereferenceable(4) [[ARG:%.*]]) |
| ; CHECK-NEXT: ret void |
| ; |
| %ptr = bitcast i32* %arg to float* |
| %arrayidx0 = getelementptr float, float* %ptr, i64 0 |
| %arrayidx1 = getelementptr float, float* %ptr, i64 -1 |
| %t0 = load float, float* %arrayidx0 |
| %t1 = load float, float* %arrayidx1 |
| ret void |
| } |
| |
| ; Stores establish dereferenceability just like loads: two consecutive |
| ; float stores give writeonly dereferenceable(8). |
| define void @stores(i32* %arg) { |
| ; CHECK-LABEL: define {{[^@]+}}@stores |
| ; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) |
| ; CHECK-NEXT: [[PTR:%.*]] = bitcast i32* [[ARG]] to float* |
| ; CHECK-NEXT: [[ARRAYIDX0:%.*]] = getelementptr float, float* [[PTR]], i64 0 |
| ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, float* [[PTR]], i64 1 |
| ; CHECK-NEXT: store float 1.000000e+00, float* [[ARRAYIDX0]], align 4 |
| ; CHECK-NEXT: store float 2.000000e+00, float* [[ARRAYIDX1]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %ptr = bitcast i32* %arg to float* |
| %arrayidx0 = getelementptr float, float* %ptr, i64 0 |
| %arrayidx1 = getelementptr float, float* %ptr, i64 1 |
| store float 1.0, float* %arrayidx0 |
| store float 2.0, float* %arrayidx1 |
| ret void |
| } |
| |
| ; Mixed load + store over offsets 0 and 1 still yields dereferenceable(8); |
| ; the unused load is deleted, leaving the argument writeonly. |
| define void @load_store(i32* %arg) { |
| ; CHECK-LABEL: define {{[^@]+}}@load_store |
| ; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 4 dereferenceable(8) [[ARG:%.*]]) |
| ; CHECK-NEXT: [[PTR:%.*]] = bitcast i32* [[ARG]] to float* |
| ; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr float, float* [[PTR]], i64 1 |
| ; CHECK-NEXT: store float 2.000000e+00, float* [[ARRAYIDX1]], align 4 |
| ; CHECK-NEXT: ret void |
| ; |
| %ptr = bitcast i32* %arg to float* |
| %arrayidx0 = getelementptr float, float* %ptr, i64 0 |
| %arrayidx1 = getelementptr float, float* %ptr, i64 1 |
| %t1 = load float, float* %arrayidx0 |
| store float 2.0, float* %arrayidx1 |
| ret void |
| } |
| |
| ; Two differently-sized stores to the same pointer: the 8-byte double |
| ; store dominates, yielding align 8 dereferenceable(8). |
| define void @different_size1(i32* %arg) { |
| ; CHECK-LABEL: define {{[^@]+}}@different_size1 |
| ; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) |
| ; CHECK-NEXT: [[ARG_CAST:%.*]] = bitcast i32* [[ARG]] to double* |
| ; CHECK-NEXT: store double 0.000000e+00, double* [[ARG_CAST]], align 8 |
| ; CHECK-NEXT: store i32 0, i32* [[ARG]], align 8 |
| ; CHECK-NEXT: ret void |
| ; |
| %arg-cast = bitcast i32* %arg to double* |
| store double 0.000000e+00, double* %arg-cast |
| store i32 0, i32* %arg |
| ret void |
| } |
| |
| ; Same as @different_size1 with the store order reversed: the result |
| ; (align 8 dereferenceable(8)) is unchanged. |
| define void @different_size2(i32* %arg) { |
| ; CHECK-LABEL: define {{[^@]+}}@different_size2 |
| ; CHECK-SAME: (i32* nocapture nofree nonnull writeonly align 8 dereferenceable(8) [[ARG:%.*]]) |
| ; CHECK-NEXT: store i32 0, i32* [[ARG]], align 8 |
| ; CHECK-NEXT: [[ARG_CAST:%.*]] = bitcast i32* [[ARG]] to double* |
| ; CHECK-NEXT: store double 0.000000e+00, double* [[ARG_CAST]], align 8 |
| ; CHECK-NEXT: ret void |
| ; |
| store i32 0, i32* %arg |
| %arg-cast = bitcast i32* %arg to double* |
| store double 0.000000e+00, double* %arg-cast |
| ret void |
| } |
| |
| ; Make use of MustBeExecuted Explorer |
| ; |
| ; [CFG] |
| ; entry |
| ; / \ |
| ; l1 l2 |
| ; | X | |
| ; l3 l4 |
| ; \ / |
| ; l5 |
| ; / \ |
| ; l6 l7 |
| ; \ / |
| ; end |
| ; According to the above CFG, we can see that instructions in l5 Block must be executed. |
| ; Therefore, %p must be dereferenced. |
| ; |
| ; ATTRIBUTOR_CGSCC_NPM-LABEL: define i32 @require_cfg_analysis(i32 %c, i32* {{.*}} dereferenceable(4) %p) |
| ; Every path reaches l5 and then one of the stores in l6/l7, so %p must be |
| ; dereferenced; the NPM configurations infer dereferenceable(4), while the |
| ; OPM configurations (first CHECK block) do not. |
| define i32 @require_cfg_analysis(i32 %c, i32* %p) { |
| ; IS________OPM-LABEL: define {{[^@]+}}@require_cfg_analysis |
| ; IS________OPM-SAME: (i32 [[C:%.*]], i32* nocapture nofree writeonly [[P:%.*]]) |
| ; IS________OPM-NEXT: [[TOBOOL1:%.*]] = icmp eq i32 [[C]], 0 |
| ; IS________OPM-NEXT: br i1 [[TOBOOL1]], label [[L1:%.*]], label [[L2:%.*]] |
| ; IS________OPM: l1: |
| ; IS________OPM-NEXT: [[TOBOOL2:%.*]] = icmp eq i32 [[C]], 1 |
| ; IS________OPM-NEXT: br i1 [[TOBOOL2]], label [[L3:%.*]], label [[L4:%.*]] |
| ; IS________OPM: l2: |
| ; IS________OPM-NEXT: [[TOBOOL3:%.*]] = icmp eq i32 [[C]], 2 |
| ; IS________OPM-NEXT: br i1 [[TOBOOL3]], label [[L3]], label [[L4]] |
| ; IS________OPM: l3: |
| ; IS________OPM-NEXT: br label [[L5:%.*]] |
| ; IS________OPM: l4: |
| ; IS________OPM-NEXT: br label [[L5]] |
| ; IS________OPM: l5: |
| ; IS________OPM-NEXT: [[TOBOOL4:%.*]] = icmp eq i32 [[C]], 4 |
| ; IS________OPM-NEXT: br i1 [[TOBOOL4]], label [[L6:%.*]], label [[L7:%.*]] |
| ; IS________OPM: l6: |
| ; IS________OPM-NEXT: store i32 0, i32* [[P]], align 4 |
| ; IS________OPM-NEXT: br label [[END:%.*]] |
| ; IS________OPM: l7: |
| ; IS________OPM-NEXT: store i32 1, i32* [[P]], align 4 |
| ; IS________OPM-NEXT: br label [[END]] |
| ; IS________OPM: end: |
| ; IS________OPM-NEXT: ret i32 1 |
| ; |
| ; IS________NPM-LABEL: define {{[^@]+}}@require_cfg_analysis |
| ; IS________NPM-SAME: (i32 [[C:%.*]], i32* nocapture nofree nonnull writeonly align 4 dereferenceable(4) [[P:%.*]]) |
| ; IS________NPM-NEXT: [[TOBOOL1:%.*]] = icmp eq i32 [[C]], 0 |
| ; IS________NPM-NEXT: br i1 [[TOBOOL1]], label [[L1:%.*]], label [[L2:%.*]] |
| ; IS________NPM: l1: |
| ; IS________NPM-NEXT: br label [[L4:%.*]] |
| ; IS________NPM: l2: |
| ; IS________NPM-NEXT: [[TOBOOL3:%.*]] = icmp eq i32 [[C]], 2 |
| ; IS________NPM-NEXT: br i1 [[TOBOOL3]], label [[L3:%.*]], label [[L4]] |
| ; IS________NPM: l3: |
| ; IS________NPM-NEXT: br label [[L5:%.*]] |
| ; IS________NPM: l4: |
| ; IS________NPM-NEXT: br label [[L5]] |
| ; IS________NPM: l5: |
| ; IS________NPM-NEXT: [[TOBOOL4:%.*]] = icmp eq i32 [[C]], 4 |
| ; IS________NPM-NEXT: br i1 [[TOBOOL4]], label [[L6:%.*]], label [[L7:%.*]] |
| ; IS________NPM: l6: |
| ; IS________NPM-NEXT: store i32 0, i32* [[P]], align 4 |
| ; IS________NPM-NEXT: br label [[END:%.*]] |
| ; IS________NPM: l7: |
| ; IS________NPM-NEXT: store i32 1, i32* [[P]], align 4 |
| ; IS________NPM-NEXT: br label [[END]] |
| ; IS________NPM: end: |
| ; IS________NPM-NEXT: ret i32 1 |
| ; |
| %tobool1 = icmp eq i32 %c, 0 |
| br i1 %tobool1, label %l1, label %l2 |
| l1: |
| %tobool2 = icmp eq i32 %c, 1 |
| br i1 %tobool2, label %l3, label %l4 |
| l2: |
| %tobool3 = icmp eq i32 %c, 2 |
| br i1 %tobool3, label %l3, label %l4 |
| l3: |
| br label %l5 |
| l4: |
| br label %l5 |
| l5: |
| %tobool4 = icmp eq i32 %c, 4 |
| br i1 %tobool4, label %l6, label %l7 |
| l6: |
| store i32 0, i32* %p |
| br label %end |
| l7: |
| store i32 1, i32* %p |
| br label %end |
| end: |
| ret i32 1 |
| } |