| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2 |
| ; RUN: opt < %s -passes=loop-vectorize,simplifycfg,instcombine -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s |
| ; RUN: opt < %s -passes=loop-vectorize,simplifycfg,instcombine -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s --check-prefix=INTERLEAVE |
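; Both RUN lines force tail folding: -prefer-predicate-over-epilogue=predicate-dont-vectorize
; asks for a predicated (tail-folded) vector body and disallows falling back to
; an unpredicated vector loop with a scalar epilogue.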
| |
| target triple = "aarch64-unknown-linux-gnu" |
| |
; A call argument can remain scalar when the vectorized function variant
; declares that parameter as uniform, because the value is loop invariant.
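; Roughly equivalent C, for reference only (the IR loop below is bottom-tested,
; so it assumes n is nonzero):
;
;   for (uint64_t i = 0; i < n; i++)
;     dst[i] = foo(src[i], uniform);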
define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform, i64 %n) #0 {
| ; CHECK-LABEL: define void @test_uniform |
| ; CHECK-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i64 [[UNIFORM:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 1 |
| ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1 |
| ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]]) |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]]) |
| ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; CHECK: vector.body: |
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] |
| ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison) |
| ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x double> @foo_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i64 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]] |
| ; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP6]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]]) |
| ; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 |
| ; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; CHECK: for.cond.cleanup: |
| ; CHECK-NEXT: ret void |
| ; |
| ; INTERLEAVE-LABEL: define void @test_uniform |
| ; INTERLEAVE-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i64 [[UNIFORM:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] { |
| ; INTERLEAVE-NEXT: entry: |
| ; INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2 |
| ; INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2 |
| ; INTERLEAVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]]) |
| ; INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP8]], 1 |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]]) |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP9]], i64 [[N]]) |
| ; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; INTERLEAVE: vector.body: |
| ; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], [[VECTOR_BODY]] ] |
| ; INTERLEAVE-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] |
| ; INTERLEAVE-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP12:%.*]] = shl i64 [[TMP11]], 1 |
| ; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 [[TMP12]] |
| ; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP10]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison) |
| ; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP13]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x double> poison) |
| ; INTERLEAVE-NEXT: [[TMP14:%.*]] = call <vscale x 2 x double> @foo_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i64 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; INTERLEAVE-NEXT: [[TMP15:%.*]] = call <vscale x 2 x double> @foo_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD3]], i64 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]]) |
| ; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]] |
| ; INTERLEAVE-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 1 |
| ; INTERLEAVE-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[TMP16]], i64 [[TMP18]] |
| ; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP14]], ptr [[TMP16]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP15]], ptr [[TMP19]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]]) |
| ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] |
| ; INTERLEAVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 1 |
| ; INTERLEAVE-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], [[TMP21]] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]]) |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP4]]) |
| ; INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 |
| ; INTERLEAVE-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP0:![0-9]+]] |
| ; INTERLEAVE: for.cond.cleanup: |
| ; INTERLEAVE-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] |
| %gepsrc = getelementptr double, ptr %src, i64 %indvars.iv |
| %data = load double, ptr %gepsrc, align 8 |
| %call = call double @foo(double %data, i64 %uniform) #1 |
| %gepdst = getelementptr inbounds double, ptr %dst, i64 %indvars.iv |
| store double %call, ptr %gepdst |
| %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 |
| %exitcond = icmp eq i64 %indvars.iv.next, %n |
| br i1 %exitcond, label %for.cond.cleanup, label %for.body |
| |
| for.cond.cleanup: |
| ret void |
| } |
| |
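; As above, but the uniform argument is a 32-bit scalar, narrower than the
; 64-bit induction variable. Roughly equivalent C, for reference only:
;
;   for (uint64_t i = 0; i < n; i++)
;     dst[i] = bar(src[i], uniform);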
define void @test_uniform_smaller_scalar(ptr noalias %dst, ptr readonly %src, i32 %uniform, i64 %n) #0 {
| ; CHECK-LABEL: define void @test_uniform_smaller_scalar |
| ; CHECK-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i32 [[UNIFORM:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 1 |
| ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 1 |
| ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]]) |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]]) |
| ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; CHECK: vector.body: |
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] |
| ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP5]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison) |
| ; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 2 x double> @bar_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i32 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]] |
| ; CHECK-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP6]], ptr [[TMP7]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] |
| ; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]]) |
| ; CHECK-NEXT: [[TMP8:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 |
| ; CHECK-NEXT: br i1 [[TMP8]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; CHECK: for.cond.cleanup: |
| ; CHECK-NEXT: ret void |
| ; |
| ; INTERLEAVE-LABEL: define void @test_uniform_smaller_scalar |
| ; INTERLEAVE-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i32 [[UNIFORM:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { |
| ; INTERLEAVE-NEXT: entry: |
| ; INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2 |
| ; INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 2 |
| ; INTERLEAVE-NEXT: [[TMP4:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 [[TMP3]]) |
| ; INTERLEAVE-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP9:%.*]] = shl i64 [[TMP8]], 1 |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]]) |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP9]], i64 [[N]]) |
| ; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; INTERLEAVE: vector.body: |
| ; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi <vscale x 2 x i1> [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT4:%.*]], [[VECTOR_BODY]] ] |
| ; INTERLEAVE-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] |
| ; INTERLEAVE-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP12:%.*]] = shl i64 [[TMP11]], 1 |
| ; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr double, ptr [[TMP10]], i64 [[TMP12]] |
| ; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP10]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x double> poison) |
| ; INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP13]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x double> poison) |
| ; INTERLEAVE-NEXT: [[TMP14:%.*]] = call <vscale x 2 x double> @bar_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD]], i32 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; INTERLEAVE-NEXT: [[TMP15:%.*]] = call <vscale x 2 x double> @bar_uniform(<vscale x 2 x double> [[WIDE_MASKED_LOAD3]], i32 [[UNIFORM]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]]) |
| ; INTERLEAVE-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]] |
| ; INTERLEAVE-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP18:%.*]] = shl i64 [[TMP17]], 1 |
| ; INTERLEAVE-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[TMP16]], i64 [[TMP18]] |
| ; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP14]], ptr [[TMP16]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]]) |
| ; INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2f64.p0(<vscale x 2 x double> [[TMP15]], ptr [[TMP19]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]]) |
| ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP1]] |
| ; INTERLEAVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() |
| ; INTERLEAVE-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 1 |
| ; INTERLEAVE-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], [[TMP21]] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX]], i64 [[TMP4]]) |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT4]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP22]], i64 [[TMP4]]) |
| ; INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], i64 0 |
| ; INTERLEAVE-NEXT: br i1 [[TMP23]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP3:![0-9]+]] |
| ; INTERLEAVE: for.cond.cleanup: |
| ; INTERLEAVE-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] |
| %gepsrc = getelementptr double, ptr %src, i64 %indvars.iv |
| %data = load double, ptr %gepsrc, align 8 |
| %call = call double @bar(double %data, i32 %uniform) #2 |
| %gepdst = getelementptr inbounds double, ptr %dst, i64 %indvars.iv |
| store double %call, ptr %gepdst |
| %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 |
| %exitcond = icmp eq i64 %indvars.iv.next, %n |
| br i1 %exitcond, label %for.cond.cleanup, label %for.body |
| |
| for.cond.cleanup: |
| ret void |
| } |
| |
; If the argument is not loop invariant then it is not uniform, so the vector
; variant can't be used and the call must remain scalar.
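; Roughly equivalent C, for reference only:
;
;   for (uint64_t i = 0; i < n; i++)
;     dst[i] = foo(src[i], i); // the i64 argument changes every iteration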
| define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64 %n) #0 { |
| ; CHECK-LABEL: define void @test_uniform_not_invariant |
| ; CHECK-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { |
| ; CHECK-NEXT: entry: |
| ; CHECK-NEXT: br label [[FOR_BODY:%.*]] |
| ; CHECK: for.body: |
| ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] |
| ; CHECK-NEXT: [[GEPSRC:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDVARS_IV]] |
| ; CHECK-NEXT: [[DATA:%.*]] = load double, ptr [[GEPSRC]], align 8 |
| ; CHECK-NEXT: [[CALL:%.*]] = call double @foo(double [[DATA]], i64 [[INDVARS_IV]]) #[[ATTR5:[0-9]+]] |
| ; CHECK-NEXT: [[GEPDST:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDVARS_IV]] |
| ; CHECK-NEXT: store double [[CALL]], ptr [[GEPDST]], align 8 |
| ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 |
| ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]] |
| ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] |
| ; CHECK: for.cond.cleanup: |
| ; CHECK-NEXT: ret void |
| ; |
| ; INTERLEAVE-LABEL: define void @test_uniform_not_invariant |
| ; INTERLEAVE-SAME: (ptr noalias [[DST:%.*]], ptr readonly [[SRC:%.*]], i64 [[N:%.*]]) #[[ATTR0]] { |
| ; INTERLEAVE-NEXT: entry: |
| ; INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.usub.sat.i64(i64 [[N]], i64 2) |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = icmp ne i64 [[N]], 0 |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = icmp ugt i64 [[N]], 1 |
| ; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] |
| ; INTERLEAVE: vector.body: |
| ; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[PRED_STORE_CONTINUE4]] ] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi i1 [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[PRED_STORE_CONTINUE4]] ] |
| ; INTERLEAVE-NEXT: br i1 [[ACTIVE_LANE_MASK]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] |
| ; INTERLEAVE: pred.store.if: |
| ; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[SRC]], i64 [[INDEX]] |
| ; INTERLEAVE-NEXT: [[TMP3:%.*]] = load double, ptr [[TMP2]], align 8 |
| ; INTERLEAVE-NEXT: [[TMP4:%.*]] = call double @foo(double [[TMP3]], i64 [[INDEX]]) #[[ATTR5:[0-9]+]] |
| ; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[INDEX]] |
| ; INTERLEAVE-NEXT: store double [[TMP4]], ptr [[TMP5]], align 8 |
| ; INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE]] |
| ; INTERLEAVE: pred.store.continue: |
| ; INTERLEAVE-NEXT: br i1 [[ACTIVE_LANE_MASK2]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]] |
| ; INTERLEAVE: pred.store.if3: |
| ; INTERLEAVE-NEXT: [[TMP6:%.*]] = or disjoint i64 [[INDEX]], 1 |
| ; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[SRC]], i64 [[TMP6]] |
| ; INTERLEAVE-NEXT: [[TMP8:%.*]] = load double, ptr [[TMP7]], align 8 |
| ; INTERLEAVE-NEXT: [[TMP9:%.*]] = call double @foo(double [[TMP8]], i64 [[TMP6]]) #[[ATTR5]] |
| ; INTERLEAVE-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[DST]], i64 [[TMP6]] |
| ; INTERLEAVE-NEXT: store double [[TMP9]], ptr [[TMP10]], align 8 |
| ; INTERLEAVE-NEXT: br label [[PRED_STORE_CONTINUE4]] |
| ; INTERLEAVE: pred.store.continue4: |
| ; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 2 |
| ; INTERLEAVE-NEXT: [[TMP11:%.*]] = or disjoint i64 [[INDEX]], 1 |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = icmp ult i64 [[INDEX]], [[TMP0]] |
| ; INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = icmp ult i64 [[TMP11]], [[TMP0]] |
| ; INTERLEAVE-NEXT: br i1 [[ACTIVE_LANE_MASK_NEXT]], label [[VECTOR_BODY]], label [[FOR_COND_CLEANUP:%.*]], !llvm.loop [[LOOP4:![0-9]+]] |
| ; INTERLEAVE: for.cond.cleanup: |
| ; INTERLEAVE-NEXT: ret void |
| ; |
| entry: |
| br label %for.body |
| |
| for.body: |
| %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] |
| %gepsrc = getelementptr double, ptr %src, i64 %indvars.iv |
| %data = load double, ptr %gepsrc, align 8 |
| %call = call double @foo(double %data, i64 %indvars.iv) #1 |
| %gepdst = getelementptr inbounds double, ptr %dst, i64 %indvars.iv |
| store double %call, ptr %gepdst |
| %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 |
| %exitcond = icmp eq i64 %indvars.iv.next, %n |
| br i1 %exitcond, label %for.cond.cleanup, label %for.body |
| |
| for.cond.cleanup: |
| ret void |
| } |
| |
| ; Scalar functions |
| declare double @foo(double, i64) |
| declare double @bar(double, i32) |
| |
| ; Vector variants |
| declare <vscale x 2 x double> @foo_uniform(<vscale x 2 x double>, i64, <vscale x 2 x i1>) |
| declare <vscale x 2 x double> @bar_uniform(<vscale x 2 x double>, i32, <vscale x 2 x i1>) |
| |
| attributes #0 = { "target-features"="+sve" } |
| |
| ; Mappings |
| attributes #1 = { nounwind "vector-function-abi-variant"="_ZGVsMxvu_foo(foo_uniform)" } |
| attributes #2 = { nounwind "vector-function-abi-variant"="_ZGVsMxvu_bar(bar_uniform)" } |
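
; In the mangled names above, '_ZGVsMxvu' decodes per the AArch64 vector
; function ABI as: 's' = SVE, 'M' = masked (hence the trailing
; <vscale x 2 x i1> predicate on the variants), 'x' = scalable vector length,
; 'v' = vector parameter, 'u' = uniform (scalar) parameter.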