| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming < %s | FileCheck %s |
| |
| ; |
| ; WHILEGE |
| ; |
| |
| ; WHILEGE (signed >=), i32 operands, 16 x i1 (byte) predicate: expect w-registers. |
| define <vscale x 16 x i1> @whilege_b_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilege_b_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.b, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %a, i32 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i64 operands, 16 x i1 predicate: expect x-registers. |
| define <vscale x 16 x i1> @whilege_b_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilege_b_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.b, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %a, i64 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i32 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilege_h_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilege_h_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.h, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %a, i32 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i64 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilege_h_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilege_h_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.h, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %a, i64 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i32 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilege_s_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilege_s_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.s, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %a, i32 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i64 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilege_s_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilege_s_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.s, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %a, i64 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i32 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilege_d_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilege_d_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; i64 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilege_d_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilege_d_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilege p0.d, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %a, i64 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; Ensure we don't convert constant decrementing while instructions to ptrue. |
| ; The constants must be materialized (w8 = -2, w9 = 3) and the while executed. |
| define <vscale x 16 x i1> @whilege_b_ii() { |
| ; CHECK-LABEL: whilege_b_ii: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: mov w8, #-2 // =0xfffffffe |
| ; CHECK-NEXT: mov w9, #3 // =0x3 |
| ; CHECK-NEXT: whilege p0.b, w9, w8 |
| ; CHECK-NEXT: ret |
| entry: |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 3, i32 -2) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; |
| ; WHILEHS |
| ; |
| |
| ; WHILEHS (unsigned >=), i32 operands, 16 x i1 (byte) predicate: expect w-registers. |
| define <vscale x 16 x i1> @whilehs_b_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehs_b_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.b, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %a, i32 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i64 operands, 16 x i1 predicate: expect x-registers. |
| define <vscale x 16 x i1> @whilehs_b_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehs_b_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.b, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %a, i64 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i32 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilehs_h_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehs_h_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.h, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %a, i32 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i64 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilehs_h_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehs_h_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.h, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %a, i64 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i32 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilehs_s_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehs_s_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.s, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %a, i32 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i64 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilehs_s_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehs_s_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.s, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %a, i64 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i32 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilehs_d_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehs_d_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; i64 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilehs_d_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehs_d_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehs p0.d, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %a, i64 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; Ensure we don't convert constant decrementing while instructions to ptrue. |
| ; i64 variant: constants materialized (w8 = 2, w9 = 8) and the while executed. |
| define <vscale x 16 x i1> @whilehs_b_ii() { |
| ; CHECK-LABEL: whilehs_b_ii: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: mov w8, #2 // =0x2 |
| ; CHECK-NEXT: mov w9, #8 // =0x8 |
| ; CHECK-NEXT: whilehs p0.b, x9, x8 |
| ; CHECK-NEXT: ret |
| entry: |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 8, i64 2) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; |
| ; WHILEGT |
| ; |
| |
| ; WHILEGT (signed >), i32 operands, 16 x i1 (byte) predicate: expect w-registers. |
| define <vscale x 16 x i1> @whilegt_b_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilegt_b_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.b, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %a, i32 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i64 operands, 16 x i1 predicate: expect x-registers. |
| define <vscale x 16 x i1> @whilegt_b_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilegt_b_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.b, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %a, i64 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i32 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilegt_h_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilegt_h_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.h, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %a, i32 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i64 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilegt_h_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilegt_h_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.h, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %a, i64 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i32 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilegt_s_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilegt_s_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.s, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %a, i32 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i64 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilegt_s_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilegt_s_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.s, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %a, i64 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i32 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilegt_d_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilegt_d_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; i64 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilegt_d_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilegt_d_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilegt p0.d, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %a, i64 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; Ensure we don't convert constant decrementing while instructions to ptrue. |
| ; The constants must be materialized (w8 = -2, w9 = 3) and the while executed. |
| define <vscale x 16 x i1> @whilegt_b_ii() { |
| ; CHECK-LABEL: whilegt_b_ii: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: mov w8, #-2 // =0xfffffffe |
| ; CHECK-NEXT: mov w9, #3 // =0x3 |
| ; CHECK-NEXT: whilegt p0.b, w9, w8 |
| ; CHECK-NEXT: ret |
| entry: |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 3, i32 -2) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; |
| ; WHILEHI |
| ; |
| |
| ; WHILEHI (unsigned >), i32 operands, 16 x i1 (byte) predicate: expect w-registers. |
| define <vscale x 16 x i1> @whilehi_b_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehi_b_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.b, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %a, i32 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i64 operands, 16 x i1 predicate: expect x-registers. |
| define <vscale x 16 x i1> @whilehi_b_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehi_b_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.b, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %a, i64 %b) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| ; i32 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilehi_h_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehi_h_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.h, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %a, i32 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i64 operands, 8 x i1 (halfword) predicate. |
| define <vscale x 8 x i1> @whilehi_h_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehi_h_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.h, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %a, i64 %b) |
| ret <vscale x 8 x i1> %out |
| } |
| |
| ; i32 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilehi_s_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehi_s_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.s, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %a, i32 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i64 operands, 4 x i1 (word) predicate. |
| define <vscale x 4 x i1> @whilehi_s_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehi_s_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.s, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %a, i64 %b) |
| ret <vscale x 4 x i1> %out |
| } |
| |
| ; i32 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilehi_d_ww(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehi_d_ww: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; i64 operands, 2 x i1 (doubleword) predicate. |
| define <vscale x 2 x i1> @whilehi_d_xx(i64 %a, i64 %b) { |
| ; CHECK-LABEL: whilehi_d_xx: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: whilehi p0.d, x0, x1 |
| ; CHECK-NEXT: ret |
| %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %a, i64 %b) |
| ret <vscale x 2 x i1> %out |
| } |
| |
| ; Ensure we don't convert constant decrementing while instructions to ptrue. |
| ; i64 variant: constants materialized (w8 = 2, w9 = 8) and the while executed. |
| define <vscale x 16 x i1> @whilehi_b_ii() { |
| ; CHECK-LABEL: whilehi_b_ii: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: mov w8, #2 // =0x2 |
| ; CHECK-NEXT: mov w9, #8 // =0x8 |
| ; CHECK-NEXT: whilehi p0.b, x9, x8 |
| ; CHECK-NEXT: ret |
| entry: |
| %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 8, i64 2) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32, i32) |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64, i64) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32, i32) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64, i64) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32, i32) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64, i64) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64, i64) |
| |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32, i32) |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64, i64) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32, i32) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64, i64) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32, i32) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64, i64) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64, i64) |
| |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32, i32) |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64, i64) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32, i32) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64, i64) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32, i32) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64, i64) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64, i64) |
| |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32, i32) |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64, i64) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32, i32) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64, i64) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32, i32) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64, i64) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64, i64) |