; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 < %s | FileCheck %s

; Merge: the merging intrinsics take a pass-through vector (%zn) and a
; constant segment index; portions of %zn outside the selected segment are
; preserved in the result.

define <vscale x 8 x i16> @test_pmov_to_vector_i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    pmov z0[1], p0.h
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16(<vscale x 8 x i16> %zn, <vscale x 8 x i1> %pn, i32 1)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @test_pmov_to_vector_i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    pmov z0[3], p0.s
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32(<vscale x 4 x i32> %zn, <vscale x 4 x i1> %pn, i32 3)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @test_pmov_to_vector_i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    pmov z0[7], p0.d
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn, i32 7)
  ret <vscale x 2 x i64> %res
}
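
; Illustrative note (not part of the autogenerated checks): the segment index
; is an immediate whose legal range depends on the element size, and the tests
; above use the maximum index for each type (1 for .h, 3 for .s, 7 for .d).
; For example, a hypothetical lane-0 variant of the i64 test:
;   %res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64> %zn, <vscale x 2 x i1> %pn, i32 0)
; would be expected to select "pmov z0[0], p0.d".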

; Zero: the zeroing intrinsics take only a predicate and no pass-through
; vector; the portion of the result not written by the predicate is zero.

define <vscale x 16 x i8> @test_pmov_to_vector_zero_i8(<vscale x 16 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    pmov z0, p0.b
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8(<vscale x 16 x i1> %pn)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @test_pmov_to_vector_zero_i16(<vscale x 8 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    pmov z0[0], p0.h
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16(<vscale x 8 x i1> %pn)
  ret <vscale x 8 x i16> %res
}

define <vscale x 4 x i32> @test_pmov_to_vector_zero_i32(<vscale x 4 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    pmov z0[0], p0.s
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32(<vscale x 4 x i1> %pn)
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @test_pmov_to_vector_zero_i64(<vscale x 2 x i1> %pn) {
; CHECK-LABEL: test_pmov_to_vector_zero_i64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    pmov z0[0], p0.d
; CHECK-NEXT:    ret
entry:
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64(<vscale x 2 x i1> %pn)
  ret <vscale x 2 x i64> %res
}
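
; Illustrative note (not part of the autogenerated checks): because the
; zeroing forms have no pass-through operand, the .b variant selects the
; unindexed "pmov z0, p0.b", which writes the full vector, while the
; .h/.s/.d variants select segment index 0, as the CHECK lines above show.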

declare <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
declare <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.merging.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv16i8(<vscale x 16 x i1>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv8i16(<vscale x 8 x i1>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv4i32(<vscale x 4 x i1>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.pmov.to.vector.lane.zeroing.nxv2i64(<vscale x 2 x i1>)