; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -force-streaming < %s | FileCheck %s
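; Both RUN lines share the same CHECK prefix, so identical code generation is
; expected for plain SVE and for SME in streaming mode.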

;
; RBIT
;

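; RBIT reverses the bit order within each active element. The first vector
; operand supplies the inactive-lane values under merging predication (p0/m).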
define <vscale x 16 x i8> @rbit_i8(<vscale x 16 x i8> %a, <vscale x 16 x i1> %pg, <vscale x 16 x i8> %b) {
; CHECK-LABEL: rbit_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rbit z0.b, p0/m, z1.b
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.rbit.nxv16i8(<vscale x 16 x i8> %a,
                                                                <vscale x 16 x i1> %pg,
                                                                <vscale x 16 x i8> %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @rbit_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: rbit_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rbit z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.rbit.nxv8i16(<vscale x 8 x i16> %a,
                                                                <vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @rbit_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: rbit_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rbit z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.rbit.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @rbit_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: rbit_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rbit z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.rbit.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; REVB
;

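; REVB reverses the byte order within each active halfword, word, or
; doubleword element, merging inactive lanes from the first vector operand.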
define <vscale x 8 x i16> @revb_i16(<vscale x 8 x i16> %a, <vscale x 8 x i1> %pg, <vscale x 8 x i16> %b) {
; CHECK-LABEL: revb_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    revb z0.h, p0/m, z1.h
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(<vscale x 8 x i16> %a,
                                                                <vscale x 8 x i1> %pg,
                                                                <vscale x 8 x i16> %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @revb_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: revb_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    revb z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.revb.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @revb_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: revb_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    revb z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.revb.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; REVH
;

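; REVH reverses the halfword order within each active word or doubleword
; element, merging inactive lanes from the first vector operand.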
define <vscale x 4 x i32> @revh_i32(<vscale x 4 x i32> %a, <vscale x 4 x i1> %pg, <vscale x 4 x i32> %b) {
; CHECK-LABEL: revh_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    revh z0.s, p0/m, z1.s
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.revh.nxv4i32(<vscale x 4 x i32> %a,
                                                                <vscale x 4 x i1> %pg,
                                                                <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @revh_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: revh_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    revh z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.revh.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

;
; REVW
;

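; REVW reverses the word order within each active doubleword element, merging
; inactive lanes from the first vector operand.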
define <vscale x 2 x i64> @revw_i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %pg, <vscale x 2 x i64> %b) {
; CHECK-LABEL: revw_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    revw z0.d, p0/m, z1.d
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.revw.nxv2i64(<vscale x 2 x i64> %a,
                                                                <vscale x 2 x i1> %pg,
                                                                <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %out
}

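; Intrinsic declarations. The operand order is (inactive/merge vector,
; governing predicate, source vector).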
declare <vscale x 16 x i8> @llvm.aarch64.sve.rbit.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.rbit.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.rbit.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.rbit.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 8 x i16> @llvm.aarch64.sve.revb.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.revb.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.revb.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 4 x i32> @llvm.aarch64.sve.revh.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.revh.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 2 x i64> @llvm.aarch64.sve.revw.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>)