| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVI,RV32I |
| ; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVI,RV64I |
| ; RUN: llc -mtriple=riscv32 -mattr=+m,+zve64f,+zvl128b,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVF,RV32F |
| ; RUN: llc -mtriple=riscv64 -mattr=+m,+zve64f,+zvl128b,+f -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVF,RV64F |
| ; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVD,RV32D |
| ; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RVD,RV64D |
| ; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB |
| ; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVBB |
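| ; The RVI configurations (Zve64x) have no vector FP, the RVF configurations |
| ; (Zve64f with F) add single-precision vector FP, the RVD configurations (V with D) |
| ; also have double-precision vector FP, and the ZVBB configurations add the vector |
| ; basic bit-manipulation extension, which provides vclz.v. The RV32*/RV64* prefixes |
| ; split the checks where XLEN changes the generated code (mainly the i64 element tests). |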
| |
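| ; The i1-false forms of llvm.ctlz are fully defined: a zero input returns the element |
| ; width. Without Zvbb, the lowering either runs the shift-or smearing plus SWAR popcount |
| ; sequence (RVI) or converts to floating point and recovers the count from the exponent |
| ; field, clamping with vminu.vx to handle the zero case (RVF/RVD). With Zvbb a single |
| ; vclz.v is emitted. |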
| define void @ctlz_v16i8(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_v16i8: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; RVI-NEXT: vle8.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: li a1, 85 |
| ; RVI-NEXT: vand.vx v9, v9, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v9 |
| ; RVI-NEXT: li a1, 51 |
| ; RVI-NEXT: vand.vx v9, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v9, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v9 |
| ; RVI-NEXT: vand.vi v8, v8, 15 |
| ; RVI-NEXT: vse8.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_v16i8: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVF-NEXT: vle8.v v8, (a0) |
| ; RVF-NEXT: vzext.vf2 v10, v8 |
| ; RVF-NEXT: vfwcvt.f.xu.v v12, v10 |
| ; RVF-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVF-NEXT: vsetvli zero, zero, e8, m1, ta, ma |
| ; RVF-NEXT: vnsrl.wi v10, v8, 0 |
| ; RVF-NEXT: li a1, 134 |
| ; RVF-NEXT: vrsub.vx v8, v10, a1 |
| ; RVF-NEXT: li a1, 8 |
| ; RVF-NEXT: vminu.vx v8, v8, a1 |
| ; RVF-NEXT: vse8.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v16i8: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVD-NEXT: vle8.v v8, (a0) |
| ; RVD-NEXT: vzext.vf2 v10, v8 |
| ; RVD-NEXT: vfwcvt.f.xu.v v12, v10 |
| ; RVD-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVD-NEXT: vsetvli zero, zero, e8, m1, ta, ma |
| ; RVD-NEXT: vnsrl.wi v10, v8, 0 |
| ; RVD-NEXT: li a1, 134 |
| ; RVD-NEXT: vrsub.vx v8, v10, a1 |
| ; RVD-NEXT: li a1, 8 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse8.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v16i8: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; ZVBB-NEXT: vle8.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse8.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <16 x i8>, ptr %x |
| %b = load <16 x i8>, ptr %y |
| %c = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false) |
| store <16 x i8> %c, ptr %x |
| ret void |
| } |
| declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1) |
| |
| define void @ctlz_v8i16(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_v8i16: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; RVI-NEXT: vle16.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: lui a1, 5 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v9, v9, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 3 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v9, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v9, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 1 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: li a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 8 |
| ; RVI-NEXT: vse16.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_v8i16: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; RVF-NEXT: vle16.v v8, (a0) |
| ; RVF-NEXT: vfwcvt.f.xu.v v10, v8 |
| ; RVF-NEXT: vnsrl.wi v8, v10, 23 |
| ; RVF-NEXT: li a1, 142 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: li a1, 16 |
| ; RVF-NEXT: vminu.vx v8, v8, a1 |
| ; RVF-NEXT: vse16.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v8i16: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; RVD-NEXT: vle16.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v10, v8 |
| ; RVD-NEXT: vnsrl.wi v8, v10, 23 |
| ; RVD-NEXT: li a1, 142 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 16 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse16.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v8i16: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVBB-NEXT: vle16.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse16.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <8 x i16>, ptr %x |
| %b = load <8 x i16>, ptr %y |
| %c = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false) |
| store <8 x i16> %c, ptr %x |
| ret void |
| } |
| declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1) |
| |
| define void @ctlz_v4i32(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_v4i32: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RVI-NEXT: vle32.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 16 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: lui a1, 349525 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v9, v9, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 209715 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v9, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v9, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 61681 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: lui a1, 4112 |
| ; RVI-NEXT: addi a1, a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 24 |
| ; RVI-NEXT: vse32.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_v4i32: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RVF-NEXT: vle32.v v8, (a0) |
| ; RVF-NEXT: fsrmi a1, 1 |
| ; RVF-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVF-NEXT: fsrm a1 |
| ; RVF-NEXT: vsrl.vi v8, v8, 23 |
| ; RVF-NEXT: li a1, 158 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: li a1, 32 |
| ; RVF-NEXT: vminu.vx v8, v8, a1 |
| ; RVF-NEXT: vse32.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v4i32: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RVD-NEXT: vle32.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v10, v8 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vnsrl.wx v8, v10, a1 |
| ; RVD-NEXT: li a1, 1054 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 32 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse32.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v4i32: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; ZVBB-NEXT: vle32.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse32.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <4 x i32>, ptr %x |
| %b = load <4 x i32>, ptr %y |
| %c = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false) |
| store <4 x i32> %c, ptr %x |
| ret void |
| } |
| declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) |
| |
| define void @ctlz_v2i64(ptr %x, ptr %y) nounwind { |
| ; RV32I-LABEL: ctlz_v2i64: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vle64.v v8, (a0) |
| ; RV32I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 2 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 8 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 16 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: li a1, 32 |
| ; RV32I-NEXT: vsrl.vx v9, v8, a1 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vnot.v v8, v8 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV32I-NEXT: lui a1, 349525 |
| ; RV32I-NEXT: addi a1, a1, 1365 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vand.vv v9, v9, v10 |
| ; RV32I-NEXT: vsub.vv v8, v8, v9 |
| ; RV32I-NEXT: lui a1, 209715 |
| ; RV32I-NEXT: addi a1, a1, 819 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v9, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vand.vv v10, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV32I-NEXT: vand.vv v8, v8, v9 |
| ; RV32I-NEXT: vadd.vv v8, v10, v8 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV32I-NEXT: vadd.vv v8, v8, v9 |
| ; RV32I-NEXT: lui a1, 61681 |
| ; RV32I-NEXT: addi a1, a1, -241 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v9, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vand.vv v8, v8, v9 |
| ; RV32I-NEXT: lui a1, 4112 |
| ; RV32I-NEXT: addi a1, a1, 257 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v9, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vmul.vv v8, v8, v9 |
| ; RV32I-NEXT: li a1, 56 |
| ; RV32I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV32I-NEXT: vse64.v v8, (a0) |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: ctlz_v2i64: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV64I-NEXT: vle64.v v8, (a0) |
| ; RV64I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 2 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 8 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 16 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: li a1, 32 |
| ; RV64I-NEXT: vsrl.vx v9, v8, a1 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vnot.v v8, v8 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV64I-NEXT: lui a1, 349525 |
| ; RV64I-NEXT: addiw a1, a1, 1365 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v9, v9, a1 |
| ; RV64I-NEXT: vsub.vv v8, v8, v9 |
| ; RV64I-NEXT: lui a1, 209715 |
| ; RV64I-NEXT: addiw a1, a1, 819 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v9, v8, a1 |
| ; RV64I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: vadd.vv v8, v9, v8 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV64I-NEXT: vadd.vv v8, v8, v9 |
| ; RV64I-NEXT: lui a1, 61681 |
| ; RV64I-NEXT: addiw a1, a1, -241 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: lui a1, 4112 |
| ; RV64I-NEXT: addiw a1, a1, 257 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vmul.vx v8, v8, a1 |
| ; RV64I-NEXT: li a1, 56 |
| ; RV64I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV64I-NEXT: vse64.v v8, (a0) |
| ; RV64I-NEXT: ret |
| ; |
| ; RV32F-LABEL: ctlz_v2i64: |
| ; RV32F: # %bb.0: |
| ; RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32F-NEXT: vle64.v v8, (a0) |
| ; RV32F-NEXT: li a1, 190 |
| ; RV32F-NEXT: vmv.v.x v9, a1 |
| ; RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma |
| ; RV32F-NEXT: fsrmi a1, 1 |
| ; RV32F-NEXT: vfncvt.f.xu.w v10, v8 |
| ; RV32F-NEXT: fsrm a1 |
| ; RV32F-NEXT: vsrl.vi v8, v10, 23 |
| ; RV32F-NEXT: vwsubu.wv v9, v9, v8 |
| ; RV32F-NEXT: li a1, 64 |
| ; RV32F-NEXT: vsetvli zero, zero, e64, m1, ta, ma |
| ; RV32F-NEXT: vminu.vx v8, v9, a1 |
| ; RV32F-NEXT: vse64.v v8, (a0) |
| ; RV32F-NEXT: ret |
| ; |
| ; RV64F-LABEL: ctlz_v2i64: |
| ; RV64F: # %bb.0: |
| ; RV64F-NEXT: vsetivli zero, 2, e32, mf2, ta, ma |
| ; RV64F-NEXT: vle64.v v8, (a0) |
| ; RV64F-NEXT: li a1, 190 |
| ; RV64F-NEXT: vmv.v.x v9, a1 |
| ; RV64F-NEXT: fsrmi a1, 1 |
| ; RV64F-NEXT: vfncvt.f.xu.w v10, v8 |
| ; RV64F-NEXT: fsrm a1 |
| ; RV64F-NEXT: vsrl.vi v8, v10, 23 |
| ; RV64F-NEXT: vwsubu.vv v10, v9, v8 |
| ; RV64F-NEXT: li a1, 64 |
| ; RV64F-NEXT: vsetvli zero, zero, e64, m1, ta, ma |
| ; RV64F-NEXT: vminu.vx v8, v10, a1 |
| ; RV64F-NEXT: vse64.v v8, (a0) |
| ; RV64F-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v2i64: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RVD-NEXT: vle64.v v8, (a0) |
| ; RVD-NEXT: fsrmi a1, 1 |
| ; RVD-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVD-NEXT: fsrm a1 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vsrl.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 1086 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 64 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse64.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v2i64: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; ZVBB-NEXT: vle64.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse64.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <2 x i64>, ptr %x |
| %b = load <2 x i64>, ptr %y |
| %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 false) |
| store <2 x i64> %c, ptr %x |
| ret void |
| } |
| declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) |
| |
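| ; The remaining non-zero-undef tests use 256-bit vectors, so under the zvl128b |
| ; configurations the check lines below run at LMUL=2 (m2), with larger register groups |
| ; for the widened FP intermediates. |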
| define void @ctlz_v32i8(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_v32i8: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: li a1, 32 |
| ; RVI-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; RVI-NEXT: vle8.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: li a1, 85 |
| ; RVI-NEXT: vand.vx v10, v10, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v10 |
| ; RVI-NEXT: li a1, 51 |
| ; RVI-NEXT: vand.vx v10, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v10, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v10 |
| ; RVI-NEXT: vand.vi v8, v8, 15 |
| ; RVI-NEXT: vse8.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_v32i8: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: li a1, 32 |
| ; RVF-NEXT: vsetvli zero, a1, e16, m4, ta, ma |
| ; RVF-NEXT: vle8.v v8, (a0) |
| ; RVF-NEXT: vzext.vf2 v12, v8 |
| ; RVF-NEXT: vfwcvt.f.xu.v v16, v12 |
| ; RVF-NEXT: vnsrl.wi v8, v16, 23 |
| ; RVF-NEXT: vsetvli zero, zero, e8, m2, ta, ma |
| ; RVF-NEXT: vnsrl.wi v12, v8, 0 |
| ; RVF-NEXT: li a1, 134 |
| ; RVF-NEXT: vrsub.vx v8, v12, a1 |
| ; RVF-NEXT: li a1, 8 |
| ; RVF-NEXT: vminu.vx v8, v8, a1 |
| ; RVF-NEXT: vse8.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v32i8: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: li a1, 32 |
| ; RVD-NEXT: vsetvli zero, a1, e16, m4, ta, ma |
| ; RVD-NEXT: vle8.v v8, (a0) |
| ; RVD-NEXT: vzext.vf2 v12, v8 |
| ; RVD-NEXT: vfwcvt.f.xu.v v16, v12 |
| ; RVD-NEXT: vnsrl.wi v8, v16, 23 |
| ; RVD-NEXT: vsetvli zero, zero, e8, m2, ta, ma |
| ; RVD-NEXT: vnsrl.wi v12, v8, 0 |
| ; RVD-NEXT: li a1, 134 |
| ; RVD-NEXT: vrsub.vx v8, v12, a1 |
| ; RVD-NEXT: li a1, 8 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse8.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v32i8: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: li a1, 32 |
| ; ZVBB-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; ZVBB-NEXT: vle8.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse8.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <32 x i8>, ptr %x |
| %b = load <32 x i8>, ptr %y |
| %c = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 false) |
| store <32 x i8> %c, ptr %x |
| ret void |
| } |
| declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1) |
| |
| define void @ctlz_v16i16(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_v16i16: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVI-NEXT: vle16.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: lui a1, 5 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v10, v10, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 3 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v10, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v10, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 1 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: li a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 8 |
| ; RVI-NEXT: vse16.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_v16i16: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVF-NEXT: vle16.v v8, (a0) |
| ; RVF-NEXT: vfwcvt.f.xu.v v12, v8 |
| ; RVF-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVF-NEXT: li a1, 142 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: li a1, 16 |
| ; RVF-NEXT: vminu.vx v8, v8, a1 |
| ; RVF-NEXT: vse16.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v16i16: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVD-NEXT: vle16.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v12, v8 |
| ; RVD-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVD-NEXT: li a1, 142 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 16 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse16.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v16i16: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; ZVBB-NEXT: vle16.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse16.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <16 x i16>, ptr %x |
| %b = load <16 x i16>, ptr %y |
| %c = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 false) |
| store <16 x i16> %c, ptr %x |
| ret void |
| } |
| declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1) |
| |
| define void @ctlz_v8i32(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_v8i32: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RVI-NEXT: vle32.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 16 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: lui a1, 349525 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v10, v10, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 209715 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v10, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v10, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 61681 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: lui a1, 4112 |
| ; RVI-NEXT: addi a1, a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 24 |
| ; RVI-NEXT: vse32.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_v8i32: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RVF-NEXT: vle32.v v8, (a0) |
| ; RVF-NEXT: fsrmi a1, 1 |
| ; RVF-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVF-NEXT: fsrm a1 |
| ; RVF-NEXT: vsrl.vi v8, v8, 23 |
| ; RVF-NEXT: li a1, 158 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: li a1, 32 |
| ; RVF-NEXT: vminu.vx v8, v8, a1 |
| ; RVF-NEXT: vse32.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v8i32: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RVD-NEXT: vle32.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v12, v8 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vnsrl.wx v8, v12, a1 |
| ; RVD-NEXT: li a1, 1054 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 32 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse32.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v8i32: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; ZVBB-NEXT: vle32.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse32.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <8 x i32>, ptr %x |
| %b = load <8 x i32>, ptr %y |
| %c = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 false) |
| store <8 x i32> %c, ptr %x |
| ret void |
| } |
| declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1) |
| |
| define void @ctlz_v4i64(ptr %x, ptr %y) nounwind { |
| ; RV32I-LABEL: ctlz_v4i64: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vle64.v v8, (a0) |
| ; RV32I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 2 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 8 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 16 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: li a1, 32 |
| ; RV32I-NEXT: vsrl.vx v10, v8, a1 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vnot.v v8, v8 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV32I-NEXT: lui a1, 349525 |
| ; RV32I-NEXT: addi a1, a1, 1365 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v12, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vand.vv v10, v10, v12 |
| ; RV32I-NEXT: vsub.vv v8, v8, v10 |
| ; RV32I-NEXT: lui a1, 209715 |
| ; RV32I-NEXT: addi a1, a1, 819 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vand.vv v12, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV32I-NEXT: vand.vv v8, v8, v10 |
| ; RV32I-NEXT: vadd.vv v8, v12, v8 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV32I-NEXT: vadd.vv v8, v8, v10 |
| ; RV32I-NEXT: lui a1, 61681 |
| ; RV32I-NEXT: addi a1, a1, -241 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vand.vv v8, v8, v10 |
| ; RV32I-NEXT: lui a1, 4112 |
| ; RV32I-NEXT: addi a1, a1, 257 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vmul.vv v8, v8, v10 |
| ; RV32I-NEXT: li a1, 56 |
| ; RV32I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV32I-NEXT: vse64.v v8, (a0) |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: ctlz_v4i64: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV64I-NEXT: vle64.v v8, (a0) |
| ; RV64I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 2 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 8 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 16 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: li a1, 32 |
| ; RV64I-NEXT: vsrl.vx v10, v8, a1 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vnot.v v8, v8 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV64I-NEXT: lui a1, 349525 |
| ; RV64I-NEXT: addiw a1, a1, 1365 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v10, v10, a1 |
| ; RV64I-NEXT: vsub.vv v8, v8, v10 |
| ; RV64I-NEXT: lui a1, 209715 |
| ; RV64I-NEXT: addiw a1, a1, 819 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v10, v8, a1 |
| ; RV64I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: vadd.vv v8, v10, v8 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV64I-NEXT: vadd.vv v8, v8, v10 |
| ; RV64I-NEXT: lui a1, 61681 |
| ; RV64I-NEXT: addiw a1, a1, -241 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: lui a1, 4112 |
| ; RV64I-NEXT: addiw a1, a1, 257 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vmul.vx v8, v8, a1 |
| ; RV64I-NEXT: li a1, 56 |
| ; RV64I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV64I-NEXT: vse64.v v8, (a0) |
| ; RV64I-NEXT: ret |
| ; |
| ; RV32F-LABEL: ctlz_v4i64: |
| ; RV32F: # %bb.0: |
| ; RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32F-NEXT: vle64.v v8, (a0) |
| ; RV32F-NEXT: li a1, 190 |
| ; RV32F-NEXT: vmv.v.x v10, a1 |
| ; RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma |
| ; RV32F-NEXT: fsrmi a1, 1 |
| ; RV32F-NEXT: vfncvt.f.xu.w v12, v8 |
| ; RV32F-NEXT: fsrm a1 |
| ; RV32F-NEXT: vsrl.vi v8, v12, 23 |
| ; RV32F-NEXT: vwsubu.wv v10, v10, v8 |
| ; RV32F-NEXT: li a1, 64 |
| ; RV32F-NEXT: vsetvli zero, zero, e64, m2, ta, ma |
| ; RV32F-NEXT: vminu.vx v8, v10, a1 |
| ; RV32F-NEXT: vse64.v v8, (a0) |
| ; RV32F-NEXT: ret |
| ; |
| ; RV64F-LABEL: ctlz_v4i64: |
| ; RV64F: # %bb.0: |
| ; RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV64F-NEXT: vle64.v v8, (a0) |
| ; RV64F-NEXT: li a1, 190 |
| ; RV64F-NEXT: vmv.v.x v10, a1 |
| ; RV64F-NEXT: fsrmi a1, 1 |
| ; RV64F-NEXT: vfncvt.f.xu.w v11, v8 |
| ; RV64F-NEXT: fsrm a1 |
| ; RV64F-NEXT: vsrl.vi v8, v11, 23 |
| ; RV64F-NEXT: vwsubu.vv v12, v10, v8 |
| ; RV64F-NEXT: li a1, 64 |
| ; RV64F-NEXT: vsetvli zero, zero, e64, m2, ta, ma |
| ; RV64F-NEXT: vminu.vx v8, v12, a1 |
| ; RV64F-NEXT: vse64.v v8, (a0) |
| ; RV64F-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_v4i64: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RVD-NEXT: vle64.v v8, (a0) |
| ; RVD-NEXT: fsrmi a1, 1 |
| ; RVD-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVD-NEXT: fsrm a1 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vsrl.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 1086 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 64 |
| ; RVD-NEXT: vminu.vx v8, v8, a1 |
| ; RVD-NEXT: vse64.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_v4i64: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; ZVBB-NEXT: vle64.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse64.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <4 x i64>, ptr %x |
| %b = load <4 x i64>, ptr %y |
| %c = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 false) |
| store <4 x i64> %c, ptr %x |
| ret void |
| } |
| declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) |
| |
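| ; The i1-true ("zero_undef") forms may return poison for a zero input, so the FP-based |
| ; lowerings below drop the final vminu.vx clamp that handled zero in the fully defined |
| ; tests above. |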
| define void @ctlz_zero_undef_v16i8(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_zero_undef_v16i8: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; RVI-NEXT: vle8.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: li a1, 85 |
| ; RVI-NEXT: vand.vx v9, v9, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v9 |
| ; RVI-NEXT: li a1, 51 |
| ; RVI-NEXT: vand.vx v9, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v9, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v9 |
| ; RVI-NEXT: vand.vi v8, v8, 15 |
| ; RVI-NEXT: vse8.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_zero_undef_v16i8: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVF-NEXT: vle8.v v8, (a0) |
| ; RVF-NEXT: vzext.vf2 v10, v8 |
| ; RVF-NEXT: vfwcvt.f.xu.v v12, v10 |
| ; RVF-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVF-NEXT: vsetvli zero, zero, e8, m1, ta, ma |
| ; RVF-NEXT: vnsrl.wi v10, v8, 0 |
| ; RVF-NEXT: li a1, 134 |
| ; RVF-NEXT: vrsub.vx v8, v10, a1 |
| ; RVF-NEXT: vse8.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v16i8: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVD-NEXT: vle8.v v8, (a0) |
| ; RVD-NEXT: vzext.vf2 v10, v8 |
| ; RVD-NEXT: vfwcvt.f.xu.v v12, v10 |
| ; RVD-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVD-NEXT: vsetvli zero, zero, e8, m1, ta, ma |
| ; RVD-NEXT: vnsrl.wi v10, v8, 0 |
| ; RVD-NEXT: li a1, 134 |
| ; RVD-NEXT: vrsub.vx v8, v10, a1 |
| ; RVD-NEXT: vse8.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v16i8: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 16, e8, m1, ta, ma |
| ; ZVBB-NEXT: vle8.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse8.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <16 x i8>, ptr %x |
| %b = load <16 x i8>, ptr %y |
| %c = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 true) |
| store <16 x i8> %c, ptr %x |
| ret void |
| } |
| |
| define void @ctlz_zero_undef_v8i16(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_zero_undef_v8i16: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; RVI-NEXT: vle16.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: lui a1, 5 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v9, v9, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 3 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v9, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v9, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 1 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: li a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 8 |
| ; RVI-NEXT: vse16.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_zero_undef_v8i16: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; RVF-NEXT: vle16.v v8, (a0) |
| ; RVF-NEXT: vfwcvt.f.xu.v v10, v8 |
| ; RVF-NEXT: vnsrl.wi v8, v10, 23 |
| ; RVF-NEXT: li a1, 142 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: vse16.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v8i16: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; RVD-NEXT: vle16.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v10, v8 |
| ; RVD-NEXT: vnsrl.wi v8, v10, 23 |
| ; RVD-NEXT: li a1, 142 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: vse16.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v8i16: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 8, e16, m1, ta, ma |
| ; ZVBB-NEXT: vle16.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse16.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <8 x i16>, ptr %x |
| %b = load <8 x i16>, ptr %y |
| %c = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 true) |
| store <8 x i16> %c, ptr %x |
| ret void |
| } |
| |
| define void @ctlz_zero_undef_v4i32(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_zero_undef_v4i32: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RVI-NEXT: vle32.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vsrl.vi v9, v8, 16 |
| ; RVI-NEXT: vor.vv v8, v8, v9 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 1 |
| ; RVI-NEXT: lui a1, 349525 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v9, v9, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 209715 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v9, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v9, v8 |
| ; RVI-NEXT: vsrl.vi v9, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v9 |
| ; RVI-NEXT: lui a1, 61681 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: lui a1, 4112 |
| ; RVI-NEXT: addi a1, a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 24 |
| ; RVI-NEXT: vse32.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_zero_undef_v4i32: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RVF-NEXT: vle32.v v8, (a0) |
| ; RVF-NEXT: fsrmi a1, 1 |
| ; RVF-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVF-NEXT: fsrm a1 |
| ; RVF-NEXT: vsrl.vi v8, v8, 23 |
| ; RVF-NEXT: li a1, 158 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: vse32.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v4i32: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RVD-NEXT: vle32.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v10, v8 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vnsrl.wx v8, v10, a1 |
| ; RVD-NEXT: li a1, 1054 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: vse32.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v4i32: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; ZVBB-NEXT: vle32.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse32.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <4 x i32>, ptr %x |
| %b = load <4 x i32>, ptr %y |
| %c = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 true) |
| store <4 x i32> %c, ptr %x |
| ret void |
| } |
| |
| define void @ctlz_zero_undef_v2i64(ptr %x, ptr %y) nounwind { |
| ; RV32I-LABEL: ctlz_zero_undef_v2i64: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vle64.v v8, (a0) |
| ; RV32I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 2 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 8 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 16 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: li a1, 32 |
| ; RV32I-NEXT: vsrl.vx v9, v8, a1 |
| ; RV32I-NEXT: vor.vv v8, v8, v9 |
| ; RV32I-NEXT: vnot.v v8, v8 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV32I-NEXT: lui a1, 349525 |
| ; RV32I-NEXT: addi a1, a1, 1365 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vand.vv v9, v9, v10 |
| ; RV32I-NEXT: vsub.vv v8, v8, v9 |
| ; RV32I-NEXT: lui a1, 209715 |
| ; RV32I-NEXT: addi a1, a1, 819 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v9, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vand.vv v10, v8, v9 |
| ; RV32I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV32I-NEXT: vand.vv v8, v8, v9 |
| ; RV32I-NEXT: vadd.vv v8, v10, v8 |
| ; RV32I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV32I-NEXT: vadd.vv v8, v8, v9 |
| ; RV32I-NEXT: lui a1, 61681 |
| ; RV32I-NEXT: addi a1, a1, -241 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v9, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vand.vv v8, v8, v9 |
| ; RV32I-NEXT: lui a1, 4112 |
| ; RV32I-NEXT: addi a1, a1, 257 |
| ; RV32I-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV32I-NEXT: vmv.v.x v9, a1 |
| ; RV32I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32I-NEXT: vmul.vv v8, v8, v9 |
| ; RV32I-NEXT: li a1, 56 |
| ; RV32I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV32I-NEXT: vse64.v v8, (a0) |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: ctlz_zero_undef_v2i64: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV64I-NEXT: vle64.v v8, (a0) |
| ; RV64I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 2 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 8 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 16 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: li a1, 32 |
| ; RV64I-NEXT: vsrl.vx v9, v8, a1 |
| ; RV64I-NEXT: vor.vv v8, v8, v9 |
| ; RV64I-NEXT: vnot.v v8, v8 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 1 |
| ; RV64I-NEXT: lui a1, 349525 |
| ; RV64I-NEXT: addiw a1, a1, 1365 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v9, v9, a1 |
| ; RV64I-NEXT: vsub.vv v8, v8, v9 |
| ; RV64I-NEXT: lui a1, 209715 |
| ; RV64I-NEXT: addiw a1, a1, 819 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v9, v8, a1 |
| ; RV64I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: vadd.vv v8, v9, v8 |
| ; RV64I-NEXT: vsrl.vi v9, v8, 4 |
| ; RV64I-NEXT: vadd.vv v8, v8, v9 |
| ; RV64I-NEXT: lui a1, 61681 |
| ; RV64I-NEXT: addiw a1, a1, -241 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: lui a1, 4112 |
| ; RV64I-NEXT: addiw a1, a1, 257 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vmul.vx v8, v8, a1 |
| ; RV64I-NEXT: li a1, 56 |
| ; RV64I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV64I-NEXT: vse64.v v8, (a0) |
| ; RV64I-NEXT: ret |
| ; |
| ; RV32F-LABEL: ctlz_zero_undef_v2i64: |
| ; RV32F: # %bb.0: |
| ; RV32F-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RV32F-NEXT: vle64.v v8, (a0) |
| ; RV32F-NEXT: li a1, 190 |
| ; RV32F-NEXT: vmv.v.x v9, a1 |
| ; RV32F-NEXT: vsetvli zero, zero, e32, mf2, ta, ma |
| ; RV32F-NEXT: fsrmi a1, 1 |
| ; RV32F-NEXT: vfncvt.f.xu.w v10, v8 |
| ; RV32F-NEXT: fsrm a1 |
| ; RV32F-NEXT: vsrl.vi v8, v10, 23 |
| ; RV32F-NEXT: vwsubu.wv v9, v9, v8 |
| ; RV32F-NEXT: vse64.v v9, (a0) |
| ; RV32F-NEXT: ret |
| ; |
| ; RV64F-LABEL: ctlz_zero_undef_v2i64: |
| ; RV64F: # %bb.0: |
| ; RV64F-NEXT: vsetivli zero, 2, e32, mf2, ta, ma |
| ; RV64F-NEXT: vle64.v v8, (a0) |
| ; RV64F-NEXT: li a1, 190 |
| ; RV64F-NEXT: vmv.v.x v9, a1 |
| ; RV64F-NEXT: fsrmi a1, 1 |
| ; RV64F-NEXT: vfncvt.f.xu.w v10, v8 |
| ; RV64F-NEXT: fsrm a1 |
| ; RV64F-NEXT: vsrl.vi v8, v10, 23 |
| ; RV64F-NEXT: vwsubu.vv v10, v9, v8 |
| ; RV64F-NEXT: vse64.v v10, (a0) |
| ; RV64F-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v2i64: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; RVD-NEXT: vle64.v v8, (a0) |
| ; RVD-NEXT: fsrmi a1, 1 |
| ; RVD-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVD-NEXT: fsrm a1 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vsrl.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 1086 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: vse64.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v2i64: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 2, e64, m1, ta, ma |
| ; ZVBB-NEXT: vle64.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse64.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <2 x i64>, ptr %x |
| %b = load <2 x i64>, ptr %y |
| %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 true) |
| store <2 x i64> %c, ptr %x |
| ret void |
| } |
| |
| define void @ctlz_zero_undef_v32i8(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_zero_undef_v32i8: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: li a1, 32 |
| ; RVI-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; RVI-NEXT: vle8.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: li a1, 85 |
| ; RVI-NEXT: vand.vx v10, v10, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v10 |
| ; RVI-NEXT: li a1, 51 |
| ; RVI-NEXT: vand.vx v10, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v10, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v10 |
| ; RVI-NEXT: vand.vi v8, v8, 15 |
| ; RVI-NEXT: vse8.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_zero_undef_v32i8: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: li a1, 32 |
| ; RVF-NEXT: vsetvli zero, a1, e16, m4, ta, ma |
| ; RVF-NEXT: vle8.v v8, (a0) |
| ; RVF-NEXT: vzext.vf2 v12, v8 |
| ; RVF-NEXT: vfwcvt.f.xu.v v16, v12 |
| ; RVF-NEXT: vnsrl.wi v8, v16, 23 |
| ; RVF-NEXT: vsetvli zero, zero, e8, m2, ta, ma |
| ; RVF-NEXT: vnsrl.wi v12, v8, 0 |
| ; RVF-NEXT: li a1, 134 |
| ; RVF-NEXT: vrsub.vx v8, v12, a1 |
| ; RVF-NEXT: vse8.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v32i8: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: li a1, 32 |
| ; RVD-NEXT: vsetvli zero, a1, e16, m4, ta, ma |
| ; RVD-NEXT: vle8.v v8, (a0) |
| ; RVD-NEXT: vzext.vf2 v12, v8 |
| ; RVD-NEXT: vfwcvt.f.xu.v v16, v12 |
| ; RVD-NEXT: vnsrl.wi v8, v16, 23 |
| ; RVD-NEXT: vsetvli zero, zero, e8, m2, ta, ma |
| ; RVD-NEXT: vnsrl.wi v12, v8, 0 |
| ; RVD-NEXT: li a1, 134 |
| ; RVD-NEXT: vrsub.vx v8, v12, a1 |
| ; RVD-NEXT: vse8.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v32i8: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: li a1, 32 |
| ; ZVBB-NEXT: vsetvli zero, a1, e8, m2, ta, ma |
| ; ZVBB-NEXT: vle8.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse8.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <32 x i8>, ptr %x |
| %b = load <32 x i8>, ptr %y |
| %c = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %a, i1 true) |
| store <32 x i8> %c, ptr %x |
| ret void |
| } |
| |
| define void @ctlz_zero_undef_v16i16(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_zero_undef_v16i16: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVI-NEXT: vle16.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: lui a1, 5 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v10, v10, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 3 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v10, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v10, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 1 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: li a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 8 |
| ; RVI-NEXT: vse16.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_zero_undef_v16i16: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVF-NEXT: vle16.v v8, (a0) |
| ; RVF-NEXT: vfwcvt.f.xu.v v12, v8 |
| ; RVF-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVF-NEXT: li a1, 142 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: vse16.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v16i16: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; RVD-NEXT: vle16.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v12, v8 |
| ; RVD-NEXT: vnsrl.wi v8, v12, 23 |
| ; RVD-NEXT: li a1, 142 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: vse16.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v16i16: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 16, e16, m2, ta, ma |
| ; ZVBB-NEXT: vle16.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse16.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <16 x i16>, ptr %x |
| %b = load <16 x i16>, ptr %y |
| %c = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %a, i1 true) |
| store <16 x i16> %c, ptr %x |
| ret void |
| } |
| |
| define void @ctlz_zero_undef_v8i32(ptr %x, ptr %y) nounwind { |
| ; RVI-LABEL: ctlz_zero_undef_v8i32: |
| ; RVI: # %bb.0: |
| ; RVI-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RVI-NEXT: vle32.v v8, (a0) |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 2 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 8 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vsrl.vi v10, v8, 16 |
| ; RVI-NEXT: vor.vv v8, v8, v10 |
| ; RVI-NEXT: vnot.v v8, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 1 |
| ; RVI-NEXT: lui a1, 349525 |
| ; RVI-NEXT: addi a1, a1, 1365 |
| ; RVI-NEXT: vand.vx v10, v10, a1 |
| ; RVI-NEXT: vsub.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 209715 |
| ; RVI-NEXT: addi a1, a1, 819 |
| ; RVI-NEXT: vand.vx v10, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 2 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: vadd.vv v8, v10, v8 |
| ; RVI-NEXT: vsrl.vi v10, v8, 4 |
| ; RVI-NEXT: vadd.vv v8, v8, v10 |
| ; RVI-NEXT: lui a1, 61681 |
| ; RVI-NEXT: addi a1, a1, -241 |
| ; RVI-NEXT: vand.vx v8, v8, a1 |
| ; RVI-NEXT: lui a1, 4112 |
| ; RVI-NEXT: addi a1, a1, 257 |
| ; RVI-NEXT: vmul.vx v8, v8, a1 |
| ; RVI-NEXT: vsrl.vi v8, v8, 24 |
| ; RVI-NEXT: vse32.v v8, (a0) |
| ; RVI-NEXT: ret |
| ; |
| ; RVF-LABEL: ctlz_zero_undef_v8i32: |
| ; RVF: # %bb.0: |
| ; RVF-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RVF-NEXT: vle32.v v8, (a0) |
| ; RVF-NEXT: fsrmi a1, 1 |
| ; RVF-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVF-NEXT: fsrm a1 |
| ; RVF-NEXT: vsrl.vi v8, v8, 23 |
| ; RVF-NEXT: li a1, 158 |
| ; RVF-NEXT: vrsub.vx v8, v8, a1 |
| ; RVF-NEXT: vse32.v v8, (a0) |
| ; RVF-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v8i32: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RVD-NEXT: vle32.v v8, (a0) |
| ; RVD-NEXT: vfwcvt.f.xu.v v12, v8 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vnsrl.wx v8, v12, a1 |
| ; RVD-NEXT: li a1, 1054 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: vse32.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v8i32: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; ZVBB-NEXT: vle32.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse32.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <8 x i32>, ptr %x |
| %b = load <8 x i32>, ptr %y |
| %c = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %a, i1 true) |
| store <8 x i32> %c, ptr %x |
| ret void |
| } |
| |
| define void @ctlz_zero_undef_v4i64(ptr %x, ptr %y) nounwind { |
| ; RV32I-LABEL: ctlz_zero_undef_v4i64: |
| ; RV32I: # %bb.0: |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vle64.v v8, (a0) |
| ; RV32I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 2 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 8 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 16 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: li a1, 32 |
| ; RV32I-NEXT: vsrl.vx v10, v8, a1 |
| ; RV32I-NEXT: vor.vv v8, v8, v10 |
| ; RV32I-NEXT: vnot.v v8, v8 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV32I-NEXT: lui a1, 349525 |
| ; RV32I-NEXT: addi a1, a1, 1365 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v12, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vand.vv v10, v10, v12 |
| ; RV32I-NEXT: vsub.vv v8, v8, v10 |
| ; RV32I-NEXT: lui a1, 209715 |
| ; RV32I-NEXT: addi a1, a1, 819 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vand.vv v12, v8, v10 |
| ; RV32I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV32I-NEXT: vand.vv v8, v8, v10 |
| ; RV32I-NEXT: vadd.vv v8, v12, v8 |
| ; RV32I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV32I-NEXT: vadd.vv v8, v8, v10 |
| ; RV32I-NEXT: lui a1, 61681 |
| ; RV32I-NEXT: addi a1, a1, -241 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vand.vv v8, v8, v10 |
| ; RV32I-NEXT: lui a1, 4112 |
| ; RV32I-NEXT: addi a1, a1, 257 |
| ; RV32I-NEXT: vsetivli zero, 8, e32, m2, ta, ma |
| ; RV32I-NEXT: vmv.v.x v10, a1 |
| ; RV32I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32I-NEXT: vmul.vv v8, v8, v10 |
| ; RV32I-NEXT: li a1, 56 |
| ; RV32I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV32I-NEXT: vse64.v v8, (a0) |
| ; RV32I-NEXT: ret |
| ; |
| ; RV64I-LABEL: ctlz_zero_undef_v4i64: |
| ; RV64I: # %bb.0: |
| ; RV64I-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV64I-NEXT: vle64.v v8, (a0) |
| ; RV64I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 2 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 8 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 16 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: li a1, 32 |
| ; RV64I-NEXT: vsrl.vx v10, v8, a1 |
| ; RV64I-NEXT: vor.vv v8, v8, v10 |
| ; RV64I-NEXT: vnot.v v8, v8 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 1 |
| ; RV64I-NEXT: lui a1, 349525 |
| ; RV64I-NEXT: addiw a1, a1, 1365 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v10, v10, a1 |
| ; RV64I-NEXT: vsub.vv v8, v8, v10 |
| ; RV64I-NEXT: lui a1, 209715 |
| ; RV64I-NEXT: addiw a1, a1, 819 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v10, v8, a1 |
| ; RV64I-NEXT: vsrl.vi v8, v8, 2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: vadd.vv v8, v10, v8 |
| ; RV64I-NEXT: vsrl.vi v10, v8, 4 |
| ; RV64I-NEXT: vadd.vv v8, v8, v10 |
| ; RV64I-NEXT: lui a1, 61681 |
| ; RV64I-NEXT: addiw a1, a1, -241 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vand.vx v8, v8, a1 |
| ; RV64I-NEXT: lui a1, 4112 |
| ; RV64I-NEXT: addiw a1, a1, 257 |
| ; RV64I-NEXT: slli a2, a1, 32 |
| ; RV64I-NEXT: add a1, a1, a2 |
| ; RV64I-NEXT: vmul.vx v8, v8, a1 |
| ; RV64I-NEXT: li a1, 56 |
| ; RV64I-NEXT: vsrl.vx v8, v8, a1 |
| ; RV64I-NEXT: vse64.v v8, (a0) |
| ; RV64I-NEXT: ret |
| ; |
| ; RV32F-LABEL: ctlz_zero_undef_v4i64: |
| ; RV32F: # %bb.0: |
| ; RV32F-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RV32F-NEXT: vle64.v v8, (a0) |
| ; RV32F-NEXT: li a1, 190 |
| ; RV32F-NEXT: vmv.v.x v10, a1 |
| ; RV32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma |
| ; RV32F-NEXT: fsrmi a1, 1 |
| ; RV32F-NEXT: vfncvt.f.xu.w v12, v8 |
| ; RV32F-NEXT: fsrm a1 |
| ; RV32F-NEXT: vsrl.vi v8, v12, 23 |
| ; RV32F-NEXT: vwsubu.wv v10, v10, v8 |
| ; RV32F-NEXT: vse64.v v10, (a0) |
| ; RV32F-NEXT: ret |
| ; |
| ; RV64F-LABEL: ctlz_zero_undef_v4i64: |
| ; RV64F: # %bb.0: |
| ; RV64F-NEXT: vsetivli zero, 4, e32, m1, ta, ma |
| ; RV64F-NEXT: vle64.v v8, (a0) |
| ; RV64F-NEXT: li a1, 190 |
| ; RV64F-NEXT: vmv.v.x v10, a1 |
| ; RV64F-NEXT: fsrmi a1, 1 |
| ; RV64F-NEXT: vfncvt.f.xu.w v11, v8 |
| ; RV64F-NEXT: fsrm a1 |
| ; RV64F-NEXT: vsrl.vi v8, v11, 23 |
| ; RV64F-NEXT: vwsubu.vv v12, v10, v8 |
| ; RV64F-NEXT: vse64.v v12, (a0) |
| ; RV64F-NEXT: ret |
| ; |
| ; RVD-LABEL: ctlz_zero_undef_v4i64: |
| ; RVD: # %bb.0: |
| ; RVD-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; RVD-NEXT: vle64.v v8, (a0) |
| ; RVD-NEXT: fsrmi a1, 1 |
| ; RVD-NEXT: vfcvt.f.xu.v v8, v8 |
| ; RVD-NEXT: fsrm a1 |
| ; RVD-NEXT: li a1, 52 |
| ; RVD-NEXT: vsrl.vx v8, v8, a1 |
| ; RVD-NEXT: li a1, 1086 |
| ; RVD-NEXT: vrsub.vx v8, v8, a1 |
| ; RVD-NEXT: vse64.v v8, (a0) |
| ; RVD-NEXT: ret |
| ; |
| ; ZVBB-LABEL: ctlz_zero_undef_v4i64: |
| ; ZVBB: # %bb.0: |
| ; ZVBB-NEXT: vsetivli zero, 4, e64, m2, ta, ma |
| ; ZVBB-NEXT: vle64.v v8, (a0) |
| ; ZVBB-NEXT: vclz.v v8, v8 |
| ; ZVBB-NEXT: vse64.v v8, (a0) |
| ; ZVBB-NEXT: ret |
| %a = load <4 x i64>, ptr %x |
| %b = load <4 x i64>, ptr %y |
| %c = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %a, i1 true) |
| store <4 x i64> %c, ptr %x |
| ret void |
| } |
| ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: |
| ; RV32D: {{.*}} |
| ; RV64D: {{.*}} |