| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_256 |
| ; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 |
| ; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -check-prefixes=CHECK,VBITS_GE_512 |
| |
| target triple = "aarch64-unknown-linux-gnu" |
| |
; Don't use SVE for 128-bit vectors.
; Signed ABD: sext i8->i16, subtract, abs, trunc back to i8 should be
; recognized as a single NEON sabd on the original i8 elements.
; (i1 true on llvm.abs = INT_MIN input produces poison.)
define void @sabd_v16i8_v16i16(ptr %a, ptr %b) #0 {
; CHECK-LABEL: sabd_v16i8_v16i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: sabd v0.16b, v0.16b, v1.16b
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
  %a.ld = load <16 x i8>, ptr %a
  %b.ld = load <16 x i8>, ptr %b
  %a.sext = sext <16 x i8> %a.ld to <16 x i16>
  %b.sext = sext <16 x i8> %b.ld to <16 x i16>
  %sub = sub <16 x i16> %a.sext, %b.sext
  %abs = call <16 x i16> @llvm.abs.v16i16(<16 x i16> %sub, i1 true)
  %trunc = trunc <16 x i16> %abs to <16 x i8>
  store <16 x i8> %trunc, ptr %a
  ret void
}
| |
; Don't use SVE for 128-bit vectors.
; Same sabd pattern as above but widened through i32 — the extra extension
; width must not prevent the single-instruction sabd selection.
define void @sabd_v16i8_v16i32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: sabd_v16i8_v16i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: sabd v0.16b, v0.16b, v1.16b
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
  %a.ld = load <16 x i8>, ptr %a
  %b.ld = load <16 x i8>, ptr %b
  %a.sext = sext <16 x i8> %a.ld to <16 x i32>
  %b.sext = sext <16 x i8> %b.ld to <16 x i32>
  %sub = sub <16 x i32> %a.sext, %b.sext
  %abs = call <16 x i32> @llvm.abs.v16i32(<16 x i32> %sub, i1 true)
  %trunc = trunc <16 x i32> %abs to <16 x i8>
  store <16 x i8> %trunc, ptr %a
  ret void
}
| |
; Don't use SVE for 128-bit vectors.
; Same sabd pattern widened through i64 — widest extension case for the
; 128-bit (NEON) sized input.
define void @sabd_v16i8_v16i64(ptr %a, ptr %b) #0 {
; CHECK-LABEL: sabd_v16i8_v16i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: ldr q1, [x1]
; CHECK-NEXT: sabd v0.16b, v0.16b, v1.16b
; CHECK-NEXT: str q0, [x0]
; CHECK-NEXT: ret
  %a.ld = load <16 x i8>, ptr %a
  %b.ld = load <16 x i8>, ptr %b
  %a.sext = sext <16 x i8> %a.ld to <16 x i64>
  %b.sext = sext <16 x i8> %b.ld to <16 x i64>
  %sub = sub <16 x i64> %a.sext, %b.sext
  %abs = call <16 x i64> @llvm.abs.v16i64(<16 x i64> %sub, i1 true)
  %trunc = trunc <16 x i64> %abs to <16 x i8>
  store <16 x i8> %trunc, ptr %a
  ret void
}
| |
; 256-bit fixed-length vector: selected as a single predicated SVE sabd on
; .b elements (vl32) rather than widening to i16.
define void @sabd_v32i8_v32i16(ptr %a, ptr %b) #0 {
; CHECK-LABEL: sabd_v32i8_v32i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b, vl32
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
  %a.ld = load <32 x i8>, ptr %a
  %b.ld = load <32 x i8>, ptr %b
  %a.sext = sext <32 x i8> %a.ld to <32 x i16>
  %b.sext = sext <32 x i8> %b.ld to <32 x i16>
  %sub = sub <32 x i16> %a.sext, %b.sext
  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
  %trunc = trunc <32 x i16> %abs to <32 x i8>
  store <32 x i8> %trunc, ptr %a
  ret void
}
| |
; Unsigned variant: zext (instead of sext) feeding sub+abs+trunc must be
; recognized as SVE uabd on .b elements.
define void @uabd_v32i8_v32i16(ptr %a, ptr %b) #0 {
; CHECK-LABEL: uabd_v32i8_v32i16:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b, vl32
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: uabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
  %a.ld = load <32 x i8>, ptr %a
  %b.ld = load <32 x i8>, ptr %b
  %a.zext = zext <32 x i8> %a.ld to <32 x i16>
  %b.zext = zext <32 x i8> %b.ld to <32 x i16>
  %sub = sub <32 x i16> %a.zext, %b.zext
  %abs = call <32 x i16> @llvm.abs.v32i16(<32 x i16> %sub, i1 true)
  %trunc = trunc <32 x i16> %abs to <32 x i8>
  store <32 x i8> %trunc, ptr %a
  ret void
}
| |
; 256-bit input, widened through i32 — still a single SVE sabd on .b.
define void @sabd_v32i8_v32i32(ptr %a, ptr %b) #0 {
; CHECK-LABEL: sabd_v32i8_v32i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b, vl32
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
  %a.ld = load <32 x i8>, ptr %a
  %b.ld = load <32 x i8>, ptr %b
  %a.sext = sext <32 x i8> %a.ld to <32 x i32>
  %b.sext = sext <32 x i8> %b.ld to <32 x i32>
  %sub = sub <32 x i32> %a.sext, %b.sext
  %abs = call <32 x i32> @llvm.abs.v32i32(<32 x i32> %sub, i1 true)
  %trunc = trunc <32 x i32> %abs to <32 x i8>
  store <32 x i8> %trunc, ptr %a
  ret void
}
| |
; 256-bit input, widened through i64 — still a single SVE sabd on .b.
define void @sabd_v32i8_v32i64(ptr %a, ptr %b) #0 {
; CHECK-LABEL: sabd_v32i8_v32i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.b, vl32
; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ld1b { z1.b }, p0/z, [x1]
; CHECK-NEXT: sabd z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
  %a.ld = load <32 x i8>, ptr %a
  %b.ld = load <32 x i8>, ptr %b
  %a.sext = sext <32 x i8> %a.ld to <32 x i64>
  %b.sext = sext <32 x i8> %b.ld to <32 x i64>
  %sub = sub <32 x i64> %a.sext, %b.sext
  %abs = call <32 x i64> @llvm.abs.v32i64(<32 x i64> %sub, i1 true)
  %trunc = trunc <32 x i64> %abs to <32 x i8>
  store <32 x i8> %trunc, ptr %a
  ret void
}
| |
; 512-bit input. With a 256-bit VL the operation is split into two vl32
; halves (second half addressed at byte offset 32 via w8); with VL >= 512 it
; is a single vl64 sabd. The vl64 sequence is also what the min=2048 RUN
; line checks, since it is valid for any VL >= 512.
define void @sabd_v64i8_v64i64(ptr %a, ptr %b) #0 {
; VBITS_GE_256-LABEL: sabd_v64i8_v64i64:
; VBITS_GE_256: // %bb.0:
; VBITS_GE_256-NEXT: ptrue p0.b, vl32
; VBITS_GE_256-NEXT: mov w8, #32 // =0x20
; VBITS_GE_256-NEXT: ld1b { z0.b }, p0/z, [x0, x8]
; VBITS_GE_256-NEXT: ld1b { z1.b }, p0/z, [x1, x8]
; VBITS_GE_256-NEXT: ld1b { z2.b }, p0/z, [x0]
; VBITS_GE_256-NEXT: ld1b { z3.b }, p0/z, [x1]
; VBITS_GE_256-NEXT: sabd z0.b, p0/m, z0.b, z1.b
; VBITS_GE_256-NEXT: movprfx z1, z2
; VBITS_GE_256-NEXT: sabd z1.b, p0/m, z1.b, z3.b
; VBITS_GE_256-NEXT: st1b { z0.b }, p0, [x0, x8]
; VBITS_GE_256-NEXT: st1b { z1.b }, p0, [x0]
; VBITS_GE_256-NEXT: ret
;
; VBITS_GE_512-LABEL: sabd_v64i8_v64i64:
; VBITS_GE_512: // %bb.0:
; VBITS_GE_512-NEXT: ptrue p0.b, vl64
; VBITS_GE_512-NEXT: ld1b { z0.b }, p0/z, [x0]
; VBITS_GE_512-NEXT: ld1b { z1.b }, p0/z, [x1]
; VBITS_GE_512-NEXT: sabd z0.b, p0/m, z0.b, z1.b
; VBITS_GE_512-NEXT: st1b { z0.b }, p0, [x0]
; VBITS_GE_512-NEXT: ret
  %a.ld = load <64 x i8>, ptr %a
  %b.ld = load <64 x i8>, ptr %b
  %a.sext = sext <64 x i8> %a.ld to <64 x i64>
  %b.sext = sext <64 x i8> %b.ld to <64 x i64>
  %sub = sub <64 x i64> %a.sext, %b.sext
  %abs = call <64 x i64> @llvm.abs.v64i64(<64 x i64> %sub, i1 true)
  %trunc = trunc <64 x i64> %abs to <64 x i8>
  store <64 x i8> %trunc, ptr %a
  ret void
}
| |
; All test functions require NEON and SVE so both fixed-length lowering paths exist.
attributes #0 = { "target-features"="+neon,+sve" }