; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -force-streaming -enable-subreg-liveness -verify-machineinstrs < %s | FileCheck %s
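; This test covers the SME2 multi-vector FDOT/BFDOT intrinsics that accumulate
; into ZA (za32, vg1x2/vg1x4 forms). Each function checks the lowering for a
; base slice index and for the same index plus the immediate offset 7.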
target triple="aarch64-linux-gnu"
; == Multi, multi (16-bit float) ==
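; Note: the multi-vector by multi-vector forms need each operand group in
; consecutive Z registers, so the checks below expect the incoming arguments to
; be copied into register tuples (e.g. { z6.h, z7.h } and { z4.h, z5.h })
; before the fdot.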
define void @fdot_multi_za32_f16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #0 {
; CHECK-LABEL: fdot_multi_za32_f16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z5.d, z4.d
; CHECK-NEXT: mov z7.d, z2.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z4.d, z3.d
; CHECK-NEXT: mov z6.d, z1.d
; CHECK-NEXT: fdot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT: fdot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3)
ret void
}
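; The *_tuple variants below feed fdot directly from strided ld1h results; with
; subregister liveness enabled (see the RUN line) the loaded registers are
; expected to form the required tuples as-is, so no moves of Z registers should
; appear before the fdot.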
define void @fdot_multi_za32_f16_vg1x2_tuple(i64 %stride, ptr %ptr) #0 {
; CHECK-LABEL: fdot_multi_za32_f16_vg1x2_tuple:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x9, x1, x0
; CHECK-NEXT: mov w8, wzr
; CHECK-NEXT: ld1h { z16.h, z24.h }, pn8/z, [x1]
; CHECK-NEXT: ld1h { z17.h, z25.h }, pn8/z, [x9]
; CHECK-NEXT: fdot za.s[w8, 0, vgx2], { z16.h, z17.h }, { z24.h, z25.h }
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
%1 = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x2.nxv8f16(target("aarch64.svcount") %0, ptr %ptr)
%2 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %1, 0
%3 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %1, 1
%arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
%4 = tail call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x2.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx2)
%5 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %4, 0
%6 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half> } %4, 1
call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32 0, <vscale x 8 x half> %2, <vscale x 8 x half> %5, <vscale x 8 x half> %3, <vscale x 8 x half> %6)
ret void
}
define void @fdot_multi_za32_f16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
; CHECK-LABEL: fdot_multi_za32_f16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
; CHECK-NEXT: ld1h { z27.h }, p0/z, [x1]
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: fdot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT: fdot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT: ret
<vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7) #0 {
call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
<vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
<vscale x 8 x half> %zn4, <vscale x 8 x half> %zn5, <vscale x 8 x half> %zn6, <vscale x 8 x half> %zn7)
ret void
}
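; As in the vg1x2 tuple test, but with four strided loads at %ptr, %ptr+%stride,
; %ptr+2*%stride and %ptr+3*%stride; the four-register tuples
; (e.g. { z16.h - z19.h }) are expected to come straight from the strided load
; results without extra copies.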
define void @fdot_multi_za32_f16_vg1x4_tuple(i64 %stride, ptr %ptr) #0 {
; CHECK-LABEL: fdot_multi_za32_f16_vg1x4_tuple:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: add x9, x0, x0, lsl #1
; CHECK-NEXT: ptrue pn8.b
; CHECK-NEXT: add x10, x1, x0
; CHECK-NEXT: ld1h { z16.h, z20.h, z24.h, z28.h }, pn8/z, [x1]
; CHECK-NEXT: ld1h { z17.h, z21.h, z25.h, z29.h }, pn8/z, [x10]
; CHECK-NEXT: ld1h { z18.h, z22.h, z26.h, z30.h }, pn8/z, [x1, x0, lsl #1]
; CHECK-NEXT: add x9, x1, x9
; CHECK-NEXT: mov w8, wzr
; CHECK-NEXT: ld1h { z19.h, z23.h, z27.h, z31.h }, pn8/z, [x9]
; CHECK-NEXT: fdot za.s[w8, 0, vgx4], { z16.h - z19.h }, { z20.h - z23.h }
; CHECK-NEXT: fdot za.s[w8, 0, vgx4], { z24.h - z27.h }, { z28.h - z31.h }
; CHECK-NEXT: ret
entry:
%0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
%1 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %ptr)
%2 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 0
%3 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 1
%4 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 2
%5 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %1, 3
%arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
%6 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx2)
%7 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 0
%8 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 1
%9 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 2
%10 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %6, 3
%mul3 = shl i64 %stride, 1
%arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
%11 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx4)
%12 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 0
%13 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 1
%14 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 2
%15 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %11, 3
%mul5 = mul i64 %stride, 3
%arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
%16 = tail call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.ld1.pn.x4.nxv8f16(target("aarch64.svcount") %0, ptr %arrayidx6)
%17 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 0
%18 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 1
%19 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 2
%20 = extractvalue { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %16, 3
call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %2, <vscale x 8 x half> %7, <vscale x 8 x half> %12, <vscale x 8 x half> %17,
<vscale x 8 x half> %3, <vscale x 8 x half> %8, <vscale x 8 x half> %13, <vscale x 8 x half> %18)
call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32 0, <vscale x 8 x half> %4, <vscale x 8 x half> %9, <vscale x 8 x half> %14, <vscale x 8 x half> %19,
<vscale x 8 x half> %5, <vscale x 8 x half> %10, <vscale x 8 x half> %15, <vscale x 8 x half> %20)
ret void
}
; == Multi, multi (16-bit bfloat) ==
define void @bfdot_multi_za32_bf16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #0 {
; CHECK-LABEL: bfdot_multi_za32_bf16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z5.d, z4.d
; CHECK-NEXT: mov z7.d, z2.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z4.d, z3.d
; CHECK-NEXT: mov z6.d, z1.d
; CHECK-NEXT: bfdot za.s[w8, 0, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT: bfdot za.s[w8, 7, vgx2], { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3)
ret void
}
define void @fdot_multi_za32_bf16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
; CHECK-LABEL: fdot_multi_za32_bf16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov z26.d, z7.d
; CHECK-NEXT: mov z25.d, z6.d
; CHECK-NEXT: mov z7.d, z4.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z24.d, z5.d
; CHECK-NEXT: ld1h { z27.h }, p0/z, [x1]
; CHECK-NEXT: mov z6.d, z3.d
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: bfdot za.s[w8, 0, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT: bfdot za.s[w8, 7, vgx4], { z4.h - z7.h }, { z24.h - z27.h }
; CHECK-NEXT: ret
<vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7) #0 {
call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
<vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
<vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zn5, <vscale x 8 x bfloat> %zn6, <vscale x 8 x bfloat> %zn7)
ret void
}
; == Multi, single (16-bit float) ==
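; The multi-vector by single-vector forms take the second operand as a single Z
; register; the incoming arguments already sit in consecutive registers
; (z1-z2 / z1-z4), so no register copies are expected before the fdot.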
define void @fdot_single_za32_f16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 {
; CHECK-LABEL: fdot_single_za32_f16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: fdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT: fdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2)
ret void
}
define void @fdot_single_za32_f16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 {
; CHECK-LABEL: fdot_single_za32_f16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: fdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT: fdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4)
ret void
}
; == Multi, single (16-bit bfloat) ==
define void @bfdot_single_za32_bf16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 {
; CHECK-LABEL: bfdot_single_za32_bf16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: bfdot za.s[w8, 0, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT: bfdot za.s[w8, 7, vgx2], { z1.h, z2.h }, z3.h
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2)
ret void
}
define void @bfdot_single_za32_bf16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 {
; CHECK-LABEL: bfdot_single_za32_bf16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: bfdot za.s[w8, 0, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT: bfdot za.s[w8, 7, vgx4], { z1.h - z4.h }, z5.h
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4)
ret void
}
; == Multi, indexed (16-bit float) ==
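; The indexed (lane) forms pass the lane as an immediate (3 in these tests); the
; multi-vector operand is expected to be copied into a consecutive tuple
; ({ z4.h, z5.h } / { z24.h - z27.h }) with the lane index shown as z3.h[3] or
; z5.h[3] on the fdot.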
define void @fdot_lane_za32_f16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2) #0 {
; CHECK-LABEL: fdot_lane_za32_f16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: fdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT: fdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, i32 3)
ret void
}
define void @fdot_lane_za32_f16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4) #0 {
; CHECK-LABEL: fdot_lane_za32_f16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z27.d, z4.d
; CHECK-NEXT: mov z26.d, z3.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z25.d, z2.d
; CHECK-NEXT: mov z24.d, z1.d
; CHECK-NEXT: fdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT: fdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
<vscale x 8 x half> %zn4, i32 3)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32 %slice2, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3,
<vscale x 8 x half> %zn4, i32 3)
ret void
}
; == Multi, indexed (16-bit bfloat) ==
define void @bfdot_lane_za32_bf16_vg1x2(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2) #0 {
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z5.d, z2.d
; CHECK-NEXT: mov z4.d, z1.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: bfdot za.s[w8, 0, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT: bfdot za.s[w8, 7, vgx2], { z4.h, z5.h }, z3.h[3]
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, i32 3)
ret void
}
define void @bfdot_lane_za32_bf16_vg1x4(i32 %slice, <vscale x 16 x i8> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4) #0 {
; CHECK-LABEL: bfdot_lane_za32_bf16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: mov z27.d, z4.d
; CHECK-NEXT: mov z26.d, z3.d
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: mov z25.d, z2.d
; CHECK-NEXT: mov z24.d, z1.d
; CHECK-NEXT: bfdot za.s[w8, 0, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT: bfdot za.s[w8, 7, vgx4], { z24.h - z27.h }, z5.h[3]
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
<vscale x 8 x bfloat> %zn4, i32 3)
%slice2 = add i32 %slice, 7
call void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32 %slice2, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3,
<vscale x 8 x bfloat> %zn4, i32 3)
ret void
}
attributes #0 = { nounwind "target-features"="+sme2" }
; == Multi, multi (16-bit float)
declare void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>,
<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
; == Multi, multi (16-bit bfloat)
declare void @llvm.aarch64.sme.fdot.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fdot.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>,
<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
; == Multi, single (16-bit float)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>)
; == Multi, single (16-bit bfloat)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare void @llvm.aarch64.sme.fdot.single.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
; == Multi, indexed (16-bit float)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8f16(i32, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, i32)
; == Multi, indexed (16-bit bfloat)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x2.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)
declare void @llvm.aarch64.sme.fdot.lane.za32.vg1x4.nxv8bf16(i32, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i32)