| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s --check-prefixes=CHECK,SSE2 |
| ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,BF16 |
| ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avx512bf16,avx512fp16,avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,F16,FP16 |
| ; RUN: llc < %s -mtriple=x86_64-linux-gnu -mattr=avxneconvert,f16c | FileCheck %s --check-prefixes=CHECK,AVX,AVXNC |
| |
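; Scalar bf16 add through memory: each operand is extended to f32 by a 16-bit
; left shift into an XMM register, added as a float, and truncated back to
; bf16 via the __truncsfbf2 libcall.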
| define void @add(ptr %pa, ptr %pb, ptr %pc) nounwind { |
| ; SSE2-LABEL: add: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: movq %rdx, %rbx |
| ; SSE2-NEXT: movzwl (%rsi), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: movzwl (%rdi), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movw %ax, (%rbx) |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: add: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: pushq %rbx |
| ; AVX-NEXT: movq %rdx, %rbx |
| ; AVX-NEXT: movzwl (%rsi), %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm0 |
| ; AVX-NEXT: movzwl (%rdi), %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm1 |
| ; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVX-NEXT: callq __truncsfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: movw %ax, (%rbx) |
| ; AVX-NEXT: popq %rbx |
| ; AVX-NEXT: retq |
| %a = load bfloat, ptr %pa |
| %b = load bfloat, ptr %pb |
| %add = fadd bfloat %a, %b |
| store bfloat %add, ptr %pc |
| ret void |
| } |
| |
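; Same operation as @add, but with the operands and result in XMM registers
; rather than memory.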
| define bfloat @add2(bfloat %a, bfloat %b) nounwind { |
| ; SSE2-LABEL: add2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rax |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movd %xmm1, %ecx |
| ; SSE2-NEXT: shll $16, %ecx |
| ; SSE2-NEXT: movd %ecx, %xmm1 |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: popq %rax |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: add2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: pushq %rax |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: vmovd %xmm1, %ecx |
| ; AVX-NEXT: shll $16, %ecx |
| ; AVX-NEXT: vmovd %ecx, %xmm0 |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm1 |
| ; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVX-NEXT: callq __truncsfbf2@PLT |
| ; AVX-NEXT: popq %rax |
| ; AVX-NEXT: retq |
| %add = fadd bfloat %a, %b |
| ret bfloat %add |
| } |
| |
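; double -> bf16 has no inline lowering, so each input is truncated with the
; __truncdfbf2 libcall; the bf16 sum is extended back to double via
; (v)cvtss2sd for the store.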
| define void @add_double(ptr %pa, ptr %pb, ptr %pc) nounwind { |
| ; SSE2-LABEL: add_double: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbp |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: movq %rdx, %rbx |
| ; SSE2-NEXT: movq %rsi, %r14 |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movd %ebp, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: cvtss2sd %xmm0, %xmm0 |
| ; SSE2-NEXT: movsd %xmm0, (%rbx) |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: popq %rbp |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: add_double: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: pushq %rbp |
| ; AVX-NEXT: pushq %r14 |
| ; AVX-NEXT: pushq %rbx |
| ; AVX-NEXT: movq %rdx, %rbx |
| ; AVX-NEXT: movq %rsi, %r14 |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: callq __truncdfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %ebp |
| ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero |
| ; AVX-NEXT: callq __truncdfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm0 |
| ; AVX-NEXT: shll $16, %ebp |
| ; AVX-NEXT: vmovd %ebp, %xmm1 |
| ; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVX-NEXT: callq __truncsfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm0 |
| ; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 |
| ; AVX-NEXT: vmovsd %xmm0, (%rbx) |
| ; AVX-NEXT: popq %rbx |
| ; AVX-NEXT: popq %r14 |
| ; AVX-NEXT: popq %rbp |
| ; AVX-NEXT: retq |
| %la = load double, ptr %pa |
| %a = fptrunc double %la to bfloat |
| %lb = load double, ptr %pb |
| %b = fptrunc double %lb to bfloat |
| %add = fadd bfloat %a, %b |
| %dadd = fpext bfloat %add to double |
| store double %dadd, ptr %pc |
| ret void |
| } |
| |
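; Register variant of @add_double; the second argument is spilled to the
; stack across the first __truncdfbf2 call.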
| define double @add_double2(double %da, double %db) nounwind { |
| ; SSE2-LABEL: add_double2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $16, %rsp |
| ; SSE2-NEXT: movsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd %ebx, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: cvtss2sd %xmm0, %xmm0 |
| ; SSE2-NEXT: addq $16, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: add_double2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: pushq %rbx |
| ; AVX-NEXT: subq $16, %rsp |
| ; AVX-NEXT: vmovsd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX-NEXT: callq __truncdfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %ebx |
| ; AVX-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload |
| ; AVX-NEXT: # xmm0 = mem[0],zero |
| ; AVX-NEXT: callq __truncdfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm0 |
| ; AVX-NEXT: shll $16, %ebx |
| ; AVX-NEXT: vmovd %ebx, %xmm1 |
| ; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVX-NEXT: callq __truncsfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm0 |
| ; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 |
| ; AVX-NEXT: addq $16, %rsp |
| ; AVX-NEXT: popq %rbx |
| ; AVX-NEXT: retq |
| %a = fptrunc double %da to bfloat |
| %b = fptrunc double %db to bfloat |
| %add = fadd bfloat %a, %b |
| %dadd = fpext bfloat %add to double |
| ret double %dadd |
| } |
| |
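; Adding the constant 1.0: the f32-extended constant is loaded from the
; constant pool and folded into the memory operand of (v)addss.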
| define void @add_constant(ptr %pa, ptr %pc) nounwind { |
| ; SSE2-LABEL: add_constant: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: movq %rsi, %rbx |
| ; SSE2-NEXT: movzwl (%rdi), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movw %ax, (%rbx) |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: add_constant: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: pushq %rbx |
| ; AVX-NEXT: movq %rsi, %rbx |
| ; AVX-NEXT: movzwl (%rdi), %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm0 |
| ; AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: callq __truncsfbf2@PLT |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: movw %ax, (%rbx) |
| ; AVX-NEXT: popq %rbx |
| ; AVX-NEXT: retq |
| %a = load bfloat, ptr %pa |
| %add = fadd bfloat %a, 1.0 |
| store bfloat %add, ptr %pc |
| ret void |
| } |
| |
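; Register variant of @add_constant.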
| define bfloat @add_constant2(bfloat %a) nounwind { |
| ; SSE2-LABEL: add_constant2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rax |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: popq %rax |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: add_constant2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: pushq %rax |
| ; AVX-NEXT: vmovd %xmm0, %eax |
| ; AVX-NEXT: shll $16, %eax |
| ; AVX-NEXT: vmovd %eax, %xmm0 |
| ; AVX-NEXT: vaddss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 |
| ; AVX-NEXT: callq __truncsfbf2@PLT |
| ; AVX-NEXT: popq %rax |
| ; AVX-NEXT: retq |
| %add = fadd bfloat %a, 1.0 |
| ret bfloat %add |
| } |
| |
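; Storing a bf16 constant folds to a single 16-bit immediate store
; (0x3F80 is 1.0 in bf16).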
| define void @store_constant(ptr %pc) nounwind { |
| ; CHECK-LABEL: store_constant: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: movw $16256, (%rdi) # imm = 0x3F80 |
| ; CHECK-NEXT: retq |
| store bfloat 1.0, ptr %pc |
| ret void |
| } |
| |
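; An fpext/fptrunc round trip through f32 is a no-op on bf16 and folds to a
; plain 16-bit load/store copy.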
| define void @fold_ext_trunc(ptr %pa, ptr %pc) nounwind { |
| ; CHECK-LABEL: fold_ext_trunc: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: movzwl (%rdi), %eax |
| ; CHECK-NEXT: movw %ax, (%rsi) |
| ; CHECK-NEXT: retq |
| %a = load bfloat, ptr %pa |
| %ext = fpext bfloat %a to float |
| %trunc = fptrunc float %ext to bfloat |
| store bfloat %trunc, ptr %pc |
| ret void |
| } |
| |
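; Same fold as @fold_ext_trunc with the value in a register: the round trip
; disappears entirely.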
| define bfloat @fold_ext_trunc2(bfloat %a) nounwind { |
| ; CHECK-LABEL: fold_ext_trunc2: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: retq |
| %ext = fpext bfloat %a to float |
| %trunc = fptrunc float %ext to bfloat |
| ret bfloat %trunc |
| } |
| |
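; Vector fadd is scalarized: every lane is extended to f32, added, and
; truncated back through __truncsfbf2. SSE2 moves the lanes through GPRs with
; shifts and ORs; the AVX configurations extract and reinsert lanes with
; vpextrw/vpinsrw (FP16 can use vmovw for lane 0).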
| define <8 x bfloat> @addv(<8 x bfloat> %a, <8 x bfloat> %b) nounwind { |
| ; SSE2-LABEL: addv: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbp |
| ; SSE2-NEXT: pushq %r15 |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %r13 |
| ; SSE2-NEXT: pushq %r12 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $56, %rsp |
| ; SSE2-NEXT: movq %xmm0, %rcx |
| ; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq %rcx, %rax |
| ; SSE2-NEXT: shrq $32, %rax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq %xmm1, %rdx |
| ; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq %rdx, %rax |
| ; SSE2-NEXT: shrq $32, %rax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq %rcx, %rax |
| ; SSE2-NEXT: shrq $48, %rax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq %rdx, %rax |
| ; SSE2-NEXT: shrq $48, %rax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: movq %xmm0, %r12 |
| ; SSE2-NEXT: movq %r12, %rax |
| ; SSE2-NEXT: shrq $32, %rax |
| ; SSE2-NEXT: movq %rax, (%rsp) # 8-byte Spill |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1] |
| ; SSE2-NEXT: movq %xmm1, %r14 |
| ; SSE2-NEXT: movq %r14, %rbp |
| ; SSE2-NEXT: shrq $32, %rbp |
| ; SSE2-NEXT: movq %r12, %r15 |
| ; SSE2-NEXT: shrq $48, %r15 |
| ; SSE2-NEXT: movq %r14, %r13 |
| ; SSE2-NEXT: shrq $48, %r13 |
| ; SSE2-NEXT: movl %r14d, %eax |
| ; SSE2-NEXT: andl $-65536, %eax # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: movl %r12d, %eax |
| ; SSE2-NEXT: andl $-65536, %eax # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: shll $16, %r14d |
| ; SSE2-NEXT: movd %r14d, %xmm1 |
| ; SSE2-NEXT: shll $16, %r12d |
| ; SSE2-NEXT: movd %r12d, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r12d |
| ; SSE2-NEXT: orl %ebx, %r12d |
| ; SSE2-NEXT: shll $16, %r13d |
| ; SSE2-NEXT: movd %r13d, %xmm1 |
| ; SSE2-NEXT: shll $16, %r15d |
| ; SSE2-NEXT: movd %r15d, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %r14d |
| ; SSE2-NEXT: shll $16, %r14d |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movd %ebp, %xmm1 |
| ; SSE2-NEXT: movq (%rsp), %rax # 8-byte Reload |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %ebx |
| ; SSE2-NEXT: orl %r14d, %ebx |
| ; SSE2-NEXT: shlq $32, %rbx |
| ; SSE2-NEXT: orq %r12, %rbx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; SSE2-NEXT: movl %r15d, %eax |
| ; SSE2-NEXT: andl $-65536, %eax # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; SSE2-NEXT: movl %r14d, %eax |
| ; SSE2-NEXT: andl $-65536, %eax # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movq %r15, %rax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: movq %r14, %rax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebp, %r14d |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: addss %xmm1, %xmm0 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebp, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movq %rbx, %xmm1 |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE2-NEXT: addq $56, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r12 |
| ; SSE2-NEXT: popq %r13 |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: popq %r15 |
| ; SSE2-NEXT: popq %rbp |
| ; SSE2-NEXT: retq |
| ; |
| ; BF16-LABEL: addv: |
| ; BF16: # %bb.0: |
| ; BF16-NEXT: pushq %rbp |
| ; BF16-NEXT: pushq %r15 |
| ; BF16-NEXT: pushq %r14 |
| ; BF16-NEXT: pushq %r13 |
| ; BF16-NEXT: pushq %r12 |
| ; BF16-NEXT: pushq %rbx |
| ; BF16-NEXT: subq $40, %rsp |
| ; BF16-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill |
| ; BF16-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; BF16-NEXT: vpextrw $7, %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm2 |
| ; BF16-NEXT: vpextrw $7, %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm2, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; BF16-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $6, %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $6, %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovd %xmm0, %ebp |
| ; BF16-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $5, %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $5, %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovd %xmm0, %r14d |
| ; BF16-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $4, %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $4, %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovd %xmm0, %r15d |
| ; BF16-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $3, %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $3, %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovd %xmm0, %r12d |
| ; BF16-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $2, %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $2, %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovd %xmm0, %r13d |
| ; BF16-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $1, %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; BF16-NEXT: vpextrw $1, %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovd %xmm0, %ebx |
| ; BF16-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; BF16-NEXT: vmovd %xmm0, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; BF16-NEXT: vmovd %xmm1, %eax |
| ; BF16-NEXT: shll $16, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm1 |
| ; BF16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; BF16-NEXT: callq __truncsfbf2@PLT |
| ; BF16-NEXT: vmovd %xmm0, %eax |
| ; BF16-NEXT: vmovd %eax, %xmm0 |
| ; BF16-NEXT: vpinsrw $1, %ebx, %xmm0, %xmm0 |
| ; BF16-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0 |
| ; BF16-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0 |
| ; BF16-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0 |
| ; BF16-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 |
| ; BF16-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0 |
| ; BF16-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload |
| ; BF16-NEXT: addq $40, %rsp |
| ; BF16-NEXT: popq %rbx |
| ; BF16-NEXT: popq %r12 |
| ; BF16-NEXT: popq %r13 |
| ; BF16-NEXT: popq %r14 |
| ; BF16-NEXT: popq %r15 |
| ; BF16-NEXT: popq %rbp |
| ; BF16-NEXT: retq |
| ; |
| ; FP16-LABEL: addv: |
| ; FP16: # %bb.0: |
| ; FP16-NEXT: pushq %rbp |
| ; FP16-NEXT: pushq %r15 |
| ; FP16-NEXT: pushq %r14 |
| ; FP16-NEXT: pushq %r13 |
| ; FP16-NEXT: pushq %r12 |
| ; FP16-NEXT: pushq %rbx |
| ; FP16-NEXT: subq $40, %rsp |
| ; FP16-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; FP16-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill |
| ; FP16-NEXT: vmovw %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm2 |
| ; FP16-NEXT: vmovw %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm2, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $7, %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm0 |
| ; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $7, %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovd %xmm0, %ebp |
| ; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $6, %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm0 |
| ; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $6, %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovd %xmm0, %r14d |
| ; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $5, %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm0 |
| ; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $5, %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovd %xmm0, %r15d |
| ; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $4, %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm0 |
| ; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $4, %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovd %xmm0, %r12d |
| ; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $3, %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm0 |
| ; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $3, %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovd %xmm0, %r13d |
| ; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $2, %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm0 |
| ; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $2, %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovd %xmm0, %ebx |
| ; FP16-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $1, %xmm0, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm0 |
| ; FP16-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload |
| ; FP16-NEXT: vpextrw $1, %xmm1, %eax |
| ; FP16-NEXT: shll $16, %eax |
| ; FP16-NEXT: vmovd %eax, %xmm1 |
| ; FP16-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; FP16-NEXT: callq __truncsfbf2@PLT |
| ; FP16-NEXT: vmovd %xmm0, %eax |
| ; FP16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; FP16-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; FP16-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 |
| ; FP16-NEXT: vpinsrw $2, %ebx, %xmm0, %xmm0 |
| ; FP16-NEXT: vpinsrw $3, %r13d, %xmm0, %xmm0 |
| ; FP16-NEXT: vpinsrw $4, %r12d, %xmm0, %xmm0 |
| ; FP16-NEXT: vpinsrw $5, %r15d, %xmm0, %xmm0 |
| ; FP16-NEXT: vpinsrw $6, %r14d, %xmm0, %xmm0 |
| ; FP16-NEXT: vpinsrw $7, %ebp, %xmm0, %xmm0 |
| ; FP16-NEXT: addq $40, %rsp |
| ; FP16-NEXT: popq %rbx |
| ; FP16-NEXT: popq %r12 |
| ; FP16-NEXT: popq %r13 |
| ; FP16-NEXT: popq %r14 |
| ; FP16-NEXT: popq %r15 |
| ; FP16-NEXT: popq %rbp |
| ; FP16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: addv: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: pushq %rbp |
| ; AVXNC-NEXT: pushq %r15 |
| ; AVXNC-NEXT: pushq %r14 |
| ; AVXNC-NEXT: pushq %r13 |
| ; AVXNC-NEXT: pushq %r12 |
| ; AVXNC-NEXT: pushq %rbx |
| ; AVXNC-NEXT: subq $40, %rsp |
| ; AVXNC-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill |
| ; AVXNC-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVXNC-NEXT: vpextrw $7, %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm2 |
| ; AVXNC-NEXT: vpextrw $7, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm2, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $6, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $6, %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %ebp |
| ; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $5, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $5, %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %r14d |
| ; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $4, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $4, %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %r15d |
| ; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $3, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $3, %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %r12d |
| ; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $2, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $2, %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %r13d |
| ; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $1, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVXNC-NEXT: vpextrw $1, %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %ebx |
| ; AVXNC-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload |
| ; AVXNC-NEXT: vmovd %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; AVXNC-NEXT: vmovd %xmm1, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vaddss %xmm0, %xmm1, %xmm0 |
| ; AVXNC-NEXT: callq __truncsfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $1, %ebx, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: addq $40, %rsp |
| ; AVXNC-NEXT: popq %rbx |
| ; AVXNC-NEXT: popq %r12 |
| ; AVXNC-NEXT: popq %r13 |
| ; AVXNC-NEXT: popq %r14 |
| ; AVXNC-NEXT: popq %r15 |
| ; AVXNC-NEXT: popq %rbp |
| ; AVXNC-NEXT: retq |
| %add = fadd <8 x bfloat> %a, %b |
| ret <8 x bfloat> %add |
| } |
| |
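; PR62997: build a <2 x bfloat> vector from two scalar arguments.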
| define <2 x bfloat> @pr62997(bfloat %a, bfloat %b) { |
| ; SSE2-LABEL: pr62997: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movd %xmm1, %ecx |
| ; SSE2-NEXT: pinsrw $0, %ecx, %xmm1 |
| ; SSE2-NEXT: pinsrw $0, %eax, %xmm0 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: pr62997: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovd %xmm1, %eax |
| ; AVX-NEXT: vmovd %xmm0, %ecx |
| ; AVX-NEXT: vmovd %ecx, %xmm0 |
| ; AVX-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0 |
| ; AVX-NEXT: retq |
| %1 = insertelement <2 x bfloat> undef, bfloat %a, i64 0 |
| %2 = insertelement <2 x bfloat> %1, bfloat %b, i64 1 |
| ret <2 x bfloat> %2 |
| } |
| |
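; PR63017: a <32 x bfloat> zeroinitializer return lowers to plain register
; zeroing in every configuration (four XMM zeroing idioms for SSE2, a single
; vxorps that clears the full ZMM for AVX512, and a YMM pair for
; avxneconvert).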
| define <32 x bfloat> @pr63017() { |
| ; SSE2-LABEL: pr63017: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: xorps %xmm0, %xmm0 |
| ; SSE2-NEXT: xorps %xmm1, %xmm1 |
| ; SSE2-NEXT: xorps %xmm2, %xmm2 |
| ; SSE2-NEXT: xorps %xmm3, %xmm3 |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: pr63017: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: pr63017: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: vxorps %xmm0, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; AVXNC-NEXT: retq |
| ret <32 x bfloat> zeroinitializer |
| } |
| |
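; PR63017 continued: a masked <32 x bfloat> load with a splat passthru of
; -1.0 (0xBF80 = 49024). The AVX512 configurations select a single masked
; vmovdqu16; SSE2 and avxneconvert expand to per-lane conditional loads, and
; SSE2 additionally round-trips every lane through f32 and __truncsfbf2.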
| define <32 x bfloat> @pr63017_2() nounwind { |
| ; SSE2-LABEL: pr63017_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $200, %rsp |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_1 |
| ; SSE2-NEXT: # %bb.2: # %cond.load |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm0, %xmm1 |
| ; SSE2-NEXT: jmp .LBB12_3 |
| ; SSE2-NEXT: .LBB12_1: |
| ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: .LBB12_3: # %else |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_5 |
| ; SSE2-NEXT: # %bb.4: # %cond.load1 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: .LBB12_5: # %else2 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_6 |
| ; SSE2-NEXT: # %bb.7: # %cond.load4 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movdqa %xmm1, %xmm14 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm15 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm12 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm13 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm10 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm11 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm8 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm9 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm6 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm7 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm5 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: jmp .LBB12_8 |
| ; SSE2-NEXT: .LBB12_6: |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movdqa %xmm1, %xmm14 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm15 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm12 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm13 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm10 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm11 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm8 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm9 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm6 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm7 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm5 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE2-NEXT: .LBB12_8: # %else5 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_10 |
| ; SSE2-NEXT: # %bb.9: # %cond.load7 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_10: # %else8 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_12 |
| ; SSE2-NEXT: # %bb.11: # %cond.load10 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_12: # %else11 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_14 |
| ; SSE2-NEXT: # %bb.13: # %cond.load13 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_14: # %else14 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_16 |
| ; SSE2-NEXT: # %bb.15: # %cond.load16 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_16: # %else17 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_18 |
| ; SSE2-NEXT: # %bb.17: # %cond.load19 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_18: # %else20 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_20 |
| ; SSE2-NEXT: # %bb.19: # %cond.load22 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_20: # %else23 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_22 |
| ; SSE2-NEXT: # %bb.21: # %cond.load25 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_22: # %else26 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_24 |
| ; SSE2-NEXT: # %bb.23: # %cond.load28 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_24: # %else29 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_26 |
| ; SSE2-NEXT: # %bb.25: # %cond.load31 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_26: # %else32 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_28 |
| ; SSE2-NEXT: # %bb.27: # %cond.load34 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_28: # %else35 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_30 |
| ; SSE2-NEXT: # %bb.29: # %cond.load37 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_30: # %else38 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_32 |
| ; SSE2-NEXT: # %bb.31: # %cond.load40 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_32: # %else41 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_34 |
| ; SSE2-NEXT: # %bb.33: # %cond.load43 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_34: # %else44 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_36 |
| ; SSE2-NEXT: # %bb.35: # %cond.load46 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_36: # %else47 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_38 |
| ; SSE2-NEXT: # %bb.37: # %cond.load49 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: .LBB12_38: # %else50 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_40 |
| ; SSE2-NEXT: # %bb.39: # %cond.load52 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm14 |
| ; SSE2-NEXT: .LBB12_40: # %else53 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_42 |
| ; SSE2-NEXT: # %bb.41: # %cond.load55 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm15 |
| ; SSE2-NEXT: .LBB12_42: # %else56 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_44 |
| ; SSE2-NEXT: # %bb.43: # %cond.load58 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm12 |
| ; SSE2-NEXT: .LBB12_44: # %else59 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_46 |
| ; SSE2-NEXT: # %bb.45: # %cond.load61 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm13 |
| ; SSE2-NEXT: .LBB12_46: # %else62 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_48 |
| ; SSE2-NEXT: # %bb.47: # %cond.load64 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm10 |
| ; SSE2-NEXT: .LBB12_48: # %else65 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_50 |
| ; SSE2-NEXT: # %bb.49: # %cond.load67 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm11 |
| ; SSE2-NEXT: .LBB12_50: # %else68 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_52 |
| ; SSE2-NEXT: # %bb.51: # %cond.load70 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm8 |
| ; SSE2-NEXT: .LBB12_52: # %else71 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_54 |
| ; SSE2-NEXT: # %bb.53: # %cond.load73 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm9 |
| ; SSE2-NEXT: .LBB12_54: # %else74 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_56 |
| ; SSE2-NEXT: # %bb.55: # %cond.load76 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm6 |
| ; SSE2-NEXT: .LBB12_56: # %else77 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_58 |
| ; SSE2-NEXT: # %bb.57: # %cond.load79 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm7 |
| ; SSE2-NEXT: .LBB12_58: # %else80 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_60 |
| ; SSE2-NEXT: # %bb.59: # %cond.load82 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm4 |
| ; SSE2-NEXT: .LBB12_60: # %else83 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_62 |
| ; SSE2-NEXT: # %bb.61: # %cond.load85 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm5 |
| ; SSE2-NEXT: .LBB12_62: # %else86 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: jne .LBB12_64 |
| ; SSE2-NEXT: # %bb.63: # %cond.load88 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm2 |
| ; SSE2-NEXT: .LBB12_64: # %else89 |
| ; SSE2-NEXT: xorl %eax, %eax |
| ; SSE2-NEXT: testb %al, %al |
| ; SSE2-NEXT: movd %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: movd %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: jne .LBB12_65 |
| ; SSE2-NEXT: # %bb.66: # %cond.load91 |
| ; SSE2-NEXT: movzwl (%rax), %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: jmp .LBB12_67 |
| ; SSE2-NEXT: .LBB12_65: |
| ; SSE2-NEXT: movd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill |
| ; SSE2-NEXT: .LBB12_67: # %else92 |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebx, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm0 |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0] |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload |
| ; SSE2-NEXT: addq $200, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: pr63017_2: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vpbroadcastw {{.*#+}} zmm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024] |
| ; F16-NEXT: vmovdqu16 (%rax), %zmm0 {%k1} |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: pr63017_2: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024] |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: vmovdqa %ymm0, %ymm1 |
| ; AVXNC-NEXT: jne .LBB12_2 |
| ; AVXNC-NEXT: # %bb.1: # %cond.load |
| ; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm1 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024] |
| ; AVXNC-NEXT: vpbroadcastw {{.*#+}} ymm0 = [49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024,49024] |
| ; AVXNC-NEXT: vpinsrw $0, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_2: # %else |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_4 |
| ; AVXNC-NEXT: # %bb.3: # %cond.load1 |
| ; AVXNC-NEXT: vpinsrw $1, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_4: # %else2 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_6 |
| ; AVXNC-NEXT: # %bb.5: # %cond.load4 |
| ; AVXNC-NEXT: vpinsrw $2, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_6: # %else5 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_8 |
| ; AVXNC-NEXT: # %bb.7: # %cond.load7 |
| ; AVXNC-NEXT: vpinsrw $3, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_8: # %else8 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_10 |
| ; AVXNC-NEXT: # %bb.9: # %cond.load10 |
| ; AVXNC-NEXT: vpinsrw $4, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_10: # %else11 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_12 |
| ; AVXNC-NEXT: # %bb.11: # %cond.load13 |
| ; AVXNC-NEXT: vpinsrw $5, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_12: # %else14 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_14 |
| ; AVXNC-NEXT: # %bb.13: # %cond.load16 |
| ; AVXNC-NEXT: vpinsrw $6, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_14: # %else17 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_16 |
| ; AVXNC-NEXT: # %bb.15: # %cond.load19 |
| ; AVXNC-NEXT: vpinsrw $7, (%rax), %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_16: # %else20 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_18 |
| ; AVXNC-NEXT: # %bb.17: # %cond.load22 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm0[1,2,3,4,5,6,7],ymm2[8],ymm0[9,10,11,12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_18: # %else23 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_20 |
| ; AVXNC-NEXT: # %bb.19: # %cond.load25 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2,3,4,5,6,7,8],ymm2[9],ymm0[10,11,12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_20: # %else26 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_22 |
| ; AVXNC-NEXT: # %bb.21: # %cond.load28 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm2[2],ymm0[3,4,5,6,7,8,9],ymm2[10],ymm0[11,12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_22: # %else29 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_24 |
| ; AVXNC-NEXT: # %bb.23: # %cond.load31 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2],ymm2[3],ymm0[4,5,6,7,8,9,10],ymm2[11],ymm0[12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_24: # %else32 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_26 |
| ; AVXNC-NEXT: # %bb.25: # %cond.load34 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm2[4],ymm0[5,6,7,8,9,10,11],ymm2[12],ymm0[13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_26: # %else35 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_28 |
| ; AVXNC-NEXT: # %bb.27: # %cond.load37 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4],ymm2[5],ymm0[6,7,8,9,10,11,12],ymm2[13],ymm0[14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_28: # %else38 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_30 |
| ; AVXNC-NEXT: # %bb.29: # %cond.load40 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5],ymm2[6],ymm0[7,8,9,10,11,12,13],ymm2[14],ymm0[15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_30: # %else41 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_32 |
| ; AVXNC-NEXT: # %bb.31: # %cond.load43 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm0[0,1,2,3,4,5,6],ymm2[7],ymm0[8,9,10,11,12,13,14],ymm2[15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_32: # %else44 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_34 |
| ; AVXNC-NEXT: # %bb.33: # %cond.load46 |
| ; AVXNC-NEXT: vpinsrw $0, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_34: # %else47 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_36 |
| ; AVXNC-NEXT: # %bb.35: # %cond.load49 |
| ; AVXNC-NEXT: vpinsrw $1, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_36: # %else50 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_38 |
| ; AVXNC-NEXT: # %bb.37: # %cond.load52 |
| ; AVXNC-NEXT: vpinsrw $2, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_38: # %else53 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_40 |
| ; AVXNC-NEXT: # %bb.39: # %cond.load55 |
| ; AVXNC-NEXT: vpinsrw $3, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_40: # %else56 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_42 |
| ; AVXNC-NEXT: # %bb.41: # %cond.load58 |
| ; AVXNC-NEXT: vpinsrw $4, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_42: # %else59 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_44 |
| ; AVXNC-NEXT: # %bb.43: # %cond.load61 |
| ; AVXNC-NEXT: vpinsrw $5, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_44: # %else62 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_46 |
| ; AVXNC-NEXT: # %bb.45: # %cond.load64 |
| ; AVXNC-NEXT: vpinsrw $6, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_46: # %else65 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_48 |
| ; AVXNC-NEXT: # %bb.47: # %cond.load67 |
| ; AVXNC-NEXT: vpinsrw $7, (%rax), %xmm1, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_48: # %else68 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_50 |
| ; AVXNC-NEXT: # %bb.49: # %cond.load70 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm2[0],ymm1[1,2,3,4,5,6,7],ymm2[8],ymm1[9,10,11,12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_50: # %else71 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_52 |
| ; AVXNC-NEXT: # %bb.51: # %cond.load73 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2,3,4,5,6,7,8],ymm2[9],ymm1[10,11,12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_52: # %else74 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_54 |
| ; AVXNC-NEXT: # %bb.53: # %cond.load76 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm2[2],ymm1[3,4,5,6,7,8,9],ymm2[10],ymm1[11,12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_54: # %else77 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_56 |
| ; AVXNC-NEXT: # %bb.55: # %cond.load79 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2],ymm2[3],ymm1[4,5,6,7,8,9,10],ymm2[11],ymm1[12,13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_56: # %else80 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_58 |
| ; AVXNC-NEXT: # %bb.57: # %cond.load82 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3],ymm2[4],ymm1[5,6,7,8,9,10,11],ymm2[12],ymm1[13,14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_58: # %else83 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_60 |
| ; AVXNC-NEXT: # %bb.59: # %cond.load85 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4],ymm2[5],ymm1[6,7,8,9,10,11,12],ymm2[13],ymm1[14,15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_60: # %else86 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_62 |
| ; AVXNC-NEXT: # %bb.61: # %cond.load88 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5],ymm2[6],ymm1[7,8,9,10,11,12,13],ymm2[14],ymm1[15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_62: # %else89 |
| ; AVXNC-NEXT: xorl %eax, %eax |
| ; AVXNC-NEXT: testb %al, %al |
| ; AVXNC-NEXT: jne .LBB12_64 |
| ; AVXNC-NEXT: # %bb.63: # %cond.load91 |
| ; AVXNC-NEXT: vpbroadcastw (%rax), %ymm2 |
| ; AVXNC-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0,1,2,3,4,5,6],ymm2[7],ymm1[8,9,10,11,12,13,14],ymm2[15] |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7] |
| ; AVXNC-NEXT: .LBB12_64: # %else92 |
| ; AVXNC-NEXT: retq |
| %1 = call <32 x bfloat> @llvm.masked.load.v32bf16.p0(ptr poison, i32 2, <32 x i1> poison, <32 x bfloat> <bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80, bfloat 0xRBF80>) |
| ret <32 x bfloat> %1 |
| } |
| |
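| ; pr62997_3 (presumably a regression test for upstream issue #62997): |
| ; insertelement of a scalar bfloat into lane 1 of <32 x bfloat>. SSE2 |
| ; rebuilds the low 64 bits through GPR shift/or; AVX targets use a single |
| ; vpinsrw on the low 128-bit lane. |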
| define <32 x bfloat> @pr62997_3(<32 x bfloat> %0, bfloat %1) { |
| ; SSE2-LABEL: pr62997_3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq %xmm0, %rax |
| ; SSE2-NEXT: movabsq $-4294967296, %rcx # imm = 0xFFFFFFFF00000000 |
| ; SSE2-NEXT: andq %rax, %rcx |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: movd %xmm4, %edx |
| ; SSE2-NEXT: shll $16, %edx |
| ; SSE2-NEXT: orl %eax, %edx |
| ; SSE2-NEXT: orq %rcx, %rdx |
| ; SSE2-NEXT: movq %rdx, %xmm4 |
| ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1] |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: pr62997_3: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vmovd %xmm1, %eax |
| ; F16-NEXT: vpinsrw $1, %eax, %xmm0, %xmm1 |
| ; F16-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm0 |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: pr62997_3: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: vmovd %xmm2, %eax |
| ; AVXNC-NEXT: vpinsrw $1, %eax, %xmm0, %xmm2 |
| ; AVXNC-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] |
| ; AVXNC-NEXT: retq |
| %3 = insertelement <32 x bfloat> %0, bfloat %1, i64 1 |
| ret <32 x bfloat> %3 |
| } |
| |
| declare <32 x bfloat> @llvm.masked.load.v32bf16.p0(ptr, i32, <32 x i1>, <32 x bfloat>) |
| |
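| ; The pr64460_* tests cover fpext from bfloat vectors. bf16 -> f32 is exact: |
| ; the 16 stored bits are the high half of the f32, so widening is a |
| ; zero-extend into the upper 16 bits of each 32-bit lane (here, an |
| ; interleave with zero). |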
| define <4 x float> @pr64460_1(<4 x bfloat> %a) { |
| ; SSE2-LABEL: pr64460_1: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pextrw $1, %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm2 |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm1 |
| ; SSE2-NEXT: pextrw $3, %xmm0, %eax |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm2 |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm0 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: pr64460_1: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 |
| ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] |
| ; AVX-NEXT: retq |
| %b = fpext <4 x bfloat> %a to <4 x float> |
| ret <4 x float> %b |
| } |
| |
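| ; <8 x bfloat> -> <8 x float>: AVX zero-extends each word to a dword and |
| ; shifts left by 16; SSE2 scalarizes the same widening through GPRs. |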
| define <8 x float> @pr64460_2(<8 x bfloat> %a) { |
| ; SSE2-LABEL: pr64460_2: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq %xmm0, %rdx |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: movq %xmm0, %rcx |
| ; SSE2-NEXT: movq %rcx, %rax |
| ; SSE2-NEXT: shrq $32, %rax |
| ; SSE2-NEXT: movq %rdx, %rsi |
| ; SSE2-NEXT: shrq $32, %rsi |
| ; SSE2-NEXT: movl %edx, %edi |
| ; SSE2-NEXT: andl $-65536, %edi # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %edi, %xmm1 |
| ; SSE2-NEXT: movl %edx, %edi |
| ; SSE2-NEXT: shll $16, %edi |
| ; SSE2-NEXT: movd %edi, %xmm0 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE2-NEXT: shrq $48, %rdx |
| ; SSE2-NEXT: shll $16, %edx |
| ; SSE2-NEXT: movd %edx, %xmm1 |
| ; SSE2-NEXT: shll $16, %esi |
| ; SSE2-NEXT: movd %esi, %xmm2 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] |
| ; SSE2-NEXT: movl %ecx, %edx |
| ; SSE2-NEXT: andl $-65536, %edx # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %edx, %xmm2 |
| ; SSE2-NEXT: movl %ecx, %edx |
| ; SSE2-NEXT: shll $16, %edx |
| ; SSE2-NEXT: movd %edx, %xmm1 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] |
| ; SSE2-NEXT: shrq $48, %rcx |
| ; SSE2-NEXT: shll $16, %ecx |
| ; SSE2-NEXT: movd %ecx, %xmm2 |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm3 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: pr64460_2: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero |
| ; AVX-NEXT: vpslld $16, %ymm0, %ymm0 |
| ; AVX-NEXT: retq |
| %b = fpext <8 x bfloat> %a to <8 x float> |
| ret <8 x float> %b |
| } |
| |
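| ; <16 x bfloat> -> <16 x float>: with AVX512 (F16) the widening happens in |
| ; one zmm; AVXNC processes the two 128-bit halves into separate ymms. |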
| define <16 x float> @pr64460_3(<16 x bfloat> %a) { |
| ; SSE2-LABEL: pr64460_3: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq %xmm1, %rdi |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1,1] |
| ; SSE2-NEXT: movq %xmm1, %rcx |
| ; SSE2-NEXT: movq %rcx, %rax |
| ; SSE2-NEXT: shrq $32, %rax |
| ; SSE2-NEXT: movq %xmm0, %r9 |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: movq %xmm0, %rsi |
| ; SSE2-NEXT: movq %rsi, %rdx |
| ; SSE2-NEXT: shrq $32, %rdx |
| ; SSE2-NEXT: movq %rdi, %r8 |
| ; SSE2-NEXT: shrq $32, %r8 |
| ; SSE2-NEXT: movq %r9, %r10 |
| ; SSE2-NEXT: shrq $32, %r10 |
| ; SSE2-NEXT: movl %r9d, %r11d |
| ; SSE2-NEXT: andl $-65536, %r11d # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %r11d, %xmm1 |
| ; SSE2-NEXT: movl %r9d, %r11d |
| ; SSE2-NEXT: shll $16, %r11d |
| ; SSE2-NEXT: movd %r11d, %xmm0 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE2-NEXT: shrq $48, %r9 |
| ; SSE2-NEXT: shll $16, %r9d |
| ; SSE2-NEXT: movd %r9d, %xmm1 |
| ; SSE2-NEXT: shll $16, %r10d |
| ; SSE2-NEXT: movd %r10d, %xmm2 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] |
| ; SSE2-NEXT: movl %edi, %r9d |
| ; SSE2-NEXT: andl $-65536, %r9d # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %r9d, %xmm1 |
| ; SSE2-NEXT: movl %edi, %r9d |
| ; SSE2-NEXT: shll $16, %r9d |
| ; SSE2-NEXT: movd %r9d, %xmm2 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] |
| ; SSE2-NEXT: shrq $48, %rdi |
| ; SSE2-NEXT: shll $16, %edi |
| ; SSE2-NEXT: movd %edi, %xmm1 |
| ; SSE2-NEXT: shll $16, %r8d |
| ; SSE2-NEXT: movd %r8d, %xmm3 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0] |
| ; SSE2-NEXT: movl %esi, %edi |
| ; SSE2-NEXT: andl $-65536, %edi # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %edi, %xmm3 |
| ; SSE2-NEXT: movl %esi, %edi |
| ; SSE2-NEXT: shll $16, %edi |
| ; SSE2-NEXT: movd %edi, %xmm1 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] |
| ; SSE2-NEXT: shrq $48, %rsi |
| ; SSE2-NEXT: shll $16, %esi |
| ; SSE2-NEXT: movd %esi, %xmm3 |
| ; SSE2-NEXT: shll $16, %edx |
| ; SSE2-NEXT: movd %edx, %xmm4 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0] |
| ; SSE2-NEXT: movl %ecx, %edx |
| ; SSE2-NEXT: andl $-65536, %edx # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %edx, %xmm4 |
| ; SSE2-NEXT: movl %ecx, %edx |
| ; SSE2-NEXT: shll $16, %edx |
| ; SSE2-NEXT: movd %edx, %xmm3 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] |
| ; SSE2-NEXT: shrq $48, %rcx |
| ; SSE2-NEXT: shll $16, %ecx |
| ; SSE2-NEXT: movd %ecx, %xmm4 |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm5 |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1] |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0] |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: pr64460_3: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero |
| ; F16-NEXT: vpslld $16, %zmm0, %zmm0 |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: pr64460_3: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero |
| ; AVXNC-NEXT: vpslld $16, %ymm1, %ymm2 |
| ; AVXNC-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; AVXNC-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero |
| ; AVXNC-NEXT: vpslld $16, %ymm0, %ymm1 |
| ; AVXNC-NEXT: vmovdqa %ymm2, %ymm0 |
| ; AVXNC-NEXT: retq |
| %b = fpext <16 x bfloat> %a to <16 x float> |
| ret <16 x float> %b |
| } |
| |
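| ; <8 x bfloat> -> <8 x double>: extend to f32 first (shift the bits into the |
| ; high half of each lane), then convert f32 -> f64 via cvtss2sd/vcvtps2pd. |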
| define <8 x double> @pr64460_4(<8 x bfloat> %a) { |
| ; SSE2-LABEL: pr64460_4: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movq %xmm0, %rsi |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: movq %xmm0, %rdx |
| ; SSE2-NEXT: movq %rdx, %rax |
| ; SSE2-NEXT: shrq $32, %rax |
| ; SSE2-NEXT: movq %rdx, %rcx |
| ; SSE2-NEXT: shrq $48, %rcx |
| ; SSE2-NEXT: movq %rsi, %rdi |
| ; SSE2-NEXT: shrq $32, %rdi |
| ; SSE2-NEXT: movq %rsi, %r8 |
| ; SSE2-NEXT: shrq $48, %r8 |
| ; SSE2-NEXT: movl %esi, %r9d |
| ; SSE2-NEXT: andl $-65536, %r9d # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %r9d, %xmm0 |
| ; SSE2-NEXT: cvtss2sd %xmm0, %xmm1 |
| ; SSE2-NEXT: shll $16, %esi |
| ; SSE2-NEXT: movd %esi, %xmm0 |
| ; SSE2-NEXT: cvtss2sd %xmm0, %xmm0 |
| ; SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE2-NEXT: shll $16, %r8d |
| ; SSE2-NEXT: movd %r8d, %xmm1 |
| ; SSE2-NEXT: cvtss2sd %xmm1, %xmm2 |
| ; SSE2-NEXT: shll $16, %edi |
| ; SSE2-NEXT: movd %edi, %xmm1 |
| ; SSE2-NEXT: cvtss2sd %xmm1, %xmm1 |
| ; SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; SSE2-NEXT: movl %edx, %esi |
| ; SSE2-NEXT: andl $-65536, %esi # imm = 0xFFFF0000 |
| ; SSE2-NEXT: movd %esi, %xmm2 |
| ; SSE2-NEXT: cvtss2sd %xmm2, %xmm3 |
| ; SSE2-NEXT: shll $16, %edx |
| ; SSE2-NEXT: movd %edx, %xmm2 |
| ; SSE2-NEXT: cvtss2sd %xmm2, %xmm2 |
| ; SSE2-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] |
| ; SSE2-NEXT: shll $16, %ecx |
| ; SSE2-NEXT: movd %ecx, %xmm3 |
| ; SSE2-NEXT: cvtss2sd %xmm3, %xmm4 |
| ; SSE2-NEXT: shll $16, %eax |
| ; SSE2-NEXT: movd %eax, %xmm3 |
| ; SSE2-NEXT: cvtss2sd %xmm3, %xmm3 |
| ; SSE2-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0] |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: pr64460_4: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero |
| ; F16-NEXT: vpslld $16, %ymm0, %ymm0 |
| ; F16-NEXT: vcvtps2pd %ymm0, %zmm0 |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: pr64460_4: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: vpextrw $3, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 |
| ; AVXNC-NEXT: vpextrw $2, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm2 |
| ; AVXNC-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 |
| ; AVXNC-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0] |
| ; AVXNC-NEXT: vpextrw $1, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm2 |
| ; AVXNC-NEXT: vcvtss2sd %xmm2, %xmm2, %xmm2 |
| ; AVXNC-NEXT: vmovd %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm3 |
| ; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 |
| ; AVXNC-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0] |
| ; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2 |
| ; AVXNC-NEXT: vpextrw $7, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm1 |
| ; AVXNC-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 |
| ; AVXNC-NEXT: vpextrw $6, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm3 |
| ; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 |
| ; AVXNC-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0] |
| ; AVXNC-NEXT: vpextrw $5, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm3 |
| ; AVXNC-NEXT: vcvtss2sd %xmm3, %xmm3, %xmm3 |
| ; AVXNC-NEXT: vpextrw $4, %xmm0, %eax |
| ; AVXNC-NEXT: shll $16, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0] |
| ; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 |
| ; AVXNC-NEXT: vmovaps %ymm2, %ymm0 |
| ; AVXNC-NEXT: retq |
| %b = fpext <8 x bfloat> %a to <8 x double> |
| ret <8 x double> %b |
| } |
| |
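| ; Truncation to bfloat rounds, so it cannot be done with shifts alone. SSE2 |
| ; calls the __truncsfbf2 libcall per element; AVX512BF16 and AVXNECONVERT |
| ; use vcvtneps2bf16, widening the 4-element input to ymm to feed it. |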
| define <4 x bfloat> @fptrunc_v4f32(<4 x float> %a) nounwind { |
| ; SSE2-LABEL: fptrunc_v4f32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbp |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $32, %rsp |
| ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE2-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE2-NEXT: movd %xmm0, %r14d |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: pinsrw $0, %eax, %xmm0 |
| ; SSE2-NEXT: pinsrw $0, %r14d, %xmm1 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] |
| ; SSE2-NEXT: pinsrw $0, %ebp, %xmm0 |
| ; SSE2-NEXT: pinsrw $0, %ebx, %xmm2 |
| ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] |
| ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] |
| ; SSE2-NEXT: addq $32, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: popq %rbp |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: fptrunc_v4f32: |
| ; F16: # %bb.0: |
| ; F16-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 |
| ; F16-NEXT: vcvtneps2bf16 %ymm0, %xmm0 |
| ; F16-NEXT: vzeroupper |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: fptrunc_v4f32: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 |
| ; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0 |
| ; AVXNC-NEXT: vzeroupper |
| ; AVXNC-NEXT: retq |
| %b = fptrunc <4 x float> %a to <4 x bfloat> |
| ret <4 x bfloat> %b |
| } |
| |
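| ; <8 x float> -> <8 x bfloat>: one vcvtneps2bf16 ymm -> xmm on both AVX |
| ; flavors; SSE2 makes eight libcalls and repacks the results with shift/or. |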
| define <8 x bfloat> @fptrunc_v8f32(<8 x float> %a) nounwind { |
| ; SSE2-LABEL: fptrunc_v8f32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbp |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $32, %rsp |
| ; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill |
| ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %ebx |
| ; SSE2-NEXT: orl %ebp, %ebx |
| ; SSE2-NEXT: shlq $32, %rbx |
| ; SSE2-NEXT: orq %r14, %rbx |
| ; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebp, %r14d |
| ; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebp, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm1 |
| ; SSE2-NEXT: movq %rbx, %xmm0 |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE2-NEXT: addq $32, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: popq %rbp |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: fptrunc_v8f32: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vcvtneps2bf16 %ymm0, %xmm0 |
| ; F16-NEXT: vzeroupper |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: fptrunc_v8f32: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0 |
| ; AVXNC-NEXT: vzeroupper |
| ; AVXNC-NEXT: retq |
| %b = fptrunc <8 x float> %a to <8 x bfloat> |
| ret <8 x bfloat> %b |
| } |
| |
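| ; <16 x float> -> <16 x bfloat>: F16 converts the whole zmm at once; AVXNC |
| ; converts each ymm half and reassembles the result with vinsertf128. |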
| define <16 x bfloat> @fptrunc_v16f32(<16 x float> %a) nounwind { |
| ; SSE2-LABEL: fptrunc_v16f32: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbp |
| ; SSE2-NEXT: pushq %r15 |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %r12 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $64, %rsp |
| ; SSE2-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill |
| ; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movaps %xmm2, %xmm0 |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm2[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %ebx |
| ; SSE2-NEXT: orl %ebp, %ebx |
| ; SSE2-NEXT: shlq $32, %rbx |
| ; SSE2-NEXT: orq %r14, %rbx |
| ; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r15d |
| ; SSE2-NEXT: orl %ebp, %r15d |
| ; SSE2-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebp, %r14d |
| ; SSE2-NEXT: shlq $32, %r14 |
| ; SSE2-NEXT: orq %r15, %r14 |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r12d |
| ; SSE2-NEXT: orl %ebp, %r12d |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r15d |
| ; SSE2-NEXT: orl %ebp, %r15d |
| ; SSE2-NEXT: shlq $32, %r15 |
| ; SSE2-NEXT: orq %r12, %r15 |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r12d |
| ; SSE2-NEXT: orl %ebp, %r12d |
| ; SSE2-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncsfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebp, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r12, %rax |
| ; SSE2-NEXT: movq %rax, %xmm1 |
| ; SSE2-NEXT: movq %r15, %xmm0 |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE2-NEXT: movq %r14, %xmm2 |
| ; SSE2-NEXT: movq %rbx, %xmm1 |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] |
| ; SSE2-NEXT: addq $64, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r12 |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: popq %r15 |
| ; SSE2-NEXT: popq %rbp |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: fptrunc_v16f32: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vcvtneps2bf16 %zmm0, %ymm0 |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: fptrunc_v16f32: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm0, %xmm0 |
| ; AVXNC-NEXT: vinsertf128 $0, %xmm0, %ymm0, %ymm0 |
| ; AVXNC-NEXT: {vex} vcvtneps2bf16 %ymm1, %xmm1 |
| ; AVXNC-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 |
| ; AVXNC-NEXT: retq |
| %b = fptrunc <16 x float> %a to <16 x bfloat> |
| ret <16 x bfloat> %b |
| } |
| |
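| ; f64 -> bf16 has no dedicated instruction even with AVX512BF16, and |
| ; truncating through f32 first could round twice, so every configuration |
| ; calls __truncdfbf2 once per element. |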
| define <8 x bfloat> @fptrunc_v8f64(<8 x double> %a) nounwind { |
| ; SSE2-LABEL: fptrunc_v8f64: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbp |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $64, %rsp |
| ; SSE2-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill |
| ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebx |
| ; SSE2-NEXT: shll $16, %ebx |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebx, %r14d |
| ; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %ebx |
| ; SSE2-NEXT: orl %ebp, %ebx |
| ; SSE2-NEXT: shlq $32, %rbx |
| ; SSE2-NEXT: orq %r14, %rbx |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %r14d |
| ; SSE2-NEXT: orl %ebp, %r14d |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1,1] |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %ebp |
| ; SSE2-NEXT: shll $16, %ebp |
| ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; SSE2-NEXT: callq __truncdfbf2@PLT |
| ; SSE2-NEXT: movd %xmm0, %eax |
| ; SSE2-NEXT: movzwl %ax, %eax |
| ; SSE2-NEXT: orl %ebp, %eax |
| ; SSE2-NEXT: shlq $32, %rax |
| ; SSE2-NEXT: orq %r14, %rax |
| ; SSE2-NEXT: movq %rax, %xmm1 |
| ; SSE2-NEXT: movq %rbx, %xmm0 |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE2-NEXT: addq $64, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: popq %rbp |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: fptrunc_v8f64: |
| ; F16: # %bb.0: |
| ; F16-NEXT: pushq %rbp |
| ; F16-NEXT: pushq %r15 |
| ; F16-NEXT: pushq %r14 |
| ; F16-NEXT: pushq %r13 |
| ; F16-NEXT: pushq %r12 |
| ; F16-NEXT: pushq %rbx |
| ; F16-NEXT: subq $136, %rsp |
| ; F16-NEXT: vmovupd %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; F16-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0] |
| ; F16-NEXT: vzeroupper |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; F16-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; F16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; F16-NEXT: vzeroupper |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; F16-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[1,0] |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; F16-NEXT: vextractf32x4 $2, %zmm0, %xmm0 |
| ; F16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; F16-NEXT: vzeroupper |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; F16-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[1,0] |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; F16-NEXT: vextractf32x4 $3, %zmm0, %xmm0 |
| ; F16-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; F16-NEXT: vzeroupper |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; F16-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[1,0] |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; F16-NEXT: vmovd %xmm0, %ebp |
| ; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; F16-NEXT: vmovd %xmm0, %r14d |
| ; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; F16-NEXT: vmovd %xmm0, %r15d |
| ; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; F16-NEXT: vmovd %xmm0, %r12d |
| ; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; F16-NEXT: vmovd %xmm0, %r13d |
| ; F16-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; F16-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; F16-NEXT: vmovd %xmm0, %ebx |
| ; F16-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; F16-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0 |
| ; F16-NEXT: vzeroupper |
| ; F16-NEXT: callq __truncdfbf2@PLT |
| ; F16-NEXT: vmovd %xmm0, %eax |
| ; F16-NEXT: vmovd %eax, %xmm0 |
| ; F16-NEXT: vpinsrw $1, %ebx, %xmm0, %xmm0 |
| ; F16-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0 |
| ; F16-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0 |
| ; F16-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0 |
| ; F16-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 |
| ; F16-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0 |
| ; F16-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload |
| ; F16-NEXT: addq $136, %rsp |
| ; F16-NEXT: popq %rbx |
| ; F16-NEXT: popq %r12 |
| ; F16-NEXT: popq %r13 |
| ; F16-NEXT: popq %r14 |
| ; F16-NEXT: popq %r15 |
| ; F16-NEXT: popq %rbp |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: fptrunc_v8f64: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: pushq %rbp |
| ; AVXNC-NEXT: pushq %r15 |
| ; AVXNC-NEXT: pushq %r14 |
| ; AVXNC-NEXT: pushq %r13 |
| ; AVXNC-NEXT: pushq %r12 |
| ; AVXNC-NEXT: pushq %rbx |
| ; AVXNC-NEXT: subq $120, %rsp |
| ; AVXNC-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVXNC-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; AVXNC-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0] |
| ; AVXNC-NEXT: vzeroupper |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVXNC-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVXNC-NEXT: vzeroupper |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[1,0] |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 |
| ; AVXNC-NEXT: vzeroupper |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[1,0] |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVXNC-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVXNC-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; AVXNC-NEXT: vzeroupper |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[1,0] |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; AVXNC-NEXT: vmovd %xmm0, %ebp |
| ; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; AVXNC-NEXT: vmovd %xmm0, %r14d |
| ; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; AVXNC-NEXT: vmovd %xmm0, %r15d |
| ; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; AVXNC-NEXT: vmovd %xmm0, %r12d |
| ; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; AVXNC-NEXT: vmovd %xmm0, %r13d |
| ; AVXNC-NEXT: vmovd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; AVXNC-NEXT: vmovd %xmm0, %ebx |
| ; AVXNC-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload |
| ; AVXNC-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0 |
| ; AVXNC-NEXT: vzeroupper |
| ; AVXNC-NEXT: callq __truncdfbf2@PLT |
| ; AVXNC-NEXT: vmovd %xmm0, %eax |
| ; AVXNC-NEXT: vmovd %eax, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $1, %ebx, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $2, %r13d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $3, %r12d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $4, %r15d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $6, %ebp, %xmm0, %xmm0 |
| ; AVXNC-NEXT: vpinsrw $7, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 4-byte Folded Reload |
| ; AVXNC-NEXT: addq $120, %rsp |
| ; AVXNC-NEXT: popq %rbx |
| ; AVXNC-NEXT: popq %r12 |
| ; AVXNC-NEXT: popq %r13 |
| ; AVXNC-NEXT: popq %r14 |
| ; AVXNC-NEXT: popq %r15 |
| ; AVXNC-NEXT: popq %rbp |
| ; AVXNC-NEXT: retq |
| %b = fptrunc <8 x double> %a to <8 x bfloat> |
| ret <8 x bfloat> %b |
| } |
| |
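| ; Splat of one 16-byte load across all four 128-bit lanes: register copies |
| ; suffice for SSE2, a single vbroadcastf32x4/vbroadcastf128 for AVX. |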
| define <32 x bfloat> @test_v8bf16_v32bf16(ptr %0) { |
| ; SSE2-LABEL: test_v8bf16_v32bf16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movaps (%rdi), %xmm0 |
| ; SSE2-NEXT: movaps %xmm0, %xmm1 |
| ; SSE2-NEXT: movaps %xmm0, %xmm2 |
| ; SSE2-NEXT: movaps %xmm0, %xmm3 |
| ; SSE2-NEXT: retq |
| ; |
| ; F16-LABEL: test_v8bf16_v32bf16: |
| ; F16: # %bb.0: |
| ; F16-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] |
| ; F16-NEXT: retq |
| ; |
| ; AVXNC-LABEL: test_v8bf16_v32bf16: |
| ; AVXNC: # %bb.0: |
| ; AVXNC-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] |
| ; AVXNC-NEXT: vmovaps %ymm0, %ymm1 |
| ; AVXNC-NEXT: retq |
| %2 = load <8 x bfloat>, ptr %0, align 16 |
| %3 = shufflevector <8 x bfloat> %2, <8 x bfloat> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> |
| ret <32 x bfloat> %3 |
| } |
| |
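| ; Concatenating two <8 x bfloat> values is free on SSE2 (the halves already |
| ; sit in xmm0/xmm1) and a single vinsertf128 on AVX. |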
| define <16 x bfloat> @concat_v8bf16(<8 x bfloat> %x, <8 x bfloat> %y) { |
| ; SSE2-LABEL: concat_v8bf16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: concat_v8bf16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 |
| ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 |
| ; AVX-NEXT: retq |
| %a = shufflevector <8 x bfloat> %x, <8 x bfloat> %y, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> |
| ret <16 x bfloat> %a |
| } |
| |
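| ; Extracting elements 8-15 is one vextractf128 on AVX. The SSE2 form |
| ; round-trips xmm1 through GPRs with pextrw/shll/or even though the value is |
| ; already in place; a plain movdqa %xmm1, %xmm0 would seemingly suffice. |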
| define <8 x bfloat> @extract_v32bf16_v8bf16(<32 x bfloat> %x) { |
| ; SSE2-LABEL: extract_v32bf16_v8bf16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pextrw $0, %xmm1, %eax |
| ; SSE2-NEXT: pextrw $1, %xmm1, %ecx |
| ; SSE2-NEXT: shll $16, %ecx |
| ; SSE2-NEXT: orl %eax, %ecx |
| ; SSE2-NEXT: pextrw $2, %xmm1, %eax |
| ; SSE2-NEXT: pextrw $3, %xmm1, %edx |
| ; SSE2-NEXT: shll $16, %edx |
| ; SSE2-NEXT: orl %eax, %edx |
| ; SSE2-NEXT: shlq $32, %rdx |
| ; SSE2-NEXT: orq %rcx, %rdx |
| ; SSE2-NEXT: pextrw $4, %xmm1, %eax |
| ; SSE2-NEXT: pextrw $5, %xmm1, %ecx |
| ; SSE2-NEXT: shll $16, %ecx |
| ; SSE2-NEXT: orl %eax, %ecx |
| ; SSE2-NEXT: pextrw $6, %xmm1, %eax |
| ; SSE2-NEXT: pextrw $7, %xmm1, %esi |
| ; SSE2-NEXT: shll $16, %esi |
| ; SSE2-NEXT: orl %eax, %esi |
| ; SSE2-NEXT: shlq $32, %rsi |
| ; SSE2-NEXT: orq %rcx, %rsi |
| ; SSE2-NEXT: movq %rsi, %xmm1 |
| ; SSE2-NEXT: movq %rdx, %xmm0 |
| ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: extract_v32bf16_v8bf16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0 |
| ; AVX-NEXT: vzeroupper |
| ; AVX-NEXT: retq |
| %a = shufflevector <32 x bfloat> %x, <32 x bfloat> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> |
| ret <8 x bfloat> %a |
| } |
| |
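| ; Concatenation with zero: SSE2 only needs to zero xmm1; on AVX a 128-bit |
| ; vmovaps implicitly clears the upper half of ymm0. |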
| define <16 x bfloat> @concat_zero_v8bf16(<8 x bfloat> %x, <8 x bfloat> %y) { |
| ; SSE2-LABEL: concat_zero_v8bf16: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: xorps %xmm1, %xmm1 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: concat_zero_v8bf16: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vmovaps %xmm0, %xmm0 |
| ; AVX-NEXT: retq |
| %a = shufflevector <8 x bfloat> %x, <8 x bfloat> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15> |
| ret <16 x bfloat> %a |
| } |