| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -verify-machineinstrs -mcpu=sapphirerapids -mattr=+false-deps-mulc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=ENABLE |
| ; RUN: llc -verify-machineinstrs -mcpu=sapphirerapids -mattr=-false-deps-mulc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s --check-prefixes=DISABLE |
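;
; With +false-deps-mulc the backend must break the false dependency on the
; undef destination of the vf(c)mulcph/vf(c)mulcsh instructions by zeroing it
; first (vxorps); with -false-deps-mulc no zeroing idiom is emitted.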
| |
| define <16 x float> @fmulcph(<16 x float> %a0, <16 x float> %a1) { |
| ; ENABLE-LABEL: fmulcph: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm2, %xmm2, %xmm2
| ; ENABLE-NEXT: vfmulcph %zmm1, %zmm0, %zmm2 |
| ; ENABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulcph: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfmulcph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4) |
| ret <16 x float> %2 |
| } |
| |
| define <16 x float> @fmulcph_mem(<16 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fmulcph_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
| ; ENABLE-NEXT: vfmulcph (%rdi), %zmm0, %zmm1 |
| ; ENABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulcph_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; DISABLE-NEXT: vfmulcph (%rdi), %zmm0, %zmm1 |
| ; DISABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <16 x float>, ptr %p1, align 64 |
| %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4) |
| ret <16 x float> %2 |
| } |
| |
| define <16 x float> @fmulcph_broadcast(<16 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fmulcph_broadcast: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
| ; ENABLE-NEXT: vfmulcph (%rdi){1to16}, %zmm0, %zmm1 |
| ; ENABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulcph_broadcast: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; DISABLE-NEXT: vfmulcph (%rdi){1to16}, %zmm0, %zmm1 |
| ; DISABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v1 = load float, ptr %p1, align 4 |
| %t0 = insertelement <16 x float> undef, float %v1, i64 0 |
| %a1 = shufflevector <16 x float> %t0, <16 x float> undef, <16 x i32> zeroinitializer |
| %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4) |
| ret <16 x float> %2 |
| } |
| |
| define <16 x float> @fmulcph_maskz(<16 x float> %a0, <16 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fmulcph_maskz: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovw (%rdi), %k1 |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm2, %xmm2, %xmm2
| ; ENABLE-NEXT: vfmulcph %zmm1, %zmm0, %zmm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulcph_maskz: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovw (%rdi), %k1 |
| ; DISABLE-NEXT: vfmulcph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} {z} # 64-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i16, ptr %mask |
| %3 = call <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> zeroinitializer, i16 %2, i32 4) |
| ret <16 x float> %3 |
| } |
| |
| define <16 x float> @fcmulcph(<16 x float> %a0, <16 x float> %a1) { |
| ; ENABLE-LABEL: fcmulcph: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm2, %xmm2, %xmm2
| ; ENABLE-NEXT: vfcmulcph %zmm1, %zmm0, %zmm2 |
| ; ENABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulcph: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfcmulcph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 # 64-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4) |
| ret <16 x float> %2 |
| } |
| |
| define <16 x float> @fcmulcph_mem(<16 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fcmulcph_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
| ; ENABLE-NEXT: vfcmulcph (%rdi), %zmm0, %zmm1 |
| ; ENABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulcph_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; DISABLE-NEXT: vfcmulcph (%rdi), %zmm0, %zmm1 |
| ; DISABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <16 x float>, ptr %p1, align 64 |
| %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4) |
| ret <16 x float> %2 |
| } |
| |
| define <16 x float> @fcmulcph_broadcast(<16 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fcmulcph_broadcast: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm1, %xmm1, %xmm1
| ; ENABLE-NEXT: vfcmulcph (%rdi){1to16}, %zmm0, %zmm1 |
| ; ENABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulcph_broadcast: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload |
| ; DISABLE-NEXT: vfcmulcph (%rdi){1to16}, %zmm0, %zmm1 |
| ; DISABLE-NEXT: vmovaps %zmm1, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v1 = load float, ptr %p1, align 4 |
| %t0 = insertelement <16 x float> undef, float %v1, i64 0 |
| %a1 = shufflevector <16 x float> %t0, <16 x float> undef, <16 x i32> zeroinitializer |
| %2 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> undef, i16 -1, i32 4) |
| ret <16 x float> %2 |
| } |
| |
| define <16 x float> @fcmulcph_maskz(<16 x float> %a0, <16 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fcmulcph_maskz: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovw (%rdi), %k1 |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm1 # 64-byte Reload |
; ENABLE-NEXT:    vxorps %xmm2, %xmm2, %xmm2
| ; ENABLE-NEXT: vfcmulcph %zmm1, %zmm0, %zmm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulcph_maskz: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %zmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovw (%rdi), %k1 |
| ; DISABLE-NEXT: vfcmulcph {{[-0-9]+}}(%r{{[sb]}}p), %zmm0, %zmm2 {%k1} {z} # 64-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %zmm2, %zmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i16, ptr %mask |
| %3 = call <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float> %a0, <16 x float> %a1, <16 x float> zeroinitializer, i16 %2, i32 4) |
| ret <16 x float> %3 |
| } |
| |
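; 128-bit and 256-bit variants (the intrinsics take no rounding-mode operand).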
| define <4 x float> @fmulc(<4 x float> %a0, <4 x float> %a1) { |
| ; ENABLE-LABEL: fmulc: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfmulcph %xmm1, %xmm0, %xmm2 |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfmulcph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fmulc_mem(<4 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fmulc_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfmulcph (%rdi), %xmm0, %xmm1 |
| ; ENABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; DISABLE-NEXT: vfmulcph (%rdi), %xmm0, %xmm1 |
| ; DISABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <4 x float>, ptr %p1, align 64 |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fmulc_broadcast(<4 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fmulc_broadcast: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfmulcph (%rdi){1to4}, %xmm0, %xmm1 |
| ; ENABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc_broadcast: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; DISABLE-NEXT: vfmulcph (%rdi){1to4}, %xmm0, %xmm1 |
| ; DISABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v1 = load float, ptr %p1, align 4 |
| %t0 = insertelement <4 x float> undef, float %v1, i64 0 |
| %a1 = shufflevector <4 x float> %t0, <4 x float> undef, <4 x i32> zeroinitializer |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fmulc_maskz(<4 x float> %a0, <4 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fmulc_maskz: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfmulcph %xmm1, %xmm0, %xmm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc_maskz: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vfmulcph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 {%k1} {z} # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %2) |
| ret <4 x float> %3 |
| } |
| |
| define <4 x float> @fcmulc(<4 x float> %a0, <4 x float> %a1) { |
| ; ENABLE-LABEL: fcmulc: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfcmulcph %xmm1, %xmm0, %xmm2 |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfcmulcph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fcmulc_mem(<4 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fcmulc_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfcmulcph (%rdi), %xmm0, %xmm1 |
| ; ENABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; DISABLE-NEXT: vfcmulcph (%rdi), %xmm0, %xmm1 |
| ; DISABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <4 x float>, ptr %p1, align 64 |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fcmulc_broadcast(<4 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fcmulc_broadcast: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfcmulcph (%rdi){1to4}, %xmm0, %xmm1 |
| ; ENABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc_broadcast: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; DISABLE-NEXT: vfcmulcph (%rdi){1to4}, %xmm0, %xmm1 |
| ; DISABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v1 = load float, ptr %p1, align 4 |
| %t0 = insertelement <4 x float> undef, float %v1, i64 0 |
| %a1 = shufflevector <4 x float> %t0, <4 x float> undef, <4 x i32> zeroinitializer |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fcmulc_maskz(<4 x float> %a0, <4 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fcmulc_maskz: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfcmulcph %xmm1, %xmm0, %xmm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc_maskz: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vfcmulcph {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 {%k1} {z} # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %2) |
| ret <4 x float> %3 |
| } |
| |
| define <8 x float> @fmulc_ymm(<8 x float> %a0, <8 x float> %a1) { |
| ; ENABLE-LABEL: fmulc_ymm: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfmulcph %ymm1, %ymm0, %ymm2 |
| ; ENABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc_ymm: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfmulcph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> undef, i8 -1) |
| ret <8 x float> %2 |
| } |
| |
| define <8 x float> @fmulc_ymm_mem(<8 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fmulc_ymm_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfmulcph (%rdi), %ymm0, %ymm1 |
| ; ENABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc_ymm_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfmulcph (%rdi), %ymm0, %ymm1 |
| ; DISABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <8 x float>, ptr %p1, align 64 |
| %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> undef, i8 -1) |
| ret <8 x float> %2 |
| } |
| |
| define <8 x float> @fmulc_ymm_broadcast(<8 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fmulc_ymm_broadcast: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfmulcph (%rdi){1to8}, %ymm0, %ymm1 |
| ; ENABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc_ymm_broadcast: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfmulcph (%rdi){1to8}, %ymm0, %ymm1 |
| ; DISABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v1 = load float, ptr %p1, align 4 |
| %t0 = insertelement <8 x float> undef, float %v1, i64 0 |
| %a1 = shufflevector <8 x float> %t0, <8 x float> undef, <8 x i32> zeroinitializer |
| %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> undef, i8 -1) |
| ret <8 x float> %2 |
| } |
| |
| define <8 x float> @fmulc_maskz_ymm(<8 x float> %a0, <8 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fmulc_maskz_ymm: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfmulcph %ymm1, %ymm0, %ymm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulc_maskz_ymm: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vfmulcph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 {%k1} {z} # 32-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <8 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> zeroinitializer, i8 %2) |
| ret <8 x float> %3 |
| } |
| |
| define <8 x float> @fcmulc_ymm(<8 x float> %a0, <8 x float> %a1) { |
| ; ENABLE-LABEL: fcmulc_ymm: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfcmulcph %ymm1, %ymm0, %ymm2 |
| ; ENABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc_ymm: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfcmulcph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 # 32-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> undef, i8 -1) |
| ret <8 x float> %2 |
| } |
| |
| define <8 x float> @fcmulc_ymm_mem(<8 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fcmulc_ymm_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfcmulcph (%rdi), %ymm0, %ymm1 |
| ; ENABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc_ymm_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfcmulcph (%rdi), %ymm0, %ymm1 |
| ; DISABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <8 x float>, ptr %p1, align 64 |
| %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> undef, i8 -1) |
| ret <8 x float> %2 |
| } |
| |
| define <8 x float> @fcmulc_ymm_broadcast(<8 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fcmulc_ymm_broadcast: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfcmulcph (%rdi){1to8}, %ymm0, %ymm1 |
| ; ENABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc_ymm_broadcast: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfcmulcph (%rdi){1to8}, %ymm0, %ymm1 |
| ; DISABLE-NEXT: vmovaps %ymm1, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %v1 = load float, ptr %p1, align 4 |
| %t0 = insertelement <8 x float> undef, float %v1, i64 0 |
| %a1 = shufflevector <8 x float> %t0, <8 x float> undef, <8 x i32> zeroinitializer |
| %2 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> undef, i8 -1) |
| ret <8 x float> %2 |
| } |
| |
| define <8 x float> @fcmulc_maskz_ymm(<8 x float> %a0, <8 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fcmulc_maskz_ymm: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfcmulcph %ymm1, %ymm0, %ymm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulc_maskz_ymm: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vfcmulcph {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm2 {%k1} {z} # 32-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %ymm2, %ymm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <8 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.256(<8 x float> %a0, <8 x float> %a1, <8 x float> zeroinitializer, i8 %2) |
| ret <8 x float> %3 |
| } |
| |
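; Scalar complex-half variants: vfmulcsh/vfcmulcsh.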
| define <4 x float> @fmulcsh(<4 x float> %a0, <4 x float> %a1) { |
| ; ENABLE-LABEL: fmulcsh: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfmulcsh %xmm1, %xmm0, %xmm2 |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulcsh: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfmulcsh {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fmulcsh_mem(<4 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fmulcsh_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfmulcsh (%rdi), %xmm0, %xmm1 |
| ; ENABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulcsh_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; DISABLE-NEXT: vfmulcsh (%rdi), %xmm0, %xmm1 |
| ; DISABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <4 x float>, ptr %p1, align 64 |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fmulcsh_maskz(<4 x float> %a0, <4 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fmulcsh_maskz: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfmulcsh %xmm1, %xmm0, %xmm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fmulcsh_maskz: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vfmulcsh {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 {%k1} {z} # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %2, i32 4) |
| ret <4 x float> %3 |
| } |
| |
| define <4 x float> @fcmulcsh(<4 x float> %a0, <4 x float> %a1) { |
| ; ENABLE-LABEL: fcmulcsh: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfcmulcsh %xmm1, %xmm0, %xmm2 |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulcsh: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vfcmulcsh {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fcmulcsh_mem(<4 x float> %a0, ptr %p1) { |
| ; ENABLE-LABEL: fcmulcsh_mem: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm1, %xmm1, %xmm1 |
| ; ENABLE-NEXT: vfcmulcsh (%rdi), %xmm0, %xmm1 |
| ; ENABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulcsh_mem: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload |
| ; DISABLE-NEXT: vfcmulcsh (%rdi), %xmm0, %xmm1 |
| ; DISABLE-NEXT: vmovaps %xmm1, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %a1 = load <4 x float>, ptr %p1, align 64 |
| %2 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> undef, i8 -1, i32 4) |
| ret <4 x float> %2 |
| } |
| |
| define <4 x float> @fcmulcsh_maskz(<4 x float> %a0, <4 x float> %a1, ptr %mask) { |
| ; ENABLE-LABEL: fcmulcsh_maskz: |
| ; ENABLE: # %bb.0: |
| ; ENABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; ENABLE-NEXT: #APP |
| ; ENABLE-NEXT: nop |
| ; ENABLE-NEXT: #NO_APP |
| ; ENABLE-NEXT: kmovb (%rdi), %k1 |
| ; ENABLE-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload |
| ; ENABLE-NEXT: vxorps %xmm2, %xmm2, %xmm2 |
| ; ENABLE-NEXT: vfcmulcsh %xmm1, %xmm0, %xmm2 {%k1} {z} |
| ; ENABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; ENABLE-NEXT: retq |
| ; |
| ; DISABLE-LABEL: fcmulcsh_maskz: |
| ; DISABLE: # %bb.0: |
| ; DISABLE-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill |
| ; DISABLE-NEXT: #APP |
| ; DISABLE-NEXT: nop |
| ; DISABLE-NEXT: #NO_APP |
| ; DISABLE-NEXT: kmovb (%rdi), %k1 |
| ; DISABLE-NEXT: vfcmulcsh {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm2 {%k1} {z} # 16-byte Folded Reload |
| ; DISABLE-NEXT: vmovaps %xmm2, %xmm0 |
| ; DISABLE-NEXT: retq |
| %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"() |
| %2 = load i8, ptr %mask |
| %3 = call <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.csh(<4 x float> %a0, <4 x float> %a1, <4 x float> zeroinitializer, i8 %2, i32 4) |
| ret <4 x float> %3 |
| } |
| |
| declare <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32) |
| declare <4 x float> @llvm.x86.avx512fp16.mask.vfmul.csh(<4 x float>, <4 x float>, <4 x float>, i8, i32) |
| declare <16 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) |
| declare <16 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) |
| declare <8 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.256(<8 x float>, <8 x float>, <8 x float>, i8) |
| declare <8 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.256(<8 x float>, <8 x float>, <8 x float>, i8) |
| declare <4 x float> @llvm.x86.avx512fp16.mask.vfcmul.cph.128(<4 x float>, <4 x float>, <4 x float>, i8) |
| declare <4 x float> @llvm.x86.avx512fp16.mask.vfmul.cph.128(<4 x float>, <4 x float>, <4 x float>, i8) |
| |