; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
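
; X86 has no native vector-predicated instructions, so llc expands the
; llvm.vp.* calls into plain, unpredicated vector code. With an all-true
; mask, operations that cannot trap simply discard the explicit vector
; length %vp; the sdiv/udiv/srem/urem tests must additionally neutralize
; divisor lanes at or beyond %vp first (see the comment ahead of
; vp_sdiv_v4i32).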
define void @vp_add_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_add_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_add_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_add_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_sub_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_sub_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_sub_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_sub_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.sub.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_mul_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_mul_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_mul_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movdqa %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_mul_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.mul.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
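
; Division can trap, so the expansion must not perform it on lanes at or
; beyond the explicit vector length: %vp is broadcast, compared against the
; lane indices [0,1,2,3], and a divisor of 1 is blended into every inactive
; lane before the element-by-element idivl/divl sequence. AVX512VL expresses
; the guard directly with a mask register (vpcmpnleud plus a masked
; vmovdqa32); the other configurations emulate it with pmaxud/pcmpeqd (or a
; sign-flipped pcmpgtd on plain SSE) and a blend.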
define void @vp_sdiv_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_sdiv_v4i32:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm2
; X86-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; X86-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; X86-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; X86-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vextractps $1, %xmm1, %ecx
; X86-NEXT: vpextrd $1, %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %ecx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: vmovd %xmm1, %edi
; X86-NEXT: vmovd %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %edi
; X86-NEXT: vmovd %eax, %xmm2
; X86-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; X86-NEXT: vpextrd $2, %xmm1, %ecx
; X86-NEXT: vpextrd $2, %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %ecx
; X86-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; X86-NEXT: vpextrd $3, %xmm1, %ecx
; X86-NEXT: vpextrd $3, %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %ecx
; X86-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
;
; SSE-LABEL: vp_sdiv_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movd %esi, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; SSE-NEXT: movd %xmm2, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; SSE-NEXT: movd %xmm2, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; SSE-NEXT: movd %xmm3, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE-NEXT: movd %xmm3, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_sdiv_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vextractps $1, %xmm1, %ecx
; AVX1-NEXT: vpextrd $1, %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %ecx
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %esi
; AVX1-NEXT: vmovd %eax, %xmm2
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: vpextrd $2, %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %ecx
; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; AVX1-NEXT: vpextrd $3, %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %ecx
; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_sdiv_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX2-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vextractps $1, %xmm1, %ecx
; AVX2-NEXT: vpextrd $1, %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %ecx
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %esi
; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: vpextrd $2, %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %ecx
; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
; AVX2-NEXT: vpextrd $3, %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %ecx
; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_sdiv_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %xmm2
; AVX512-NEXT: vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX512-NEXT: vmovdqa32 %xmm1, %xmm2 {%k1}
; AVX512-NEXT: vpextrd $1, %xmm2, %ecx
; AVX512-NEXT: vpextrd $1, %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %ecx
; AVX512-NEXT: movl %eax, %ecx
; AVX512-NEXT: vmovd %xmm2, %esi
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %esi
; AVX512-NEXT: vmovd %eax, %xmm1
; AVX512-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $2, %xmm2, %ecx
; AVX512-NEXT: vpextrd $2, %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %ecx
; AVX512-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $3, %xmm2, %ecx
; AVX512-NEXT: vpextrd $3, %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %ecx
; AVX512-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_udiv_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_udiv_v4i32:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm2
; X86-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; X86-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; X86-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; X86-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vextractps $1, %xmm1, %ecx
; X86-NEXT: vpextrd $1, %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %ecx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: vmovd %xmm1, %edi
; X86-NEXT: vmovd %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %edi
; X86-NEXT: vmovd %eax, %xmm2
; X86-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; X86-NEXT: vpextrd $2, %xmm1, %ecx
; X86-NEXT: vpextrd $2, %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %ecx
; X86-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; X86-NEXT: vpextrd $3, %xmm1, %ecx
; X86-NEXT: vpextrd $3, %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %ecx
; X86-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
;
; SSE-LABEL: vp_udiv_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movd %esi, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; SSE-NEXT: movd %xmm2, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; SSE-NEXT: movd %xmm2, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; SSE-NEXT: movd %xmm3, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE-NEXT: movd %xmm3, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %eax, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %eax, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %eax, %xmm0
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_udiv_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vextractps $1, %xmm1, %ecx
; AVX1-NEXT: vpextrd $1, %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: movl %eax, %ecx
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %esi
; AVX1-NEXT: vmovd %eax, %xmm2
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: vpextrd $2, %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; AVX1-NEXT: vpextrd $3, %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_udiv_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX2-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vextractps $1, %xmm1, %ecx
; AVX2-NEXT: vpextrd $1, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: movl %eax, %ecx
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %esi
; AVX2-NEXT: vmovd %eax, %xmm2
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: vpextrd $2, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
; AVX2-NEXT: vpextrd $3, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_udiv_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %xmm2
; AVX512-NEXT: vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX512-NEXT: vmovdqa32 %xmm1, %xmm2 {%k1}
; AVX512-NEXT: vpextrd $1, %xmm2, %ecx
; AVX512-NEXT: vpextrd $1, %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %ecx
; AVX512-NEXT: movl %eax, %ecx
; AVX512-NEXT: vmovd %xmm2, %esi
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %esi
; AVX512-NEXT: vmovd %eax, %xmm1
; AVX512-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $2, %xmm2, %ecx
; AVX512-NEXT: vpextrd $2, %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %ecx
; AVX512-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $3, %xmm2, %ecx
; AVX512-NEXT: vpextrd $3, %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %ecx
; AVX512-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_srem_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_srem_v4i32:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm2
; X86-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; X86-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; X86-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; X86-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vextractps $1, %xmm1, %ecx
; X86-NEXT: vpextrd $1, %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %ecx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: vmovd %xmm1, %edi
; X86-NEXT: vmovd %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %edi
; X86-NEXT: vmovd %edx, %xmm2
; X86-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; X86-NEXT: vpextrd $2, %xmm1, %ecx
; X86-NEXT: vpextrd $2, %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %ecx
; X86-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; X86-NEXT: vpextrd $3, %xmm1, %ecx
; X86-NEXT: vpextrd $3, %xmm0, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl %ecx
; X86-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
;
; SSE-LABEL: vp_srem_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movd %esi, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; SSE-NEXT: movd %xmm2, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; SSE-NEXT: movd %xmm2, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %edx, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; SSE-NEXT: movd %xmm3, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE-NEXT: movd %xmm3, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %edx, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %edx, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: cltd
; SSE-NEXT: idivl %ecx
; SSE-NEXT: movd %edx, %xmm0
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_srem_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vextractps $1, %xmm1, %ecx
; AVX1-NEXT: vpextrd $1, %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %ecx
; AVX1-NEXT: movl %edx, %ecx
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %esi
; AVX1-NEXT: vmovd %edx, %xmm2
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: vpextrd $2, %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %ecx
; AVX1-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; AVX1-NEXT: vpextrd $3, %xmm0, %eax
; AVX1-NEXT: cltd
; AVX1-NEXT: idivl %ecx
; AVX1-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_srem_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX2-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vextractps $1, %xmm1, %ecx
; AVX2-NEXT: vpextrd $1, %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %ecx
; AVX2-NEXT: movl %edx, %ecx
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %esi
; AVX2-NEXT: vmovd %edx, %xmm2
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: vpextrd $2, %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %ecx
; AVX2-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
; AVX2-NEXT: vpextrd $3, %xmm0, %eax
; AVX2-NEXT: cltd
; AVX2-NEXT: idivl %ecx
; AVX2-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_srem_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %xmm2
; AVX512-NEXT: vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX512-NEXT: vmovdqa32 %xmm1, %xmm2 {%k1}
; AVX512-NEXT: vpextrd $1, %xmm2, %ecx
; AVX512-NEXT: vpextrd $1, %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %ecx
; AVX512-NEXT: movl %edx, %ecx
; AVX512-NEXT: vmovd %xmm2, %esi
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %esi
; AVX512-NEXT: vmovd %edx, %xmm1
; AVX512-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $2, %xmm2, %ecx
; AVX512-NEXT: vpextrd $2, %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %ecx
; AVX512-NEXT: vpinsrd $2, %edx, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $3, %xmm2, %ecx
; AVX512-NEXT: vpextrd $3, %xmm0, %eax
; AVX512-NEXT: cltd
; AVX512-NEXT: idivl %ecx
; AVX512-NEXT: vpinsrd $3, %edx, %xmm1, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.srem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_urem_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_urem_v4i32:
; X86: # %bb.0:
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm2
; X86-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; X86-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; X86-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; X86-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vextractps $1, %xmm1, %ecx
; X86-NEXT: vpextrd $1, %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %ecx
; X86-NEXT: movl %edx, %ecx
; X86-NEXT: vmovd %xmm1, %edi
; X86-NEXT: vmovd %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %edi
; X86-NEXT: vmovd %edx, %xmm2
; X86-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; X86-NEXT: vpextrd $2, %xmm1, %ecx
; X86-NEXT: vpextrd $2, %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %ecx
; X86-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; X86-NEXT: vpextrd $3, %xmm1, %ecx
; X86-NEXT: vpextrd $3, %xmm0, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl %ecx
; X86-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%esi)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl
;
; SSE-LABEL: vp_urem_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movd %esi, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; SSE-NEXT: pxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: paddd %xmm2, %xmm1
; SSE-NEXT: pcmpeqd %xmm2, %xmm2
; SSE-NEXT: psubd %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3]
; SSE-NEXT: movd %xmm2, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,3,3,3]
; SSE-NEXT: movd %xmm2, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %edx, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; SSE-NEXT: movd %xmm3, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE-NEXT: movd %xmm3, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %edx, %xmm3
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %edx, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1]
; SSE-NEXT: movd %xmm1, %ecx
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movd %edx, %xmm0
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_urem_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovd %esi, %xmm2
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,0,0,0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vblendvps %xmm2, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vextractps $1, %xmm1, %ecx
; AVX1-NEXT: vpextrd $1, %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: movl %edx, %ecx
; AVX1-NEXT: vmovd %xmm1, %esi
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %esi
; AVX1-NEXT: vmovd %edx, %xmm2
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; AVX1-NEXT: vpextrd $2, %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; AVX1-NEXT: vpextrd $3, %xmm0, %eax
; AVX1-NEXT: xorl %edx, %edx
; AVX1-NEXT: divl %ecx
; AVX1-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_urem_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovd %esi, %xmm2
; AVX2-NEXT: vpbroadcastd %xmm2, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3]
; AVX2-NEXT: vpmaxud %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [1,1,1,1]
; AVX2-NEXT: vblendvps %xmm2, %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vextractps $1, %xmm1, %ecx
; AVX2-NEXT: vpextrd $1, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: movl %edx, %ecx
; AVX2-NEXT: vmovd %xmm1, %esi
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %esi
; AVX2-NEXT: vmovd %edx, %xmm2
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $2, %xmm1, %ecx
; AVX2-NEXT: vpextrd $2, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $2, %edx, %xmm2, %xmm2
; AVX2-NEXT: vpextrd $3, %xmm1, %ecx
; AVX2-NEXT: vpextrd $3, %xmm0, %eax
; AVX2-NEXT: xorl %edx, %edx
; AVX2-NEXT: divl %ecx
; AVX2-NEXT: vpinsrd $3, %edx, %xmm2, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_urem_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpbroadcastd %esi, %xmm2
; AVX512-NEXT: vpcmpnleud {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %k1
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
; AVX512-NEXT: vmovdqa32 %xmm1, %xmm2 {%k1}
; AVX512-NEXT: vpextrd $1, %xmm2, %ecx
; AVX512-NEXT: vpextrd $1, %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %ecx
; AVX512-NEXT: movl %edx, %ecx
; AVX512-NEXT: vmovd %xmm2, %esi
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %esi
; AVX512-NEXT: vmovd %edx, %xmm1
; AVX512-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $2, %xmm2, %ecx
; AVX512-NEXT: vpextrd $2, %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %ecx
; AVX512-NEXT: vpinsrd $2, %edx, %xmm1, %xmm1
; AVX512-NEXT: vpextrd $3, %xmm2, %ecx
; AVX512-NEXT: vpextrd $3, %xmm0, %eax
; AVX512-NEXT: xorl %edx, %edx
; AVX512-NEXT: divl %ecx
; AVX512-NEXT: vpinsrd $3, %edx, %xmm1, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.urem.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
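
; Per-lane variable shifts (vpsravd/vpsrlvd/vpsllvd) only exist from AVX2
; onwards. On SSE and AVX1 the ashr/lshr tests below perform four shifts
; with scalar counts extracted from %a1 and recombine the results with
; shuffles/blends, while the shl tests synthesize 1 << amt through the
; float exponent field (pslld $23, paddd bias, cvttps2dq) and multiply.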
define void @vp_ashr_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_ashr_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-NEXT: vpsrad %xmm2, %xmm0, %xmm2
; X86-NEXT: vpsrlq $32, %xmm1, %xmm3
; X86-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; X86-NEXT: vpxor %xmm3, %xmm3, %xmm3
; X86-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X86-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; X86-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X86-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_ashr_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrad %xmm2, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrad %xmm2, %xmm4
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrad %xmm2, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT: psrad %xmm1, %xmm0
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm3[1]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,3]
; SSE-NEXT: movaps %xmm4, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_ashr_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrad %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX1-NEXT: vpsrad %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_ashr_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_ashr_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_lshr_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_lshr_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-NEXT: vpsrld %xmm2, %xmm0, %xmm2
; X86-NEXT: vpsrlq $32, %xmm1, %xmm3
; X86-NEXT: vpsrld %xmm3, %xmm0, %xmm3
; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; X86-NEXT: vpxor %xmm3, %xmm3, %xmm3
; X86-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; X86-NEXT: vpsrld %xmm3, %xmm0, %xmm3
; X86-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; X86-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_lshr_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT: psrld %xmm1, %xmm0
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm3[1]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,3]
; SSE-NEXT: movaps %xmm4, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_lshr_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrldq {{.*#+}} xmm2 = xmm1[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: vpsrld %xmm2, %xmm0, %xmm2
; AVX1-NEXT: vpsrlq $32, %xmm1, %xmm3
; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm3 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX1-NEXT: vpsrld %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_lshr_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_lshr_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_shl_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_shl_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpslld $23, %xmm1, %xmm1
; X86-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1
; X86-NEXT: vcvttps2dq %xmm1, %xmm1
; X86-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_shl_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT: pmuludq %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movdqa %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_shl_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
; AVX1-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rdi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_shl_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rdi)
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_shl_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa %xmm0, (%rdi)
; AVX512-NEXT: retq
%res = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.shl.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
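
; The bitwise or/and/xor tests lower to the floating-point-domain forms of
; the logic instructions (orps/andps/xorps with movaps stores); the results
; only feed a 16-byte store and never re-enter the integer domain.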
define void @vp_or_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_or_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vorps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_or_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: movaps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_or_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.or.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_and_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_and_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vandps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_and_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: andps %xmm1, %xmm0
; SSE-NEXT: movaps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_and_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.and.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_xor_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_xor_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vxorps %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovaps %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_xor_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm1, %xmm0
; SSE-NEXT: movaps %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_xor_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.xor.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
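
; pabsd is an SSSE3 instruction, so the plain-SSE abs test expands to the
; classic sign-mask idiom: s = x >> 31 (arithmetic), result = (x ^ s) - s.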
define void @vp_abs_v4i32(<4 x i32> %a0, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_abs_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpabsd %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_abs_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: pxor %xmm1, %xmm0
; SSE-NEXT: psubd %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_abs_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpabsd %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %a0, i1 false, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.abs.v4i32(<4 x i32>, i1 immarg, <4 x i1>, i32)
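
; pmaxsd/pminsd/pmaxud/pminud require SSE4.1, so on plain SSE the min/max
; tests expand to a pcmpgtd compare (preceded by a sign-bit flip with the
; 0x80000000 constant for the unsigned variants) feeding a pand/pandn/por
; select.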
define void @vp_smax_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_smax_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_smax_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pcmpgtd %xmm1, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_smax_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.smax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_smin_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_smin_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_smin_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm0, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_smin_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpminsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.smin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_umax_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_umax_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_umax_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pxor %xmm2, %xmm3
; SSE-NEXT: pxor %xmm0, %xmm2
; SSE-NEXT: pcmpgtd %xmm3, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_umax_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.umax.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)

define void @vp_umin_v4i32(<4 x i32> %a0, <4 x i32> %a1, ptr %out, i32 %vp) nounwind {
; X86-LABEL: vp_umin_v4i32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpminud %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; SSE-LABEL: vp_umin_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pxor %xmm2, %xmm3
; SSE-NEXT: pxor %xmm1, %xmm2
; SSE-NEXT: pcmpgtd %xmm3, %xmm2
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, (%rdi)
; SSE-NEXT: retq
;
; AVX-LABEL: vp_umin_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpminud %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
; AVX-NEXT: retq
%res = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %a0, <4 x i32> %a1, <4 x i1> <i1 -1, i1 -1, i1 -1, i1 -1>, i32 %vp)
store <4 x i32> %res, ptr %out
ret void
}
declare <4 x i32> @llvm.vp.umin.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
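
; Unlike the tests above, bitreverse and bswap are given a real mask %m and
; a zeroext %evl; both operations are side-effect free, so the expansion
; still drops the predicate and emits the usual unpredicated byte-shuffle
; (or SSE2 unpack/pack) sequences.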
define <4 x i32> @vp_bitreverse_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; X86-LABEL: vp_bitreverse_v4i32:
; X86: # %bb.0:
; X86-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X86-NEXT: vbroadcastss {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X86-NEXT: vpand %xmm1, %xmm0, %xmm2
; X86-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; X86-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; X86-NEXT: vpsrlw $4, %xmm0, %xmm0
; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; X86-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; X86-NEXT: vpor %xmm0, %xmm2, %xmm0
; X86-NEXT: retl
;
; SSE-LABEL: vp_bitreverse_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: psllw $4, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $2, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: psllw $2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $1, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: paddb %xmm0, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: vp_bitreverse_v4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX1-NEXT: vbroadcastss {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: vp_bitreverse_v4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX2-NEXT: vpbroadcastb {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX2-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: vp_bitreverse_v4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX512-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX512-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512-NEXT: retq
%v = call <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
ret <4 x i32> %v
}
declare <4 x i32> @llvm.vp.bitreverse.v4i32(<4 x i32>, <4 x i1>, i32)

define <4 x i32> @vp_bswap_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; X86-LABEL: vp_bswap_v4i32:
; X86: # %bb.0:
; X86-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; X86-NEXT: retl
;
; SSE-LABEL: vp_bswap_v4i32:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE-NEXT: packuswb %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: vp_bswap_v4i32:
; AVX: # %bb.0:
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
; AVX-NEXT: retq
%v = call <4 x i32> @llvm.vp.bswap.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
ret <4 x i32> %v
}
declare <4 x i32> @llvm.vp.bswap.v4i32(<4 x i32>, <4 x i1>, i32)