|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
|  | ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X86-SSE | 
|  | ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX1,X86-AVX1 | 
|  | ; RUN: llc < %s -disable-peephole -mtriple=i386-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX512,X86-AVX512 | 
|  | ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+sse2 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X64-SSE | 
|  | ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX1,X64-AVX1 | 
|  | ; RUN: llc < %s -disable-peephole -mtriple=x86_64-apple-darwin -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX512,X64-AVX512 | 
|  |  | 
|  |  | 
; Packed double-precision sqrt intrinsic should lower to a single
; sqrtpd / vsqrtpd instruction on every configuration.
define <2 x double> @test_x86_sse2_sqrt_pd(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_sqrt_pd:
; SSE:       ## %bb.0:
; SSE-NEXT:    sqrtpd %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x51,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_sqrt_pd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vsqrtpd %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x51,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_sqrt_pd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vsqrtpd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x51,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.sqrt.pd(<2 x double>) nounwind readnone
|  |  | 
|  |  | 
; Scalar double sqrt intrinsic: SSE uses the 2-operand sqrtsd; AVX/AVX512
; use the 3-operand vsqrtsd form (AVX512 compresses EVEX to VEX).
define <2 x double> @test_x86_sse2_sqrt_sd(<2 x double> %a0) {
; SSE-LABEL: test_x86_sse2_sqrt_sd:
; SSE:       ## %bb.0:
; SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_sqrt_sd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_sqrt_sd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a0) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double>) nounwind readnone
|  |  | 
|  |  | 
; Same scalar-sqrt intrinsic but fed from an aligned vector load; verifies
; the load stays a full-width (v)movapd and is NOT folded into sqrtsd
; (-disable-peephole keeps the load and the op as separate instructions).
define <2 x double> @test_x86_sse2_sqrt_sd_vec_load(ptr %a0) {
; X86-SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movapd (%eax), %xmm0 ## encoding: [0x66,0x0f,0x28,0x00]
; X86-SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovapd (%eax), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x00]
; X86-AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovapd (%eax), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x00]
; X86-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    movapd (%rdi), %xmm0 ## encoding: [0x66,0x0f,0x28,0x07]
; X64-SSE-NEXT:    sqrtsd %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x51,0xc0]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vmovapd (%rdi), %xmm0 ## encoding: [0xc5,0xf9,0x28,0x07]
; X64-AVX1-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x51,0xc0]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse2_sqrt_sd_vec_load:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vmovapd (%rdi), %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0x07]
; X64-AVX512-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x51,0xc0]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
%a1 = load <2 x double>, ptr %a0, align 16
%res = call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
|  |  | 
|  |  | 
; Byte-granularity whole-register shift-left intrinsic (".bs" = byte shift):
; immediate 7 maps directly to pslldq $7.
define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
; SSE-LABEL: test_x86_sse2_psll_dq_bs:
; SSE:       ## %bb.0:
; SSE-NEXT:    pslldq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xf8,0x07]
; SSE-NEXT:    ## xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psll_dq_bs:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpslldq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf8,0x07]
; AVX1-NEXT:    ## xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psll_dq_bs:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpslldq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x07]
; AVX512-NEXT:    ## xmm0 = zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7,8]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psll.dq.bs(<2 x i64>, i32) nounwind readnone
|  |  | 
|  |  | 
; Byte-granularity whole-register shift-right intrinsic (".bs" = byte shift):
; immediate 7 maps directly to psrldq $7.
define <2 x i64> @test_x86_sse2_psrl_dq_bs(<2 x i64> %a0) {
; SSE-LABEL: test_x86_sse2_psrl_dq_bs:
; SSE:       ## %bb.0:
; SSE-NEXT:    psrldq $7, %xmm0 ## encoding: [0x66,0x0f,0x73,0xd8,0x07]
; SSE-NEXT:    ## xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psrl_dq_bs:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpsrldq $7, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd8,0x07]
; AVX1-NEXT:    ## xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psrl_dq_bs:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpsrldq $7, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x07]
; AVX512-NEXT:    ## xmm0 = xmm0[7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64> %a0, i32 7) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psrl.dq.bs(<2 x i64>, i32) nounwind readnone
|  |  | 
; Bit-granularity shift-left intrinsic: the i32 8 operand is a shift in
; BITS, so it lowers to a 1-BYTE pslldq $1 (8 bits == 1 byte).
define <2 x i64> @test_x86_sse2_psll_dq(<2 x i64> %a0) {
; SSE-LABEL: test_x86_sse2_psll_dq:
; SSE:       ## %bb.0:
; SSE-NEXT:    pslldq $1, %xmm0 ## encoding: [0x66,0x0f,0x73,0xf8,0x01]
; SSE-NEXT:    ## xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psll_dq:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpslldq $1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xf8,0x01]
; AVX1-NEXT:    ## xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psll_dq:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpslldq $1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xf8,0x01]
; AVX512-NEXT:    ## xmm0 = zero,xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psll.dq(<2 x i64>, i32) nounwind readnone
|  |  | 
|  |  | 
; Bit-granularity shift-right intrinsic: the i32 8 operand is a shift in
; BITS, so it lowers to a 1-BYTE psrldq $1 (8 bits == 1 byte).
define <2 x i64> @test_x86_sse2_psrl_dq(<2 x i64> %a0) {
; SSE-LABEL: test_x86_sse2_psrl_dq:
; SSE:       ## %bb.0:
; SSE-NEXT:    psrldq $1, %xmm0 ## encoding: [0x66,0x0f,0x73,0xd8,0x01]
; SSE-NEXT:    ## xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psrl_dq:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpsrldq $1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x73,0xd8,0x01]
; AVX1-NEXT:    ## xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psrl_dq:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpsrldq $1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x73,0xd8,0x01]
; AVX512-NEXT:    ## xmm0 = xmm0[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64> %a0, i32 8) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone
|  |  | 
|  |  | 
; int32 -> double conversion intrinsic lowers to a single (v)cvtdq2pd.
define <2 x double> @test_x86_sse2_cvtdq2pd(<4 x i32> %a0) {
; SSE-LABEL: test_x86_sse2_cvtdq2pd:
; SSE:       ## %bb.0:
; SSE-NEXT:    cvtdq2pd %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0xe6,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_cvtdq2pd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vcvtdq2pd %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0xe6,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_cvtdq2pd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vcvtdq2pd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0xe6,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32> %a0) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtdq2pd(<4 x i32>) nounwind readnone
|  |  | 
|  |  | 
; float -> double conversion intrinsic lowers to a single (v)cvtps2pd.
define <2 x double> @test_x86_sse2_cvtps2pd(<4 x float> %a0) {
; SSE-LABEL: test_x86_sse2_cvtps2pd:
; SSE:       ## %bb.0:
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0 ## encoding: [0x0f,0x5a,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_cvtps2pd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vcvtps2pd %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5a,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_cvtps2pd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vcvtps2pd %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float> %a0) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtps2pd(<4 x float>) nounwind readnone
|  |  | 
|  |  | 
; Low-64-bit store intrinsic lowers to a (v)movlps store on all targets;
; the 32-bit runs additionally load the pointer argument from the stack.
define void @test_x86_sse2_storel_dq(ptr %a0, <4 x i32> %a1) {
; X86-SSE-LABEL: test_x86_sse2_storel_dq:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movlps %xmm0, (%eax) ## encoding: [0x0f,0x13,0x00]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse2_storel_dq:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovlps %xmm0, (%eax) ## encoding: [0xc5,0xf8,0x13,0x00]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse2_storel_dq:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovlps %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x00]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_storel_dq:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    movlps %xmm0, (%rdi) ## encoding: [0x0f,0x13,0x07]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse2_storel_dq:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vmovlps %xmm0, (%rdi) ## encoding: [0xc5,0xf8,0x13,0x07]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse2_storel_dq:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vmovlps %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x13,0x07]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
call void @llvm.x86.sse2.storel.dq(ptr %a0, <4 x i32> %a1)
ret void
}
declare void @llvm.x86.sse2.storel.dq(ptr, <4 x i32>) nounwind
|  |  | 
|  |  | 
; Unaligned integer-vector store intrinsic: expect (v)movdqu. The add of
; all-ones is there to pin the integer execution domain (lowered as
; pcmpeqd + psubb, i.e. x - (-1) == x + 1).
define void @test_x86_sse2_storeu_dq(ptr %a0, <16 x i8> %a1) {
; add operation forces the execution domain.
; X86-SSE-LABEL: test_x86_sse2_storeu_dq:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    pcmpeqd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x76,0xc9]
; X86-SSE-NEXT:    psubb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf8,0xc1]
; X86-SSE-NEXT:    movdqu %xmm0, (%eax) ## encoding: [0xf3,0x0f,0x7f,0x00]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse2_storeu_dq:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
; X86-AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf8,0xc1]
; X86-AVX1-NEXT:    vmovdqu %xmm0, (%eax) ## encoding: [0xc5,0xfa,0x7f,0x00]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse2_storeu_dq:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
; X86-AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
; X86-AVX512-NEXT:    vmovdqu %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x00]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_storeu_dq:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    pcmpeqd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x76,0xc9]
; X64-SSE-NEXT:    psubb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf8,0xc1]
; X64-SSE-NEXT:    movdqu %xmm0, (%rdi) ## encoding: [0xf3,0x0f,0x7f,0x07]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse2_storeu_dq:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
; X64-AVX1-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf8,0xc1]
; X64-AVX1-NEXT:    vmovdqu %xmm0, (%rdi) ## encoding: [0xc5,0xfa,0x7f,0x07]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse2_storeu_dq:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9]
; X64-AVX512-NEXT:    vpsubb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf8,0xc1]
; X64-AVX512-NEXT:    vmovdqu %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7f,0x07]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
%a2 = add <16 x i8> %a1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
call void @llvm.x86.sse2.storeu.dq(ptr %a0, <16 x i8> %a2)
ret void
}
declare void @llvm.x86.sse2.storeu.dq(ptr, <16 x i8>) nounwind
|  |  | 
|  |  | 
; Unaligned double-vector store intrinsic: expect (v)movupd. The fadd with
; a constant (0.0 in lane 0) pins the FP execution domain; the constant is
; materialized via xorpd + movhpd from a constant-pool entry (RIP-relative
; on x86-64, absolute fixup on 32-bit).
define void @test_x86_sse2_storeu_pd(ptr %a0, <2 x double> %a1) {
; fadd operation forces the execution domain.
; X86-SSE-LABEL: test_x86_sse2_storeu_pd:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    xorpd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x57,0xc9]
; X86-SSE-NEXT:    movhpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1 ## encoding: [0x66,0x0f,0x16,0x0d,A,A,A,A]
; X86-SSE-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-SSE-NEXT:    ## xmm1 = xmm1[0],mem[0]
; X86-SSE-NEXT:    addpd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x58,0xc8]
; X86-SSE-NEXT:    movupd %xmm1, (%eax) ## encoding: [0x66,0x0f,0x11,0x08]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse2_storeu_pd:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
; X86-AVX1-NEXT:    vmovhpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X86-AVX1-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX1-NEXT:    ## xmm1 = xmm1[0],mem[0]
; X86-AVX1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX1-NEXT:    vmovupd %xmm0, (%eax) ## encoding: [0xc5,0xf9,0x11,0x00]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse2_storeu_pd:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X86-AVX512-NEXT:    vmovhpd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X86-AVX512-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: FK_Data_4
; X86-AVX512-NEXT:    ## xmm1 = xmm1[0],mem[0]
; X86-AVX512-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X86-AVX512-NEXT:    vmovupd %xmm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x00]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_storeu_pd:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    xorpd %xmm1, %xmm1 ## encoding: [0x66,0x0f,0x57,0xc9]
; X64-SSE-NEXT:    movhpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1 ## encoding: [0x66,0x0f,0x16,0x0d,A,A,A,A]
; X64-SSE-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: reloc_riprel_4byte
; X64-SSE-NEXT:    ## xmm1 = xmm1[0],mem[0]
; X64-SSE-NEXT:    addpd %xmm0, %xmm1 ## encoding: [0x66,0x0f,0x58,0xc8]
; X64-SSE-NEXT:    movupd %xmm1, (%rdi) ## encoding: [0x66,0x0f,0x11,0x0f]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse2_storeu_pd:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX1-NEXT:    vmovhpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X64-AVX1-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: reloc_riprel_4byte
; X64-AVX1-NEXT:    ## xmm1 = xmm1[0],mem[0]
; X64-AVX1-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX1-NEXT:    vmovupd %xmm0, (%rdi) ## encoding: [0xc5,0xf9,0x11,0x07]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse2_storeu_pd:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vxorpd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x57,0xc9]
; X64-AVX512-NEXT:    vmovhpd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x16,0x0d,A,A,A,A]
; X64-AVX512-NEXT:    ## fixup A - offset: 4, value: {{\.?LCPI[0-9]+_[0-9]+}}, kind: reloc_riprel_4byte
; X64-AVX512-NEXT:    ## xmm1 = xmm1[0],mem[0]
; X64-AVX512-NEXT:    vaddpd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc1]
; X64-AVX512-NEXT:    vmovupd %xmm0, (%rdi) ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x11,0x07]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
%a2 = fadd <2 x double> %a1, <double 0x0, double 0x4200000000000000>
call void @llvm.x86.sse2.storeu.pd(ptr %a0, <2 x double> %a2)
ret void
}
declare void @llvm.x86.sse2.storeu.pd(ptr, <2 x double>) nounwind
|  |  | 
; pshufd intrinsic with imm 27 (0x1b) reverses the four dwords; the
; backend picks (v)shufps for the AVX targets, same [3,2,1,0] result.
define <4 x i32> @test_x86_sse2_pshuf_d(<4 x i32> %a) {
; SSE-LABEL: test_x86_sse2_pshuf_d:
; SSE:       ## %bb.0: ## %entry
; SSE-NEXT:    pshufd $27, %xmm0, %xmm0 ## encoding: [0x66,0x0f,0x70,0xc0,0x1b]
; SSE-NEXT:    ## xmm0 = xmm0[3,2,1,0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_pshuf_d:
; AVX1:       ## %bb.0: ## %entry
; AVX1-NEXT:    vshufps $27, %xmm0, %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0xc6,0xc0,0x1b]
; AVX1-NEXT:    ## xmm0 = xmm0[3,2,1,0]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_pshuf_d:
; AVX512:       ## %bb.0: ## %entry
; AVX512-NEXT:    vshufps $27, %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0xc6,0xc0,0x1b]
; AVX512-NEXT:    ## xmm0 = xmm0[3,2,1,0]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
%res = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27) nounwind readnone
ret <4 x i32> %res
}
declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8) nounwind readnone
|  |  | 
; pshuflw intrinsic with imm 27 reverses the low four words; the high
; four words pass through unchanged.
define <8 x i16> @test_x86_sse2_pshufl_w(<8 x i16> %a) {
; SSE-LABEL: test_x86_sse2_pshufl_w:
; SSE:       ## %bb.0: ## %entry
; SSE-NEXT:    pshuflw $27, %xmm0, %xmm0 ## encoding: [0xf2,0x0f,0x70,0xc0,0x1b]
; SSE-NEXT:    ## xmm0 = xmm0[3,2,1,0,4,5,6,7]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_pshufl_w:
; AVX1:       ## %bb.0: ## %entry
; AVX1-NEXT:    vpshuflw $27, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x70,0xc0,0x1b]
; AVX1-NEXT:    ## xmm0 = xmm0[3,2,1,0,4,5,6,7]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_pshufl_w:
; AVX512:       ## %bb.0: ## %entry
; AVX512-NEXT:    vpshuflw $27, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x70,0xc0,0x1b]
; AVX512-NEXT:    ## xmm0 = xmm0[3,2,1,0,4,5,6,7]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
%res = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27) nounwind readnone
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8) nounwind readnone
|  |  | 
; pshufhw intrinsic with imm 27 reverses the high four words; the low
; four words pass through unchanged.
define <8 x i16> @test_x86_sse2_pshufh_w(<8 x i16> %a) {
; SSE-LABEL: test_x86_sse2_pshufh_w:
; SSE:       ## %bb.0: ## %entry
; SSE-NEXT:    pshufhw $27, %xmm0, %xmm0 ## encoding: [0xf3,0x0f,0x70,0xc0,0x1b]
; SSE-NEXT:    ## xmm0 = xmm0[0,1,2,3,7,6,5,4]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_pshufh_w:
; AVX1:       ## %bb.0: ## %entry
; AVX1-NEXT:    vpshufhw $27, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x70,0xc0,0x1b]
; AVX1-NEXT:    ## xmm0 = xmm0[0,1,2,3,7,6,5,4]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_pshufh_w:
; AVX512:       ## %bb.0: ## %entry
; AVX512-NEXT:    vpshufhw $27, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x70,0xc0,0x1b]
; AVX512-NEXT:    ## xmm0 = xmm0[0,1,2,3,7,6,5,4]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
entry:
%res = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27) nounwind readnone
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8) nounwind readnone
|  |  | 
; Unsigned byte max intrinsic lowers to a single (v)pmaxub.
define <16 x i8> @max_epu8(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: max_epu8:
; SSE:       ## %bb.0:
; SSE-NEXT:    pmaxub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xde,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: max_epu8:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xde,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: max_epu8:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpmaxub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xde,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8> %a0, <16 x i8> %a1)
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.sse2.pmaxu.b(<16 x i8>, <16 x i8>) nounwind readnone
|  |  | 
; Unsigned byte min intrinsic lowers to a single (v)pminub.
define <16 x i8> @min_epu8(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: min_epu8:
; SSE:       ## %bb.0:
; SSE-NEXT:    pminub %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xda,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: min_epu8:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xda,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: min_epu8:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpminub %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xda,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8> %a0, <16 x i8> %a1)
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.sse2.pminu.b(<16 x i8>, <16 x i8>) nounwind readnone
|  |  | 
; Signed word max intrinsic lowers to a single (v)pmaxsw.
define <8 x i16> @max_epi16(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: max_epi16:
; SSE:       ## %bb.0:
; SSE-NEXT:    pmaxsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xee,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: max_epi16:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xee,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: max_epi16:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpmaxsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xee,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16> %a0, <8 x i16> %a1)
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse2.pmaxs.w(<8 x i16>, <8 x i16>) nounwind readnone
|  |  | 
; Signed word min intrinsic lowers to a single (v)pminsw.
define <8 x i16> @min_epi16(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: min_epi16:
; SSE:       ## %bb.0:
; SSE-NEXT:    pminsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xea,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: min_epi16:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xea,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: min_epi16:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpminsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xea,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16> %a0, <8 x i16> %a1)
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse2.pmins.w(<8 x i16>, <8 x i16>) nounwind readnone
|  |  | 
; Scalar double add intrinsic lowers to a single (v)addsd.
define <2 x double> @test_x86_sse2_add_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_add_sd:
; SSE:       ## %bb.0:
; SSE-NEXT:    addsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x58,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_add_sd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x58,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_add_sd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vaddsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x58,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.add.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.add.sd(<2 x double>, <2 x double>) nounwind readnone
|  |  | 
|  |  | 
; Scalar double subtract intrinsic lowers to a single (v)subsd.
define <2 x double> @test_x86_sse2_sub_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_sub_sd:
; SSE:       ## %bb.0:
; SSE-NEXT:    subsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5c,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_sub_sd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5c,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_sub_sd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5c,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.sub.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.sub.sd(<2 x double>, <2 x double>) nounwind readnone
|  |  | 
|  |  | 
; Scalar double-precision multiply intrinsic: expect a single (v)mulsd.
define <2 x double> @test_x86_sse2_mul_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_mul_sd:
; SSE:       ## %bb.0:
; SSE-NEXT:    mulsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x59,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_mul_sd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x59,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_mul_sd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vmulsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x59,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.mul.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.mul.sd(<2 x double>, <2 x double>) nounwind readnone
|  |  | 
|  |  | 
; Scalar double-precision divide intrinsic: expect a single (v)divsd.
define <2 x double> @test_x86_sse2_div_sd(<2 x double> %a0, <2 x double> %a1) {
; SSE-LABEL: test_x86_sse2_div_sd:
; SSE:       ## %bb.0:
; SSE-NEXT:    divsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x5e,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_div_sd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x5e,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_div_sd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vdivsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x5e,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.div.sd(<2 x double> %a0, <2 x double> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind readnone
|  |  | 
|  |  | 
; Unsigned 32x32->64 multiply intrinsic: expect a single (v)pmuludq.
define <2 x i64> @test_x86_sse2_pmulu_dq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: test_x86_sse2_pmulu_dq:
; SSE:       ## %bb.0:
; SSE-NEXT:    pmuludq %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xf4,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_pmulu_dq:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xf4,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_pmulu_dq:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xf4,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32> %a0, <4 x i32> %a1) ; <<2 x i64>> [#uses=1]
ret <2 x i64> %res
}
declare <2 x i64> @llvm.x86.sse2.pmulu.dq(<4 x i32>, <4 x i32>) nounwind readnone
|  |  | 
|  |  | 
; i32 -> f64 conversion intrinsic. The X86/X64 prefix split is needed because
; on i386 the i32 argument arrives on the stack (memory-operand cvtsi2sdl),
; while on x86-64 it arrives in %edi (register-operand cvtsi2sd).
define <2 x double> @test_x86_sse2_cvtsi2sd(<2 x double> %a0, i32 %a1) {
; X86-SSE-LABEL: test_x86_sse2_cvtsi2sd:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    cvtsi2sdl {{[0-9]+}}(%esp), %xmm0 ## encoding: [0xf2,0x0f,0x2a,0x44,0x24,0x04]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse2_cvtsi2sd:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse2_cvtsi2sd:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    vcvtsi2sdl {{[0-9]+}}(%esp), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0x44,0x24,0x04]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_cvtsi2sd:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    cvtsi2sd %edi, %xmm0 ## encoding: [0xf2,0x0f,0x2a,0xc7]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse2_cvtsi2sd:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x2a,0xc7]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse2_cvtsi2sd:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vcvtsi2sd %edi, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x2a,0xc7]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %a0, i32 %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
|  |  | 
|  |  | 
; f32 -> f64 scalar conversion intrinsic with both operands in registers:
; expect a single (v)cvtss2sd.
define <2 x double> @test_x86_sse2_cvtss2sd(<2 x double> %a0, <4 x float> %a1) {
; SSE-LABEL: test_x86_sse2_cvtss2sd:
; SSE:       ## %bb.0:
; SSE-NEXT:    cvtss2sd %xmm1, %xmm0 ## encoding: [0xf3,0x0f,0x5a,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_cvtss2sd:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfa,0x5a,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_cvtss2sd:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vcvtss2sd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x5a,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>) nounwind readnone
|  |  | 
|  |  | 
; Same intrinsic with the float source loaded from memory. Without optsize the
; compiler keeps a separate (v)movss load before the conversion (avoids a
; partial-register/false-dependency folded load) and then merges the low lane
; into %a0 via (v)movsd.
define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, ptr %p1) {
; X86-SSE-LABEL: test_x86_sse2_cvtss2sd_load:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT:    ## encoding: [0xf3,0x0f,0x10,0x08]
; X86-SSE-NEXT:    cvtss2sd %xmm1, %xmm1 ## encoding: [0xf3,0x0f,0x5a,0xc9]
; X86-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
; X86-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse2_cvtss2sd_load:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-AVX1-NEXT:    ## encoding: [0xc5,0xfa,0x10,0x08]
; X86-AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0xc9]
; X86-AVX1-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x10,0xc1]
; X86-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse2_cvtss2sd_load:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-AVX512-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
; X86-AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
; X86-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x10,0xc1]
; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_cvtss2sd_load:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-SSE-NEXT:    ## encoding: [0xf3,0x0f,0x10,0x0f]
; X64-SSE-NEXT:    cvtss2sd %xmm1, %xmm1 ## encoding: [0xf3,0x0f,0x5a,0xc9]
; X64-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
; X64-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse2_cvtss2sd_load:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-AVX1-NEXT:    ## encoding: [0xc5,0xfa,0x10,0x0f]
; X64-AVX1-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf2,0x5a,0xc9]
; X64-AVX1-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x10,0xc1]
; X64-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse2_cvtss2sd_load:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-AVX512-NEXT:    ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
; X64-AVX512-NEXT:    vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
; X64-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x10,0xc1]
; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
%a1 = load <4 x float>, ptr %p1
%res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
|  |  | 
|  |  | 
; Same as the previous test but with optsize: the load is folded directly into
; (v)cvtss2sd to save the separate movss. NOTE(review): the AVX variants read
; an arbitrary pass-through source register (%xmm7 on x86, %xmm15 on x86-64);
; this looks like an undef-register choice by the allocator — confirm against
; a fresh update_llc_test_checks run before editing these lines by hand.
define <2 x double> @test_x86_sse2_cvtss2sd_load_optsize(<2 x double> %a0, ptr %p1) optsize {
; X86-SSE-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
; X86-SSE:       ## %bb.0:
; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-SSE-NEXT:    cvtss2sd (%eax), %xmm1 ## encoding: [0xf3,0x0f,0x5a,0x08]
; X86-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
; X86-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X86-SSE-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX1-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
; X86-AVX1:       ## %bb.0:
; X86-AVX1-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX1-NEXT:    vcvtss2sd (%eax), %xmm7, %xmm1 ## encoding: [0xc5,0xc2,0x5a,0x08]
; X86-AVX1-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x10,0xc1]
; X86-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X86-AVX1-NEXT:    retl ## encoding: [0xc3]
;
; X86-AVX512-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
; X86-AVX512:       ## %bb.0:
; X86-AVX512-NEXT:    movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT:    vcvtss2sd (%eax), %xmm7, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xc2,0x5a,0x08]
; X86-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; X86-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X86-AVX512-NEXT:    retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
; X64-SSE:       ## %bb.0:
; X64-SSE-NEXT:    cvtss2sd (%rdi), %xmm1 ## encoding: [0xf3,0x0f,0x5a,0x0f]
; X64-SSE-NEXT:    movsd %xmm1, %xmm0 ## encoding: [0xf2,0x0f,0x10,0xc1]
; X64-SSE-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X64-SSE-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
; X64-AVX1:       ## %bb.0:
; X64-AVX1-NEXT:    vcvtss2sd (%rdi), %xmm15, %xmm1 ## encoding: [0xc5,0x82,0x5a,0x0f]
; X64-AVX1-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xfb,0x10,0xc1]
; X64-AVX1-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X64-AVX1-NEXT:    retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: test_x86_sse2_cvtss2sd_load_optsize:
; X64-AVX512:       ## %bb.0:
; X64-AVX512-NEXT:    vcvtss2sd (%rdi), %xmm15, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0x82,0x5a,0x0f]
; X64-AVX512-NEXT:    vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; X64-AVX512-NEXT:    ## xmm0 = xmm1[0],xmm0[1]
; X64-AVX512-NEXT:    retq ## encoding: [0xc3]
%a1 = load <4 x float>, ptr %p1
%res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]
ret <2 x double> %res
}
|  |  | 
|  |  | 
; Packed i32 -> f32 conversion intrinsic: expect a single (v)cvtdq2ps.
define <4 x float> @test_x86_sse2_cvtdq2ps(<4 x i32> %a0) {
; SSE-LABEL: test_x86_sse2_cvtdq2ps:
; SSE:       ## %bb.0:
; SSE-NEXT:    cvtdq2ps %xmm0, %xmm0 ## encoding: [0x0f,0x5b,0xc0]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_cvtdq2ps:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vcvtdq2ps %xmm0, %xmm0 ## encoding: [0xc5,0xf8,0x5b,0xc0]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_cvtdq2ps:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vcvtdq2ps %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5b,0xc0]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %a0) ; <<4 x float>> [#uses=1]
ret <4 x float> %res
}
declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
|  |  | 
|  |  | 
; Signed saturating byte add: the generic llvm.sadd.sat.v16i8 intrinsic
; (which replaced the old x86-specific padds intrinsic) must still lower to a
; single (v)paddsb.
define <16 x i8> @test_x86_sse2_padds_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_padds_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xec,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_padds_b:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xec,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_padds_b:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
|  |  | 
|  |  | 
; Signed saturating word add: llvm.sadd.sat.v8i16 must lower to (v)paddsw.
define <8 x i16> @test_x86_sse2_padds_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_padds_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xed,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_padds_w:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xed,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_padds_w:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
}
declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|  |  | 
|  |  | 
; Unsigned saturating byte add intrinsic: expect a single (v)paddusb.
; Dropped stale AVX2-*/SKX-* check lines: neither prefix appears in any RUN
; line of this file, so FileCheck never matched them and
; update_llc_test_checks.py would not regenerate them.
define <16 x i8> @test_x86_sse2_paddus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdc,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_paddus_b:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_paddus_b:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpaddusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdc,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.sse2.paddus.b(<16 x i8>, <16 x i8>) nounwind readnone
|  |  | 
|  |  | 
; Unsigned saturating word add intrinsic: expect a single (v)paddusw.
; Dropped stale AVX2-*/SKX-* check lines: neither prefix appears in any RUN
; line of this file, so FileCheck never matched them.
define <8 x i16> @test_x86_sse2_paddus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_paddus_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    paddusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xdd,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_paddus_w:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_paddus_w:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpaddusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xdd,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse2.paddus.w(<8 x i16>, <8 x i16>) nounwind readnone
|  |  | 
|  |  | 
; Signed saturating byte subtract: llvm.ssub.sat.v16i8 must lower to (v)psubsb.
define <16 x i8> @test_x86_sse2_psubs_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubs_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubsb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe8,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psubs_b:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe8,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psubs_b:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
|  |  | 
|  |  | 
; Signed saturating word subtract: llvm.ssub.sat.v8i16 must lower to (v)psubsw.
define <8 x i16> @test_x86_sse2_psubs_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubs_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubsw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xe9,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psubs_w:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xe9,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psubs_w:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
}
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
|  |  | 
|  |  | 
; Unsigned saturating byte subtract intrinsic: expect a single (v)psubusb.
; Dropped stale AVX2-*/SKX-* check lines: neither prefix appears in any RUN
; line of this file, so FileCheck never matched them.
define <16 x i8> @test_x86_sse2_psubus_b(<16 x i8> %a0, <16 x i8> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_b:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusb %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd8,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psubus_b:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psubus_b:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpsubusb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd8,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8> %a0, <16 x i8> %a1) ; <<16 x i8>> [#uses=1]
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.sse2.psubus.b(<16 x i8>, <16 x i8>) nounwind readnone
|  |  | 
|  |  | 
; Unsigned saturating word subtract intrinsic: expect a single (v)psubusw.
; Dropped stale AVX2-*/SKX-* check lines: neither prefix appears in any RUN
; line of this file, so FileCheck never matched them.
define <8 x i16> @test_x86_sse2_psubus_w(<8 x i16> %a0, <8 x i16> %a1) {
; SSE-LABEL: test_x86_sse2_psubus_w:
; SSE:       ## %bb.0:
; SSE-NEXT:    psubusw %xmm1, %xmm0 ## encoding: [0x66,0x0f,0xd9,0xc1]
; SSE-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse2_psubus_w:
; AVX1:       ## %bb.0:
; AVX1-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX1-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse2_psubus_w:
; AVX512:       ## %bb.0:
; AVX512-NEXT:    vpsubusw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd9,0xc1]
; AVX512-NEXT:    ret{{[l|q]}} ## encoding: [0xc3]
%res = call <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16> %a0, <8 x i16> %a1) ; <<8 x i16>> [#uses=1]
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.sse2.psubus.w(<8 x i16>, <8 x i16>) nounwind readnone