| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefixes=X64,SSE,SSE4 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=X64,SSE,SSE2 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=X64,AVX,AVX2 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=X64,AVX,AVX512 |
| ; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 |
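| ; Tests lowering of the llvm.ucmp (unsigned three-way compare) intrinsic for |
| ; various operand and result widths. The scalar cases lower to a cmp followed |
| ; by the seta + sbb $0 idiom, yielding 1 if x > y, 0 if x == y, -1 if x < y. |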
| |
| define i8 @ucmp.8.8(i8 %x, i8 %y) nounwind { |
| ; X64-LABEL: ucmp.8.8: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpb %sil, %dil |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.8.8: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: retl |
| %1 = call i8 @llvm.ucmp(i8 %x, i8 %y) |
| ret i8 %1 |
| } |
| |
| define i8 @ucmp.8.16(i16 %x, i16 %y) nounwind { |
| ; X64-LABEL: ucmp.8.16: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpw %si, %di |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.8.16: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpw {{[0-9]+}}(%esp), %ax |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: retl |
| %1 = call i8 @llvm.ucmp(i16 %x, i16 %y) |
| ret i8 %1 |
| } |
| |
| define i8 @ucmp.8.32(i32 %x, i32 %y) nounwind { |
| ; X64-LABEL: ucmp.8.32: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpl %esi, %edi |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.8.32: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: retl |
| %1 = call i8 @llvm.ucmp(i32 %x, i32 %y) |
| ret i8 %1 |
| } |
| |
| define i8 @ucmp.8.64(i64 %x, i64 %y) nounwind { |
| ; X64-LABEL: ucmp.8.64: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpq %rsi, %rdi |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.8.64: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: cmpl %ecx, %esi |
| ; X86-NEXT: movl %edi, %eax |
| ; X86-NEXT: sbbl %edx, %eax |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: cmpl %esi, %ecx |
| ; X86-NEXT: sbbl %edi, %edx |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: retl |
| %1 = call i8 @llvm.ucmp(i64 %x, i64 %y) |
| ret i8 %1 |
| } |
| |
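| ; i128 operands: the unsigned compare is carried out as two cmp/sbb chains, |
| ; one per direction, before the final sbb $0. |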
| define i8 @ucmp.8.128(i128 %x, i128 %y) nounwind { |
| ; X64-LABEL: ucmp.8.128: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpq %rdi, %rdx |
| ; X64-NEXT: movq %rcx, %rax |
| ; X64-NEXT: sbbq %rsi, %rax |
| ; X64-NEXT: setb %al |
| ; X64-NEXT: cmpq %rdx, %rdi |
| ; X64-NEXT: sbbq %rcx, %rsi |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.8.128: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movl %ebp, %eax |
| ; X86-NEXT: sbbl %esi, %eax |
| ; X86-NEXT: movl %ecx, %eax |
| ; X86-NEXT: sbbl %edx, %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl %edi, %eax |
| ; X86-NEXT: sbbl %ecx, %eax |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: cmpl %ebx, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %ebp, %esi |
| ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: sbbl %edi, %ecx |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: popl %ebp |
| ; X86-NEXT: retl |
| %1 = call i8 @llvm.ucmp(i128 %x, i128 %y) |
| ret i8 %1 |
| } |
| |
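| ; Results wider than i8 are produced by sign-extending the i8 seta/sbb value. |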
| define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind { |
| ; X64-LABEL: ucmp.32.32: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpl %esi, %edi |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: movsbl %al, %eax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.32.32: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %eax |
| ; X86-NEXT: retl |
| %1 = call i32 @llvm.ucmp(i32 %x, i32 %y) |
| ret i32 %1 |
| } |
| |
| define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind { |
| ; X64-LABEL: ucmp.32.64: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpq %rsi, %rdi |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: movsbl %al, %eax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.32.64: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: cmpl %eax, %edx |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %ecx, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %edx, %eax |
| ; X86-NEXT: sbbl %esi, %ecx |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movsbl %bl, %eax |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: retl |
| %1 = call i32 @llvm.ucmp(i64 %x, i64 %y) |
| ret i32 %1 |
| } |
| |
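| ; i64 result: on i686 the sign-extended value is returned in edx:eax. |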
| define i64 @ucmp.64.64(i64 %x, i64 %y) nounwind { |
| ; X64-LABEL: ucmp.64.64: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpq %rsi, %rdi |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: movsbq %al, %rax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp.64.64: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: cmpl %eax, %edx |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %ecx, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %edx, %eax |
| ; X86-NEXT: sbbl %esi, %ecx |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movsbl %bl, %eax |
| ; X86-NEXT: movl %eax, %edx |
| ; X86-NEXT: sarl $31, %edx |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: retl |
| %1 = call i64 @llvm.ucmp(i64 %x, i64 %y) |
| ret i64 %1 |
| } |
| |
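| ; Result narrower than i8 (i4): returned in %al with no extra masking. |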
| define i4 @ucmp_narrow_result(i32 %x, i32 %y) nounwind { |
| ; X64-LABEL: ucmp_narrow_result: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpl %esi, %edi |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_narrow_result: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: retl |
| %1 = call i4 @llvm.ucmp(i32 %x, i32 %y) |
| ret i4 %1 |
| } |
| |
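| ; i62 operands are zero-extended to 64 bits first (constant mask + and, or |
| ; bzhi when BMI2 is available) before the usual compare sequence. |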
| define i8 @ucmp_narrow_op(i62 %x, i62 %y) nounwind { |
| ; SSE-LABEL: ucmp_narrow_op: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movabsq $4611686018427387903, %rax # imm = 0x3FFFFFFFFFFFFFFF |
| ; SSE-NEXT: andq %rax, %rsi |
| ; SSE-NEXT: andq %rax, %rdi |
| ; SSE-NEXT: cmpq %rsi, %rdi |
| ; SSE-NEXT: seta %al |
| ; SSE-NEXT: sbbb $0, %al |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: ucmp_narrow_op: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: movb $62, %al |
| ; AVX-NEXT: bzhiq %rax, %rsi, %rcx |
| ; AVX-NEXT: bzhiq %rax, %rdi, %rax |
| ; AVX-NEXT: cmpq %rcx, %rax |
| ; AVX-NEXT: seta %al |
| ; AVX-NEXT: sbbb $0, %al |
| ; AVX-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_narrow_op: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl $1073741823, %ecx # imm = 0x3FFFFFFF |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: andl %ecx, %edx |
| ; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: cmpl %esi, %edi |
| ; X86-NEXT: movl %ecx, %eax |
| ; X86-NEXT: sbbl %edx, %eax |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: cmpl %edi, %esi |
| ; X86-NEXT: sbbl %ecx, %edx |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: retl |
| %1 = call i8 @llvm.ucmp(i62 %x, i62 %y) |
| ret i8 %1 |
| } |
| |
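| ; i141 result: the i8 compare value is sign-extended across all words, with |
| ; the top part masked to the remaining 13 bits (andl $8191). |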
| define i141 @ucmp_wide_result(i32 %x, i32 %y) nounwind { |
| ; X64-LABEL: ucmp_wide_result: |
| ; X64: # %bb.0: |
| ; X64-NEXT: cmpl %esi, %edi |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: movsbq %al, %rax |
| ; X64-NEXT: movq %rax, %rdx |
| ; X64-NEXT: sarq $63, %rdx |
| ; X64-NEXT: movl %edx, %ecx |
| ; X64-NEXT: andl $8191, %ecx # imm = 0x1FFF |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_wide_result: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %cl |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movsbl %cl, %ecx |
| ; X86-NEXT: movl %ecx, (%eax) |
| ; X86-NEXT: sarl $31, %ecx |
| ; X86-NEXT: movl %ecx, 12(%eax) |
| ; X86-NEXT: movl %ecx, 8(%eax) |
| ; X86-NEXT: movl %ecx, 4(%eax) |
| ; X86-NEXT: andl $8191, %ecx # imm = 0x1FFF |
| ; X86-NEXT: movw %cx, 16(%eax) |
| ; X86-NEXT: retl $4 |
| %1 = call i141 @llvm.ucmp(i32 %x, i32 %y) |
| ret i141 %1 |
| } |
| |
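| ; i109 operands: the high halves are masked to their 45 significant bits |
| ; before the two-way cmp/sbb sequence. |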
| define i8 @ucmp_wide_op(i109 %x, i109 %y) nounwind { |
| ; SSE-LABEL: ucmp_wide_op: |
| ; SSE: # %bb.0: |
| ; SSE-NEXT: movabsq $35184372088831, %rax # imm = 0x1FFFFFFFFFFF |
| ; SSE-NEXT: andq %rax, %rsi |
| ; SSE-NEXT: andq %rax, %rcx |
| ; SSE-NEXT: cmpq %rdi, %rdx |
| ; SSE-NEXT: movq %rcx, %rax |
| ; SSE-NEXT: sbbq %rsi, %rax |
| ; SSE-NEXT: setb %al |
| ; SSE-NEXT: cmpq %rdx, %rdi |
| ; SSE-NEXT: sbbq %rcx, %rsi |
| ; SSE-NEXT: sbbb $0, %al |
| ; SSE-NEXT: retq |
| ; |
| ; AVX-LABEL: ucmp_wide_op: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: movb $45, %al |
| ; AVX-NEXT: bzhiq %rax, %rsi, %rsi |
| ; AVX-NEXT: bzhiq %rax, %rcx, %rcx |
| ; AVX-NEXT: cmpq %rdi, %rdx |
| ; AVX-NEXT: movq %rcx, %rax |
| ; AVX-NEXT: sbbq %rsi, %rax |
| ; AVX-NEXT: setb %al |
| ; AVX-NEXT: cmpq %rdx, %rdi |
| ; AVX-NEXT: sbbq %rcx, %rsi |
| ; AVX-NEXT: sbbb $0, %al |
| ; AVX-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_wide_op: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl $8191, %ecx # imm = 0x1FFF |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: andl %ecx, %edx |
| ; X86-NEXT: andl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: sbbl %edi, %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movl %ebx, %eax |
| ; X86-NEXT: sbbl %esi, %eax |
| ; X86-NEXT: movl %ecx, %eax |
| ; X86-NEXT: sbbl %edx, %eax |
| ; X86-NEXT: setb %al |
| ; X86-NEXT: cmpl %ebp, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: sbbl %ebx, %esi |
| ; X86-NEXT: sbbl %ecx, %edx |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: popl %ebp |
| ; X86-NEXT: retl |
| %1 = call i8 @llvm.ucmp(i109 %x, i109 %y) |
| ret i8 %1 |
| } |
| |
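| ; i7 operands are masked to 7 bits; the i41 result is sign-extended from the |
| ; i8 compare value. |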
| define i41 @ucmp_uncommon_types(i7 %x, i7 %y) nounwind { |
| ; X64-LABEL: ucmp_uncommon_types: |
| ; X64: # %bb.0: |
| ; X64-NEXT: andb $127, %sil |
| ; X64-NEXT: andb $127, %dil |
| ; X64-NEXT: cmpb %sil, %dil |
| ; X64-NEXT: seta %al |
| ; X64-NEXT: sbbb $0, %al |
| ; X64-NEXT: movsbq %al, %rax |
| ; X64-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_uncommon_types: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andb $127, %al |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: andb $127, %cl |
| ; X86-NEXT: cmpb %al, %cl |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %eax |
| ; X86-NEXT: movl %eax, %edx |
| ; X86-NEXT: sarl $31, %edx |
| ; X86-NEXT: retl |
| %1 = call i41 @llvm.ucmp(i7 %x, i7 %y) |
| ret i41 %1 |
| } |
| |
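| ; <4 x i32>: each lane yields 1, 0 or -1 by subtracting the two unsigned |
| ; compare masks (mask registers on AVX512); the i686 run scalarizes. |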
| define <4 x i32> @ucmp_normal_vectors(<4 x i32> %x, <4 x i32> %y) nounwind { |
| ; SSE4-LABEL: ucmp_normal_vectors: |
| ; SSE4: # %bb.0: |
| ; SSE4-NEXT: movdqa {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648] |
| ; SSE4-NEXT: pxor %xmm2, %xmm1 |
| ; SSE4-NEXT: pxor %xmm2, %xmm0 |
| ; SSE4-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE4-NEXT: pcmpgtd %xmm1, %xmm2 |
| ; SSE4-NEXT: pcmpgtd %xmm0, %xmm1 |
| ; SSE4-NEXT: psubd %xmm2, %xmm1 |
| ; SSE4-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE4-NEXT: retq |
| ; |
| ; SSE2-LABEL: ucmp_normal_vectors: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE2-NEXT: pmaxud %xmm1, %xmm2 |
| ; SSE2-NEXT: pcmpeqd %xmm0, %xmm2 |
| ; SSE2-NEXT: pminud %xmm0, %xmm1 |
| ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 |
| ; SSE2-NEXT: psubd %xmm2, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX2-LABEL: ucmp_normal_vectors: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpmaxud %xmm1, %xmm0, %xmm2 |
| ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm2 |
| ; AVX2-NEXT: vpminud %xmm1, %xmm0, %xmm1 |
| ; AVX2-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: ucmp_normal_vectors: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpcmpltud %xmm1, %xmm0, %k1 |
| ; AVX512-NEXT: vpcmpnleud %xmm1, %xmm0, %k2 |
| ; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1] |
| ; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} |
| ; AVX512-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_normal_vectors: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: seta %dl |
| ; X86-NEXT: sbbb $0, %dl |
| ; X86-NEXT: movsbl %dl, %edx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: seta %bl |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movsbl %bl, %edi |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: seta %bl |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movsbl %bl, %esi |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %cl |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movsbl %cl, %ecx |
| ; X86-NEXT: movl %ecx, 12(%eax) |
| ; X86-NEXT: movl %esi, 8(%eax) |
| ; X86-NEXT: movl %edi, 4(%eax) |
| ; X86-NEXT: movl %edx, (%eax) |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: retl $4 |
| %1 = call <4 x i32> @llvm.ucmp(<4 x i32> %x, <4 x i32> %y) |
| ret <4 x i32> %1 |
| } |
| |
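| ; <4 x i8> result from <4 x i32> operands: the lane compares are scalarized |
| ; and the four byte results reassembled into a vector. |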
| define <4 x i8> @ucmp_narrow_vec_result(<4 x i32> %x, <4 x i32> %y) nounwind { |
| ; SSE4-LABEL: ucmp_narrow_vec_result: |
| ; SSE4: # %bb.0: |
| ; SSE4-NEXT: movd %xmm1, %eax |
| ; SSE4-NEXT: movd %xmm0, %ecx |
| ; SSE4-NEXT: cmpl %eax, %ecx |
| ; SSE4-NEXT: seta %al |
| ; SSE4-NEXT: sbbb $0, %al |
| ; SSE4-NEXT: movzbl %al, %eax |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm2, %ecx |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm2, %edx |
| ; SSE4-NEXT: cmpl %ecx, %edx |
| ; SSE4-NEXT: seta %cl |
| ; SSE4-NEXT: sbbb $0, %cl |
| ; SSE4-NEXT: movzbl %cl, %ecx |
| ; SSE4-NEXT: shll $8, %ecx |
| ; SSE4-NEXT: orl %eax, %ecx |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm2, %eax |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm2, %edx |
| ; SSE4-NEXT: cmpl %eax, %edx |
| ; SSE4-NEXT: seta %al |
| ; SSE4-NEXT: sbbb $0, %al |
| ; SSE4-NEXT: movzbl %al, %eax |
| ; SSE4-NEXT: shll $16, %eax |
| ; SSE4-NEXT: orl %ecx, %eax |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm1, %ecx |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm0, %edx |
| ; SSE4-NEXT: cmpl %ecx, %edx |
| ; SSE4-NEXT: seta %cl |
| ; SSE4-NEXT: sbbb $0, %cl |
| ; SSE4-NEXT: movzbl %cl, %ecx |
| ; SSE4-NEXT: shll $24, %ecx |
| ; SSE4-NEXT: orl %eax, %ecx |
| ; SSE4-NEXT: movd %ecx, %xmm0 |
| ; SSE4-NEXT: retq |
| ; |
| ; SSE2-LABEL: ucmp_narrow_vec_result: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pextrd $1, %xmm1, %eax |
| ; SSE2-NEXT: pextrd $1, %xmm0, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: movd %xmm1, %ecx |
| ; SSE2-NEXT: movd %xmm0, %edx |
| ; SSE2-NEXT: cmpl %ecx, %edx |
| ; SSE2-NEXT: seta %cl |
| ; SSE2-NEXT: sbbb $0, %cl |
| ; SSE2-NEXT: movzbl %cl, %ecx |
| ; SSE2-NEXT: movd %ecx, %xmm2 |
| ; SSE2-NEXT: pinsrb $1, %eax, %xmm2 |
| ; SSE2-NEXT: pextrd $2, %xmm1, %eax |
| ; SSE2-NEXT: pextrd $2, %xmm0, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $2, %eax, %xmm2 |
| ; SSE2-NEXT: pextrd $3, %xmm1, %eax |
| ; SSE2-NEXT: pextrd $3, %xmm0, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $3, %eax, %xmm2 |
| ; SSE2-NEXT: movdqa %xmm2, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX-LABEL: ucmp_narrow_vec_result: |
| ; AVX: # %bb.0: |
| ; AVX-NEXT: vpextrd $1, %xmm1, %eax |
| ; AVX-NEXT: vpextrd $1, %xmm0, %ecx |
| ; AVX-NEXT: cmpl %eax, %ecx |
| ; AVX-NEXT: seta %al |
| ; AVX-NEXT: sbbb $0, %al |
| ; AVX-NEXT: vmovd %xmm1, %ecx |
| ; AVX-NEXT: vmovd %xmm0, %edx |
| ; AVX-NEXT: cmpl %ecx, %edx |
| ; AVX-NEXT: seta %cl |
| ; AVX-NEXT: sbbb $0, %cl |
| ; AVX-NEXT: vmovd %ecx, %xmm2 |
| ; AVX-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 |
| ; AVX-NEXT: vpextrd $2, %xmm1, %eax |
| ; AVX-NEXT: vpextrd $2, %xmm0, %ecx |
| ; AVX-NEXT: cmpl %eax, %ecx |
| ; AVX-NEXT: seta %al |
| ; AVX-NEXT: sbbb $0, %al |
| ; AVX-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 |
| ; AVX-NEXT: vpextrd $3, %xmm1, %eax |
| ; AVX-NEXT: vpextrd $3, %xmm0, %ecx |
| ; AVX-NEXT: cmpl %eax, %ecx |
| ; AVX-NEXT: seta %al |
| ; AVX-NEXT: sbbb $0, %al |
| ; AVX-NEXT: vpinsrb $3, %eax, %xmm2, %xmm0 |
| ; AVX-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_narrow_vec_result: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %cl |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: seta %ch |
| ; X86-NEXT: sbbb $0, %ch |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: seta %bl |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: seta %dl |
| ; X86-NEXT: sbbb $0, %dl |
| ; X86-NEXT: movb %dl, 3(%eax) |
| ; X86-NEXT: movb %bl, 2(%eax) |
| ; X86-NEXT: movb %ch, 1(%eax) |
| ; X86-NEXT: movb %cl, (%eax) |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: retl $4 |
| %1 = call <4 x i8> @llvm.ucmp(<4 x i32> %x, <4 x i32> %y) |
| ret <4 x i8> %1 |
| } |
| |
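| ; <4 x i8> operands are zero-extended to <4 x i32> before the vector compare. |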
| define <4 x i32> @ucmp_narrow_vec_op(<4 x i8> %x, <4 x i8> %y) nounwind { |
| ; SSE4-LABEL: ucmp_narrow_vec_op: |
| ; SSE4: # %bb.0: |
| ; SSE4-NEXT: pxor %xmm2, %xmm2 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3] |
| ; SSE4-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE4-NEXT: pcmpgtd %xmm1, %xmm2 |
| ; SSE4-NEXT: pcmpgtd %xmm0, %xmm1 |
| ; SSE4-NEXT: psubd %xmm2, %xmm1 |
| ; SSE4-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE4-NEXT: retq |
| ; |
| ; SSE2-LABEL: ucmp_narrow_vec_op: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE2-NEXT: pcmpgtd %xmm1, %xmm2 |
| ; SSE2-NEXT: pcmpgtd %xmm0, %xmm1 |
| ; SSE2-NEXT: psubd %xmm2, %xmm1 |
| ; SSE2-NEXT: movdqa %xmm1, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX2-LABEL: ucmp_narrow_vec_op: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero |
| ; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero |
| ; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm2 |
| ; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0 |
| ; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0 |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: ucmp_narrow_vec_op: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero |
| ; AVX512-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero |
| ; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %k1 |
| ; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %k2 |
| ; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1] |
| ; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} |
| ; AVX512-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_narrow_vec_op: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %ch |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %dl |
| ; X86-NEXT: seta %dl |
| ; X86-NEXT: sbbb $0, %dl |
| ; X86-NEXT: movsbl %dl, %edx |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %bl |
| ; X86-NEXT: seta %bl |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movsbl %bl, %esi |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %ch |
| ; X86-NEXT: seta %ch |
| ; X86-NEXT: sbbb $0, %ch |
| ; X86-NEXT: movsbl %ch, %edi |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %cl |
| ; X86-NEXT: seta %cl |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movsbl %cl, %ecx |
| ; X86-NEXT: movl %ecx, 12(%eax) |
| ; X86-NEXT: movl %edi, 8(%eax) |
| ; X86-NEXT: movl %esi, 4(%eax) |
| ; X86-NEXT: movl %edx, (%eax) |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: retl $4 |
| %1 = call <4 x i32> @llvm.ucmp(<4 x i8> %x, <4 x i8> %y) |
| ret <4 x i32> %1 |
| } |
| |
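| ; <16 x i32> result: the byte operands are zero-extended and compared in |
| ; 128-bit (SSE) or 256-bit (AVX2) chunks; AVX512 compares the bytes directly |
| ; into mask registers. The i686 run scalarizes all 16 lanes. |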
| define <16 x i32> @ucmp_wide_vec_result(<16 x i8> %x, <16 x i8> %y) nounwind { |
| ; SSE4-LABEL: ucmp_wide_vec_result: |
| ; SSE4: # %bb.0: |
| ; SSE4-NEXT: movdqa %xmm1, %xmm3 |
| ; SSE4-NEXT: pxor %xmm5, %xmm5 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] |
| ; SSE4-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3] |
| ; SSE4-NEXT: movdqa %xmm0, %xmm2 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3],xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7] |
| ; SSE4-NEXT: movdqa %xmm2, %xmm6 |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] |
| ; SSE4-NEXT: movdqa %xmm6, %xmm7 |
| ; SSE4-NEXT: pcmpgtd %xmm4, %xmm7 |
| ; SSE4-NEXT: pcmpgtd %xmm6, %xmm4 |
| ; SSE4-NEXT: psubd %xmm7, %xmm4 |
| ; SSE4-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7] |
| ; SSE4-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7] |
| ; SSE4-NEXT: movdqa %xmm2, %xmm6 |
| ; SSE4-NEXT: pcmpgtd %xmm1, %xmm6 |
| ; SSE4-NEXT: pcmpgtd %xmm2, %xmm1 |
| ; SSE4-NEXT: psubd %xmm6, %xmm1 |
| ; SSE4-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15] |
| ; SSE4-NEXT: movdqa %xmm3, %xmm2 |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1],xmm2[2],xmm5[2],xmm2[3],xmm5[3] |
| ; SSE4-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm5[8],xmm0[9],xmm5[9],xmm0[10],xmm5[10],xmm0[11],xmm5[11],xmm0[12],xmm5[12],xmm0[13],xmm5[13],xmm0[14],xmm5[14],xmm0[15],xmm5[15] |
| ; SSE4-NEXT: movdqa %xmm0, %xmm6 |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3] |
| ; SSE4-NEXT: movdqa %xmm6, %xmm7 |
| ; SSE4-NEXT: pcmpgtd %xmm2, %xmm7 |
| ; SSE4-NEXT: pcmpgtd %xmm6, %xmm2 |
| ; SSE4-NEXT: psubd %xmm7, %xmm2 |
| ; SSE4-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] |
| ; SSE4-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7] |
| ; SSE4-NEXT: movdqa %xmm0, %xmm5 |
| ; SSE4-NEXT: pcmpgtd %xmm3, %xmm5 |
| ; SSE4-NEXT: pcmpgtd %xmm0, %xmm3 |
| ; SSE4-NEXT: psubd %xmm5, %xmm3 |
| ; SSE4-NEXT: movdqa %xmm4, %xmm0 |
| ; SSE4-NEXT: retq |
| ; |
| ; SSE2-LABEL: ucmp_wide_vec_result: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: movdqa %xmm0, %xmm4 |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm2, %xmm3 |
| ; SSE2-NEXT: pcmpgtd %xmm0, %xmm3 |
| ; SSE2-NEXT: pcmpgtd %xmm2, %xmm0 |
| ; SSE2-NEXT: psubd %xmm3, %xmm0 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1] |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm5 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[1,1,1,1] |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm2, %xmm3 |
| ; SSE2-NEXT: pcmpgtd %xmm5, %xmm3 |
| ; SSE2-NEXT: pcmpgtd %xmm2, %xmm5 |
| ; SSE2-NEXT: psubd %xmm3, %xmm5 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3] |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,3,2,3] |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm3, %xmm6 |
| ; SSE2-NEXT: pcmpgtd %xmm2, %xmm6 |
| ; SSE2-NEXT: pcmpgtd %xmm3, %xmm2 |
| ; SSE2-NEXT: psubd %xmm6, %xmm2 |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,3,3] |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero |
| ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3] |
| ; SSE2-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero |
| ; SSE2-NEXT: movdqa %xmm1, %xmm4 |
| ; SSE2-NEXT: pcmpgtd %xmm3, %xmm4 |
| ; SSE2-NEXT: pcmpgtd %xmm1, %xmm3 |
| ; SSE2-NEXT: psubd %xmm4, %xmm3 |
| ; SSE2-NEXT: movdqa %xmm5, %xmm1 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX2-LABEL: ucmp_wide_vec_result: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero |
| ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero |
| ; AVX2-NEXT: vpcmpgtd %ymm2, %ymm3, %ymm4 |
| ; AVX2-NEXT: vpcmpgtd %ymm3, %ymm2, %ymm2 |
| ; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2 |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] |
| ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero |
| ; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] |
| ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero |
| ; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm3 |
| ; AVX2-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0 |
| ; AVX2-NEXT: vpsubd %ymm3, %ymm0, %ymm1 |
| ; AVX2-NEXT: vmovdqa %ymm2, %ymm0 |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: ucmp_wide_vec_result: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpcmpltub %xmm1, %xmm0, %k1 |
| ; AVX512-NEXT: vpcmpnleub %xmm1, %xmm0, %k2 |
| ; AVX512-NEXT: vpbroadcastd {{.*#+}} zmm0 {%k2} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] |
| ; AVX512-NEXT: vpternlogd {{.*#+}} zmm1 = -1 |
| ; AVX512-NEXT: vmovdqa32 %zmm1, %zmm0 {%k1} |
| ; AVX512-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_wide_vec_result: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: subl $12, %esp |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %ah |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %ch |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %dh |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %bh |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: movb {{[0-9]+}}(%esp), %cl |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %cl |
| ; X86-NEXT: seta %cl |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %bh |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %bl |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %dh |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %ch |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %ah |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %dl |
| ; X86-NEXT: seta %bl |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, (%esp) # 1-byte Spill |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %bh |
| ; X86-NEXT: sbbb $0, %bh |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %edi |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %ebp |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %esi |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %edx |
| ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpb {{[0-9]+}}(%esp), %al |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movsbl %al, %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl %ecx, 60(%eax) |
| ; X86-NEXT: movl %edx, 56(%eax) |
| ; X86-NEXT: movl %esi, 52(%eax) |
| ; X86-NEXT: movl %ebp, 48(%eax) |
| ; X86-NEXT: movl %edi, 44(%eax) |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload |
| ; X86-NEXT: movl %ecx, 40(%eax) |
| ; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; X86-NEXT: movsbl %bh, %ecx |
| ; X86-NEXT: movl %ecx, 36(%eax) |
| ; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movsbl (%esp), %edx # 1-byte Folded Reload |
| ; X86-NEXT: movl %edx, 32(%eax) |
| ; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; X86-NEXT: movsbl %bl, %edi |
| ; X86-NEXT: movl %edi, 28(%eax) |
| ; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload |
| ; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload |
| ; X86-NEXT: movl %ebx, 24(%eax) |
| ; X86-NEXT: movl %edi, 20(%eax) |
| ; X86-NEXT: movl %edx, 16(%eax) |
| ; X86-NEXT: movl %ecx, 12(%eax) |
| ; X86-NEXT: movl %esi, 8(%eax) |
| ; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movl %ecx, 4(%eax) |
| ; X86-NEXT: movsbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movl %ecx, (%eax) |
| ; X86-NEXT: addl $12, %esp |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: popl %ebp |
| ; X86-NEXT: retl $4 |
| %1 = call <16 x i32> @llvm.ucmp(<16 x i8> %x, <16 x i8> %y) |
| ret <16 x i32> %1 |
| } |
| |
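| ; <16 x i8> result from <16 x i32> operands: scalarized per lane except on |
| ; AVX512, which uses dword compares into mask registers. |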
| define <16 x i8> @ucmp_wide_vec_op(<16 x i32> %x, <16 x i32> %y) nounwind { |
| ; SSE4-LABEL: ucmp_wide_vec_op: |
| ; SSE4: # %bb.0: |
| ; SSE4-NEXT: pushq %rbp |
| ; SSE4-NEXT: pushq %r15 |
| ; SSE4-NEXT: pushq %r14 |
| ; SSE4-NEXT: pushq %r13 |
| ; SSE4-NEXT: pushq %r12 |
| ; SSE4-NEXT: pushq %rbx |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm7[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm8, %eax |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm8, %ecx |
| ; SSE4-NEXT: cmpl %eax, %ecx |
| ; SSE4-NEXT: seta %al |
| ; SSE4-NEXT: sbbb $0, %al |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm7[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm8, %ecx |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm8, %edx |
| ; SSE4-NEXT: cmpl %ecx, %edx |
| ; SSE4-NEXT: seta %cl |
| ; SSE4-NEXT: sbbb $0, %cl |
| ; SSE4-NEXT: movd %xmm7, %edx |
| ; SSE4-NEXT: movd %xmm3, %esi |
| ; SSE4-NEXT: cmpl %edx, %esi |
| ; SSE4-NEXT: seta %dl |
| ; SSE4-NEXT: sbbb $0, %dl |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm7, %esi |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm3, %edi |
| ; SSE4-NEXT: cmpl %esi, %edi |
| ; SSE4-NEXT: seta %sil |
| ; SSE4-NEXT: movzbl %al, %eax |
| ; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE4-NEXT: sbbb $0, %sil |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm3, %edi |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm3, %r8d |
| ; SSE4-NEXT: cmpl %edi, %r8d |
| ; SSE4-NEXT: seta %dil |
| ; SSE4-NEXT: sbbb $0, %dil |
| ; SSE4-NEXT: movzbl %cl, %eax |
| ; SSE4-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm3, %r8d |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm3, %r9d |
| ; SSE4-NEXT: cmpl %r8d, %r9d |
| ; SSE4-NEXT: seta %r8b |
| ; SSE4-NEXT: movzbl %dl, %edx |
| ; SSE4-NEXT: sbbb $0, %r8b |
| ; SSE4-NEXT: movd %xmm6, %r9d |
| ; SSE4-NEXT: movd %xmm2, %r10d |
| ; SSE4-NEXT: cmpl %r9d, %r10d |
| ; SSE4-NEXT: seta %r9b |
| ; SSE4-NEXT: movzbl %sil, %esi |
| ; SSE4-NEXT: sbbb $0, %r9b |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm3, %r10d |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm2, %r11d |
| ; SSE4-NEXT: cmpl %r10d, %r11d |
| ; SSE4-NEXT: seta %r10b |
| ; SSE4-NEXT: sbbb $0, %r10b |
| ; SSE4-NEXT: movzbl %dil, %edi |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm2, %r11d |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm2, %ebx |
| ; SSE4-NEXT: cmpl %r11d, %ebx |
| ; SSE4-NEXT: seta %r11b |
| ; SSE4-NEXT: movzbl %r8b, %r8d |
| ; SSE4-NEXT: sbbb $0, %r11b |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm2, %ebx |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm2, %ebp |
| ; SSE4-NEXT: cmpl %ebx, %ebp |
| ; SSE4-NEXT: seta %bpl |
| ; SSE4-NEXT: sbbb $0, %bpl |
| ; SSE4-NEXT: movzbl %r9b, %r9d |
| ; SSE4-NEXT: movd %xmm5, %ebx |
| ; SSE4-NEXT: movd %xmm1, %r14d |
| ; SSE4-NEXT: cmpl %ebx, %r14d |
| ; SSE4-NEXT: seta %r14b |
| ; SSE4-NEXT: sbbb $0, %r14b |
| ; SSE4-NEXT: movzbl %r10b, %r10d |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm2 = xmm5[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm2, %ebx |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm1, %r15d |
| ; SSE4-NEXT: cmpl %ebx, %r15d |
| ; SSE4-NEXT: seta %bl |
| ; SSE4-NEXT: movzbl %r11b, %r11d |
| ; SSE4-NEXT: sbbb $0, %bl |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm1, %r15d |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3] |
| ; SSE4-NEXT: movd %xmm1, %r12d |
| ; SSE4-NEXT: cmpl %r15d, %r12d |
| ; SSE4-NEXT: seta %r12b |
| ; SSE4-NEXT: sbbb $0, %r12b |
| ; SSE4-NEXT: movzbl %bpl, %ebp |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm1, %r15d |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3] |
| ; SSE4-NEXT: movd %xmm1, %r13d |
| ; SSE4-NEXT: cmpl %r15d, %r13d |
| ; SSE4-NEXT: seta %r13b |
| ; SSE4-NEXT: movzbl %r14b, %r15d |
| ; SSE4-NEXT: sbbb $0, %r13b |
| ; SSE4-NEXT: movd %xmm4, %r14d |
| ; SSE4-NEXT: movd %xmm0, %eax |
| ; SSE4-NEXT: cmpl %r14d, %eax |
| ; SSE4-NEXT: seta %r14b |
| ; SSE4-NEXT: sbbb $0, %r14b |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm1, %eax |
| ; SSE4-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] |
| ; SSE4-NEXT: movd %xmm0, %ecx |
| ; SSE4-NEXT: cmpl %eax, %ecx |
| ; SSE4-NEXT: movzbl %bl, %eax |
| ; SSE4-NEXT: movzbl %r12b, %ecx |
| ; SSE4-NEXT: movzbl %r13b, %ebx |
| ; SSE4-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload |
| ; SSE4-NEXT: # xmm0 = mem[0],zero,zero,zero |
| ; SSE4-NEXT: movd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 4-byte Folded Reload |
| ; SSE4-NEXT: # xmm2 = mem[0],zero,zero,zero |
| ; SSE4-NEXT: movd %edx, %xmm3 |
| ; SSE4-NEXT: movd %esi, %xmm4 |
| ; SSE4-NEXT: movd %edi, %xmm5 |
| ; SSE4-NEXT: movd %r8d, %xmm6 |
| ; SSE4-NEXT: movd %r9d, %xmm1 |
| ; SSE4-NEXT: movd %r10d, %xmm7 |
| ; SSE4-NEXT: movd %r11d, %xmm8 |
| ; SSE4-NEXT: movd %ebp, %xmm9 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] |
| ; SSE4-NEXT: movd %r15d, %xmm10 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7] |
| ; SSE4-NEXT: movd %eax, %xmm0 |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3] |
| ; SSE4-NEXT: movd %ecx, %xmm2 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3],xmm1[4],xmm7[4],xmm1[5],xmm7[5],xmm1[6],xmm7[6],xmm1[7],xmm7[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3] |
| ; SSE4-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1],xmm10[2],xmm0[2],xmm10[3],xmm0[3],xmm10[4],xmm0[4],xmm10[5],xmm0[5],xmm10[6],xmm0[6],xmm10[7],xmm0[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3] |
| ; SSE4-NEXT: movd %ebx, %xmm3 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] |
| ; SSE4-NEXT: movzbl %r14b, %eax |
| ; SSE4-NEXT: seta %cl |
| ; SSE4-NEXT: sbbb $0, %cl |
| ; SSE4-NEXT: movd %eax, %xmm0 |
| ; SSE4-NEXT: movzbl %cl, %eax |
| ; SSE4-NEXT: movd %eax, %xmm2 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3] |
| ; SSE4-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1] |
| ; SSE4-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] |
| ; SSE4-NEXT: popq %rbx |
| ; SSE4-NEXT: popq %r12 |
| ; SSE4-NEXT: popq %r13 |
| ; SSE4-NEXT: popq %r14 |
| ; SSE4-NEXT: popq %r15 |
| ; SSE4-NEXT: popq %rbp |
| ; SSE4-NEXT: retq |
| ; |
| ; SSE2-LABEL: ucmp_wide_vec_op: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pextrd $1, %xmm4, %eax |
| ; SSE2-NEXT: movdqa %xmm0, %xmm8 |
| ; SSE2-NEXT: pextrd $1, %xmm0, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: movd %xmm4, %ecx |
| ; SSE2-NEXT: movd %xmm0, %edx |
| ; SSE2-NEXT: cmpl %ecx, %edx |
| ; SSE2-NEXT: seta %cl |
| ; SSE2-NEXT: sbbb $0, %cl |
| ; SSE2-NEXT: movzbl %cl, %ecx |
| ; SSE2-NEXT: movd %ecx, %xmm0 |
| ; SSE2-NEXT: pinsrb $1, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $2, %xmm4, %eax |
| ; SSE2-NEXT: pextrd $2, %xmm8, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $2, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $3, %xmm4, %eax |
| ; SSE2-NEXT: pextrd $3, %xmm8, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $3, %eax, %xmm0 |
| ; SSE2-NEXT: movd %xmm5, %eax |
| ; SSE2-NEXT: movd %xmm1, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $4, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $1, %xmm5, %eax |
| ; SSE2-NEXT: pextrd $1, %xmm1, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $5, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $2, %xmm5, %eax |
| ; SSE2-NEXT: pextrd $2, %xmm1, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $6, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $3, %xmm5, %eax |
| ; SSE2-NEXT: pextrd $3, %xmm1, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $7, %eax, %xmm0 |
| ; SSE2-NEXT: movd %xmm6, %eax |
| ; SSE2-NEXT: movd %xmm2, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $8, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $1, %xmm6, %eax |
| ; SSE2-NEXT: pextrd $1, %xmm2, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $9, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $2, %xmm6, %eax |
| ; SSE2-NEXT: pextrd $2, %xmm2, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $10, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $3, %xmm6, %eax |
| ; SSE2-NEXT: pextrd $3, %xmm2, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $11, %eax, %xmm0 |
| ; SSE2-NEXT: movd %xmm7, %eax |
| ; SSE2-NEXT: movd %xmm3, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $12, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $1, %xmm7, %eax |
| ; SSE2-NEXT: pextrd $1, %xmm3, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $13, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $2, %xmm7, %eax |
| ; SSE2-NEXT: pextrd $2, %xmm3, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $14, %eax, %xmm0 |
| ; SSE2-NEXT: pextrd $3, %xmm7, %eax |
| ; SSE2-NEXT: pextrd $3, %xmm3, %ecx |
| ; SSE2-NEXT: cmpl %eax, %ecx |
| ; SSE2-NEXT: seta %al |
| ; SSE2-NEXT: sbbb $0, %al |
| ; SSE2-NEXT: movzbl %al, %eax |
| ; SSE2-NEXT: pinsrb $15, %eax, %xmm0 |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX2-LABEL: ucmp_wide_vec_op: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: vpextrd $1, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $1, %xmm0, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vmovd %xmm2, %ecx |
| ; AVX2-NEXT: vmovd %xmm0, %edx |
| ; AVX2-NEXT: cmpl %ecx, %edx |
| ; AVX2-NEXT: seta %cl |
| ; AVX2-NEXT: sbbb $0, %cl |
| ; AVX2-NEXT: vmovd %ecx, %xmm4 |
| ; AVX2-NEXT: vpinsrb $1, %eax, %xmm4, %xmm4 |
| ; AVX2-NEXT: vpextrd $2, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $2, %xmm0, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $2, %eax, %xmm4, %xmm4 |
| ; AVX2-NEXT: vpextrd $3, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $3, %xmm0, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $3, %eax, %xmm4, %xmm4 |
| ; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 |
| ; AVX2-NEXT: vmovd %xmm2, %eax |
| ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm0, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $4, %eax, %xmm4, %xmm4 |
| ; AVX2-NEXT: vpextrd $1, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $1, %xmm0, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $5, %eax, %xmm4, %xmm4 |
| ; AVX2-NEXT: vpextrd $2, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $2, %xmm0, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $6, %eax, %xmm4, %xmm4 |
| ; AVX2-NEXT: vpextrd $3, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $3, %xmm0, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $7, %eax, %xmm4, %xmm0 |
| ; AVX2-NEXT: vmovd %xmm3, %eax |
| ; AVX2-NEXT: vmovd %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $8, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $1, %xmm3, %eax |
| ; AVX2-NEXT: vpextrd $1, %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $2, %xmm3, %eax |
| ; AVX2-NEXT: vpextrd $2, %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $3, %xmm3, %eax |
| ; AVX2-NEXT: vpextrd $3, %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm2 |
| ; AVX2-NEXT: vmovd %xmm2, %eax |
| ; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 |
| ; AVX2-NEXT: vmovd %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $12, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $1, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $1, %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $2, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $2, %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $14, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vpextrd $3, %xmm2, %eax |
| ; AVX2-NEXT: vpextrd $3, %xmm1, %ecx |
| ; AVX2-NEXT: cmpl %eax, %ecx |
| ; AVX2-NEXT: seta %al |
| ; AVX2-NEXT: sbbb $0, %al |
| ; AVX2-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0 |
| ; AVX2-NEXT: vzeroupper |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: ucmp_wide_vec_op: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: vpcmpltud %zmm1, %zmm0, %k1 |
| ; AVX512-NEXT: vpcmpnleud %zmm1, %zmm0, %k2 |
| ; AVX512-NEXT: vmovdqu8 {{.*#+}} xmm0 {%k2} {z} = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] |
| ; AVX512-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 |
| ; AVX512-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1} |
| ; AVX512-NEXT: vzeroupper |
| ; AVX512-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_wide_vec_op: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: subl $12, %esp |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %al |
| ; X86-NEXT: sbbb $0, %al |
| ; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: seta %bh |
| ; X86-NEXT: sbbb $0, %bh |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %bl |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %dh |
| ; X86-NEXT: sbbb $0, %dh |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %ch |
| ; X86-NEXT: sbbb $0, %ch |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %dl |
| ; X86-NEXT: sbbb $0, %dl |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: seta %cl |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movb %cl, 15(%eax) |
| ; X86-NEXT: movb %dl, 14(%eax) |
| ; X86-NEXT: movb %ch, 13(%eax) |
| ; X86-NEXT: movb %dh, 12(%eax) |
| ; X86-NEXT: movb %bl, 11(%eax) |
| ; X86-NEXT: movb %bh, 10(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 9(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 8(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 7(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 6(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 5(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 4(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 3(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 2(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, 1(%eax) |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: movb %cl, (%eax) |
| ; X86-NEXT: addl $12, %esp |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: popl %ebp |
| ; X86-NEXT: retl $4 |
| %1 = call <16 x i8> @llvm.ucmp(<16 x i32> %x, <16 x i32> %y) |
| ret <16 x i8> %1 |
| } |
| |
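| ; <17 x i2> from <17 x i71>: each i71 element is split into a 64-bit low part |
| ; and a 7-bit high part (masked with andl $127) for the compare. |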
| define <17 x i2> @ucmp_uncommon_vectors(<17 x i71> %x, <17 x i71> %y) nounwind { |
| ; SSE4-LABEL: ucmp_uncommon_vectors: |
| ; SSE4: # %bb.0: |
| ; SSE4-NEXT: pushq %rbp |
| ; SSE4-NEXT: pushq %r15 |
| ; SSE4-NEXT: pushq %r14 |
| ; SSE4-NEXT: pushq %r13 |
| ; SSE4-NEXT: pushq %r12 |
| ; SSE4-NEXT: pushq %rbx |
| ; SSE4-NEXT: subq $120, %rsp |
| ; SSE4-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: andl $127, %edx |
| ; SSE4-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: andl $127, %r8d |
| ; SSE4-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, (%rsp) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; SSE4-NEXT: andl $127, %r10d |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: andl $127, %eax |
| ; SSE4-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: andl $127, %ecx |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; SSE4-NEXT: andl $127, %r8d |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rbx |
| ; SSE4-NEXT: andl $127, %ebx |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; SSE4-NEXT: andl $127, %edx |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r13 |
| ; SSE4-NEXT: andl $127, %r13d |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; SSE4-NEXT: andl $127, %r11d |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; SSE4-NEXT: andl $127, %r14d |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r12 |
| ; SSE4-NEXT: andl $127, %r12d |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rbp |
| ; SSE4-NEXT: cmpq %rax, %rbp |
| ; SSE4-NEXT: movq %r12, %r15 |
| ; SSE4-NEXT: sbbq %r14, %r15 |
| ; SSE4-NEXT: setb %r15b |
| ; SSE4-NEXT: cmpq %rbp, %rax |
| ; SSE4-NEXT: sbbq %r12, %r14 |
| ; SSE4-NEXT: sbbb $0, %r15b |
| ; SSE4-NEXT: movb %r15b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; SSE4-NEXT: cmpq %rax, %r14 |
| ; SSE4-NEXT: movq %r11, %r15 |
| ; SSE4-NEXT: sbbq %r13, %r15 |
| ; SSE4-NEXT: setb %bpl |
| ; SSE4-NEXT: cmpq %r14, %rax |
| ; SSE4-NEXT: sbbq %r11, %r13 |
| ; SSE4-NEXT: sbbb $0, %bpl |
| ; SSE4-NEXT: movb %bpl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; SSE4-NEXT: cmpq %rax, %r11 |
| ; SSE4-NEXT: movq %rdx, %r14 |
| ; SSE4-NEXT: sbbq %rbx, %r14 |
| ; SSE4-NEXT: setb %bpl |
| ; SSE4-NEXT: cmpq %r11, %rax |
| ; SSE4-NEXT: sbbq %rdx, %rbx |
| ; SSE4-NEXT: sbbb $0, %bpl |
| ; SSE4-NEXT: movb %bpl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; SSE4-NEXT: cmpq %rax, %rdx |
| ; SSE4-NEXT: movq %r8, %r11 |
| ; SSE4-NEXT: sbbq %rcx, %r11 |
| ; SSE4-NEXT: setb %r11b |
| ; SSE4-NEXT: cmpq %rdx, %rax |
| ; SSE4-NEXT: sbbq %r8, %rcx |
| ; SSE4-NEXT: sbbb $0, %r11b |
| ; SSE4-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: cmpq %rax, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: movq %r8, %rdx |
| ; SSE4-NEXT: sbbq %r10, %rdx |
| ; SSE4-NEXT: setb %dl |
| ; SSE4-NEXT: cmpq %rcx, %rax |
| ; SSE4-NEXT: sbbq %r8, %r10 |
| ; SSE4-NEXT: sbbb $0, %dl |
| ; SSE4-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: cmpq %rax, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; SSE4-NEXT: movq %r11, %rdx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %rdx |
| ; SSE4-NEXT: setb %r10b |
| ; SSE4-NEXT: cmpq %rcx, %rax |
| ; SSE4-NEXT: sbbq %r11, %r8 |
| ; SSE4-NEXT: sbbb $0, %r10b |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: cmpq %rax, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; SSE4-NEXT: movq %r11, %rdx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %rdx |
| ; SSE4-NEXT: setb %dl |
| ; SSE4-NEXT: cmpq %rcx, %rax |
| ; SSE4-NEXT: sbbq %r11, %r8 |
| ; SSE4-NEXT: sbbb $0, %dl |
| ; SSE4-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: cmpq %rax, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; SSE4-NEXT: movq %r11, %rdx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %rdx |
| ; SSE4-NEXT: setb %bpl |
| ; SSE4-NEXT: cmpq %rcx, %rax |
| ; SSE4-NEXT: sbbq %r11, %r8 |
| ; SSE4-NEXT: sbbb $0, %bpl |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: cmpq %rax, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; SSE4-NEXT: movq %r11, %rdx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %rdx |
| ; SSE4-NEXT: setb %dl |
| ; SSE4-NEXT: cmpq %rcx, %rax |
| ; SSE4-NEXT: sbbq %r11, %r8 |
| ; SSE4-NEXT: sbbb $0, %dl |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: cmpq %rax, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; SSE4-NEXT: movq %r14, %r8 |
| ; SSE4-NEXT: movq (%rsp), %rbx # 8-byte Reload |
| ; SSE4-NEXT: sbbq %rbx, %r8 |
| ; SSE4-NEXT: setb %r11b |
| ; SSE4-NEXT: cmpq %rcx, %rax |
| ; SSE4-NEXT: sbbq %r14, %rbx |
| ; SSE4-NEXT: sbbb $0, %r11b |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE4-NEXT: cmpq %rax, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; SSE4-NEXT: movq %r14, %rbx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %rbx |
| ; SSE4-NEXT: setb %bl |
| ; SSE4-NEXT: cmpq %rcx, %rax |
| ; SSE4-NEXT: sbbq %r14, %r8 |
| ; SSE4-NEXT: sbbb $0, %bl |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; SSE4-NEXT: cmpq %rax, %r14 |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; SSE4-NEXT: movq %r15, %rcx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %rcx |
| ; SSE4-NEXT: setb %cl |
| ; SSE4-NEXT: cmpq %r14, %rax |
| ; SSE4-NEXT: sbbq %r15, %r8 |
| ; SSE4-NEXT: sbbb $0, %cl |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r15 |
| ; SSE4-NEXT: cmpq %rax, %r15 |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload |
| ; SSE4-NEXT: movq %r12, %r14 |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %r14 |
| ; SSE4-NEXT: setb %r14b |
| ; SSE4-NEXT: cmpq %r15, %rax |
| ; SSE4-NEXT: sbbq %r12, %r8 |
| ; SSE4-NEXT: sbbb $0, %r14b |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: cmpq %r9, %rax |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload |
| ; SSE4-NEXT: movq %r12, %r15 |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %r15 |
| ; SSE4-NEXT: setb %r15b |
| ; SSE4-NEXT: cmpq %rax, %r9 |
| ; SSE4-NEXT: sbbq %r12, %r8 |
| ; SSE4-NEXT: sbbb $0, %r15b |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload |
| ; SSE4-NEXT: cmpq %r12, %rax |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload |
| ; SSE4-NEXT: movq %r13, %r9 |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: sbbq %r8, %r9 |
| ; SSE4-NEXT: setb %r9b |
| ; SSE4-NEXT: cmpq %rax, %r12 |
| ; SSE4-NEXT: sbbq %r13, %r8 |
| ; SSE4-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r12 |
| ; SSE4-NEXT: sbbb $0, %r9b |
| ; SSE4-NEXT: cmpq %rsi, %r12 |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: movq %r8, %rdi |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; SSE4-NEXT: sbbq %rax, %rdi |
| ; SSE4-NEXT: setb %dil |
| ; SSE4-NEXT: cmpq %r12, %rsi |
| ; SSE4-NEXT: sbbq %r8, %rax |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r12 |
| ; SSE4-NEXT: movq {{[0-9]+}}(%rsp), %r13 |
| ; SSE4-NEXT: sbbb $0, %dil |
| ; SSE4-NEXT: cmpq %r12, %r13 |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE4-NEXT: movq %r8, %rsi |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; SSE4-NEXT: sbbq %rax, %rsi |
| ; SSE4-NEXT: setb %sil |
| ; SSE4-NEXT: cmpq %r13, %r12 |
| ; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload |
| ; SSE4-NEXT: movd %r12d, %xmm1 |
| ; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload |
| ; SSE4-NEXT: movd %r12d, %xmm2 |
| ; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload |
| ; SSE4-NEXT: movd %r12d, %xmm3 |
| ; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload |
| ; SSE4-NEXT: movd %r12d, %xmm4 |
| ; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r12d # 1-byte Folded Reload |
| ; SSE4-NEXT: movd %r12d, %xmm5 |
| ; SSE4-NEXT: movzbl %r10b, %r10d |
| ; SSE4-NEXT: movd %r10d, %xmm6 |
| ; SSE4-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %r10d # 1-byte Folded Reload |
| ; SSE4-NEXT: movd %r10d, %xmm7 |
| ; SSE4-NEXT: movzbl %bpl, %r10d |
| ; SSE4-NEXT: movd %r10d, %xmm0 |
| ; SSE4-NEXT: movzbl %dl, %edx |
| ; SSE4-NEXT: movd %edx, %xmm8 |
| ; SSE4-NEXT: movzbl %r11b, %edx |
| ; SSE4-NEXT: movd %edx, %xmm9 |
| ; SSE4-NEXT: movzbl %bl, %edx |
| ; SSE4-NEXT: movd %edx, %xmm10 |
| ; SSE4-NEXT: movzbl %cl, %ecx |
| ; SSE4-NEXT: movd %ecx, %xmm11 |
| ; SSE4-NEXT: movzbl %r14b, %ecx |
| ; SSE4-NEXT: movd %ecx, %xmm12 |
| ; SSE4-NEXT: movzbl %r15b, %ecx |
| ; SSE4-NEXT: movd %ecx, %xmm13 |
| ; SSE4-NEXT: movzbl %r9b, %ecx |
| ; SSE4-NEXT: movd %ecx, %xmm14 |
| ; SSE4-NEXT: movzbl %dil, %ecx |
| ; SSE4-NEXT: movd %ecx, %xmm15 |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3] |
| ; SSE4-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1],xmm11[2],xmm9[2],xmm11[3],xmm9[3] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3],xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7] |
| ; SSE4-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] |
| ; SSE4-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3] |
| ; SSE4-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm11[0],xmm15[1],xmm11[1] |
| ; SSE4-NEXT: sbbq %r8, %rax |
| ; SSE4-NEXT: sbbb $0, %sil |
| ; SSE4-NEXT: punpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm0[0] |
| ; SSE4-NEXT: movzbl %sil, %ecx |
| ; SSE4-NEXT: andl $3, %ecx |
| ; SSE4-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; SSE4-NEXT: movb %cl, 4(%rax) |
| ; SSE4-NEXT: movdqa %xmm15, -{{[0-9]+}}(%rsp) |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx |
| ; SSE4-NEXT: andl $3, %ecx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx |
| ; SSE4-NEXT: andl $3, %edx |
| ; SSE4-NEXT: leaq (%rdx,%rcx,4), %rcx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx |
| ; SSE4-NEXT: andl $3, %edx |
| ; SSE4-NEXT: shll $4, %edx |
| ; SSE4-NEXT: orq %rcx, %rdx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx |
| ; SSE4-NEXT: andl $3, %ecx |
| ; SSE4-NEXT: shll $6, %ecx |
| ; SSE4-NEXT: orq %rdx, %rcx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx |
| ; SSE4-NEXT: andl $3, %edx |
| ; SSE4-NEXT: shll $8, %edx |
| ; SSE4-NEXT: orq %rcx, %rdx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx |
| ; SSE4-NEXT: andl $3, %ecx |
| ; SSE4-NEXT: shll $10, %ecx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi |
| ; SSE4-NEXT: andl $3, %esi |
| ; SSE4-NEXT: shll $12, %esi |
| ; SSE4-NEXT: orq %rcx, %rsi |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi |
| ; SSE4-NEXT: andl $3, %edi |
| ; SSE4-NEXT: shll $14, %edi |
| ; SSE4-NEXT: orq %rsi, %rdi |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx |
| ; SSE4-NEXT: andl $3, %ecx |
| ; SSE4-NEXT: shll $16, %ecx |
| ; SSE4-NEXT: orq %rdi, %rcx |
| ; SSE4-NEXT: orq %rdx, %rcx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx |
| ; SSE4-NEXT: andl $3, %edx |
| ; SSE4-NEXT: shll $18, %edx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi |
| ; SSE4-NEXT: andl $3, %esi |
| ; SSE4-NEXT: shll $20, %esi |
| ; SSE4-NEXT: orq %rdx, %rsi |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx |
| ; SSE4-NEXT: andl $3, %edx |
| ; SSE4-NEXT: shll $22, %edx |
| ; SSE4-NEXT: orq %rsi, %rdx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi |
| ; SSE4-NEXT: andl $3, %esi |
| ; SSE4-NEXT: shll $24, %esi |
| ; SSE4-NEXT: orq %rdx, %rsi |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx |
| ; SSE4-NEXT: andl $3, %edx |
| ; SSE4-NEXT: shlq $26, %rdx |
| ; SSE4-NEXT: orq %rsi, %rdx |
| ; SSE4-NEXT: orq %rcx, %rdx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx |
| ; SSE4-NEXT: andl $3, %ecx |
| ; SSE4-NEXT: shlq $28, %rcx |
| ; SSE4-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi |
| ; SSE4-NEXT: andl $3, %esi |
| ; SSE4-NEXT: shlq $30, %rsi |
| ; SSE4-NEXT: orq %rcx, %rsi |
| ; SSE4-NEXT: orq %rdx, %rsi |
| ; SSE4-NEXT: movl %esi, (%rax) |
| ; SSE4-NEXT: addq $120, %rsp |
| ; SSE4-NEXT: popq %rbx |
| ; SSE4-NEXT: popq %r12 |
| ; SSE4-NEXT: popq %r13 |
| ; SSE4-NEXT: popq %r14 |
| ; SSE4-NEXT: popq %r15 |
| ; SSE4-NEXT: popq %rbp |
| ; SSE4-NEXT: retq |
| ; |
| ; SSE2-LABEL: ucmp_uncommon_vectors: |
| ; SSE2: # %bb.0: |
| ; SSE2-NEXT: pushq %rbp |
| ; SSE2-NEXT: pushq %r15 |
| ; SSE2-NEXT: pushq %r14 |
| ; SSE2-NEXT: pushq %r13 |
| ; SSE2-NEXT: pushq %r12 |
| ; SSE2-NEXT: pushq %rbx |
| ; SSE2-NEXT: subq $88, %rsp |
| ; SSE2-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: andl $127, %r8d |
| ; SSE2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: andl $127, %edx |
| ; SSE2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, (%rsp) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: andl $127, %ecx |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: andl $127, %eax |
| ; SSE2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rbx |
| ; SSE2-NEXT: andl $127, %ebx |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; SSE2-NEXT: andl $127, %edx |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; SSE2-NEXT: andl $127, %r10d |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; SSE2-NEXT: andl $127, %r14d |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rbp |
| ; SSE2-NEXT: andl $127, %ebp |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r13 |
| ; SSE2-NEXT: andl $127, %r13d |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; SSE2-NEXT: andl $127, %r11d |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r15 |
| ; SSE2-NEXT: andl $127, %r15d |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r12 |
| ; SSE2-NEXT: cmpq %rax, %r12 |
| ; SSE2-NEXT: movq %r15, %r8 |
| ; SSE2-NEXT: sbbq %r11, %r8 |
| ; SSE2-NEXT: setb %r8b |
| ; SSE2-NEXT: cmpq %r12, %rax |
| ; SSE2-NEXT: sbbq %r15, %r11 |
| ; SSE2-NEXT: sbbb $0, %r8b |
| ; SSE2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; SSE2-NEXT: cmpq %rax, %r8 |
| ; SSE2-NEXT: movq %r13, %r11 |
| ; SSE2-NEXT: sbbq %rbp, %r11 |
| ; SSE2-NEXT: setb %r11b |
| ; SSE2-NEXT: cmpq %r8, %rax |
| ; SSE2-NEXT: sbbq %r13, %rbp |
| ; SSE2-NEXT: sbbb $0, %r11b |
| ; SSE2-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; SSE2-NEXT: cmpq %rax, %r8 |
| ; SSE2-NEXT: movq %r14, %r11 |
| ; SSE2-NEXT: sbbq %r10, %r11 |
| ; SSE2-NEXT: setb %r11b |
| ; SSE2-NEXT: cmpq %r8, %rax |
| ; SSE2-NEXT: sbbq %r14, %r10 |
| ; SSE2-NEXT: sbbb $0, %r11b |
| ; SSE2-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; SSE2-NEXT: cmpq %rax, %r8 |
| ; SSE2-NEXT: movq %rdx, %r10 |
| ; SSE2-NEXT: sbbq %rbx, %r10 |
| ; SSE2-NEXT: setb %r10b |
| ; SSE2-NEXT: cmpq %r8, %rax |
| ; SSE2-NEXT: sbbq %rdx, %rbx |
| ; SSE2-NEXT: sbbb $0, %r10b |
| ; SSE2-NEXT: movb %r10b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; SSE2-NEXT: cmpq %rax, %rdx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; SSE2-NEXT: movq %r10, %r8 |
| ; SSE2-NEXT: sbbq %rcx, %r8 |
| ; SSE2-NEXT: setb %r8b |
| ; SSE2-NEXT: cmpq %rdx, %rax |
| ; SSE2-NEXT: sbbq %r10, %rcx |
| ; SSE2-NEXT: sbbb $0, %r8b |
| ; SSE2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: cmpq %rax, %rcx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; SSE2-NEXT: movq %r10, %rdx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r8, %rdx |
| ; SSE2-NEXT: setb %dl |
| ; SSE2-NEXT: cmpq %rcx, %rax |
| ; SSE2-NEXT: sbbq %r10, %r8 |
| ; SSE2-NEXT: sbbb $0, %dl |
| ; SSE2-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: cmpq %rax, %rcx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; SSE2-NEXT: movq %r10, %rdx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r8, %rdx |
| ; SSE2-NEXT: setb %dl |
| ; SSE2-NEXT: cmpq %rcx, %rax |
| ; SSE2-NEXT: sbbq %r10, %r8 |
| ; SSE2-NEXT: sbbb $0, %dl |
| ; SSE2-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: cmpq %rax, %rcx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; SSE2-NEXT: movq %r11, %rdx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r10, %rdx |
| ; SSE2-NEXT: setb %r8b |
| ; SSE2-NEXT: cmpq %rcx, %rax |
| ; SSE2-NEXT: sbbq %r11, %r10 |
| ; SSE2-NEXT: sbbb $0, %r8b |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: cmpq %rax, %rcx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload |
| ; SSE2-NEXT: movq %rbx, %rdx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r10, %rdx |
| ; SSE2-NEXT: setb %r11b |
| ; SSE2-NEXT: cmpq %rcx, %rax |
| ; SSE2-NEXT: sbbq %rbx, %r10 |
| ; SSE2-NEXT: sbbb $0, %r11b |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: cmpq %rax, %rcx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload |
| ; SSE2-NEXT: movq %rbx, %rdx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r10, %rdx |
| ; SSE2-NEXT: setb %dl |
| ; SSE2-NEXT: cmpq %rcx, %rax |
| ; SSE2-NEXT: sbbq %rbx, %r10 |
| ; SSE2-NEXT: sbbb $0, %dl |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; SSE2-NEXT: cmpq %rax, %rcx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; SSE2-NEXT: movq %r14, %r10 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload |
| ; SSE2-NEXT: sbbq %rbx, %r10 |
| ; SSE2-NEXT: setb %r10b |
| ; SSE2-NEXT: cmpq %rcx, %rax |
| ; SSE2-NEXT: sbbq %r14, %rbx |
| ; SSE2-NEXT: sbbb $0, %r10b |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rbx |
| ; SSE2-NEXT: cmpq %rax, %rbx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; SSE2-NEXT: movq %r15, %rcx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r14, %rcx |
| ; SSE2-NEXT: setb %cl |
| ; SSE2-NEXT: cmpq %rbx, %rax |
| ; SSE2-NEXT: sbbq %r15, %r14 |
| ; SSE2-NEXT: sbbb $0, %cl |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; SSE2-NEXT: cmpq %rax, %r14 |
| ; SSE2-NEXT: movq (%rsp), %r12 # 8-byte Reload |
| ; SSE2-NEXT: movq %r12, %rbx |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r15, %rbx |
| ; SSE2-NEXT: setb %bl |
| ; SSE2-NEXT: cmpq %r14, %rax |
| ; SSE2-NEXT: sbbq %r12, %r15 |
| ; SSE2-NEXT: sbbb $0, %bl |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: cmpq %r9, %rax |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload |
| ; SSE2-NEXT: movq %r12, %r14 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r15, %r14 |
| ; SSE2-NEXT: setb %bpl |
| ; SSE2-NEXT: cmpq %rax, %r9 |
| ; SSE2-NEXT: sbbq %r12, %r15 |
| ; SSE2-NEXT: sbbb $0, %bpl |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; SSE2-NEXT: cmpq %rsi, %rax |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; SSE2-NEXT: movq %r15, %r9 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r14, %r9 |
| ; SSE2-NEXT: setb %r9b |
| ; SSE2-NEXT: cmpq %rax, %rsi |
| ; SSE2-NEXT: sbbq %r15, %r14 |
| ; SSE2-NEXT: movq %rdi, %rax |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rsi |
| ; SSE2-NEXT: sbbb $0, %r9b |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; SSE2-NEXT: cmpq %r15, %rsi |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload |
| ; SSE2-NEXT: movq %r12, %rdi |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r14, %rdi |
| ; SSE2-NEXT: setb %dil |
| ; SSE2-NEXT: cmpq %rsi, %r15 |
| ; SSE2-NEXT: sbbq %r12, %r14 |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rsi |
| ; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; SSE2-NEXT: sbbb $0, %dil |
| ; SSE2-NEXT: cmpq %rsi, %r14 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload |
| ; SSE2-NEXT: movq %r13, %r15 |
| ; SSE2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload |
| ; SSE2-NEXT: sbbq %r12, %r15 |
| ; SSE2-NEXT: setb %r15b |
| ; SSE2-NEXT: cmpq %r14, %rsi |
| ; SSE2-NEXT: sbbq %r13, %r12 |
| ; SSE2-NEXT: sbbb $0, %r15b |
| ; SSE2-NEXT: movzbl %r15b, %esi |
| ; SSE2-NEXT: andl $3, %esi |
| ; SSE2-NEXT: movb %sil, 4(%rax) |
| ; SSE2-NEXT: movzbl %dil, %esi |
| ; SSE2-NEXT: movzbl %r9b, %edi |
| ; SSE2-NEXT: andl $3, %esi |
| ; SSE2-NEXT: andl $3, %edi |
| ; SSE2-NEXT: leaq (%rdi,%rsi,4), %rsi |
| ; SSE2-NEXT: movzbl %bpl, %edi |
| ; SSE2-NEXT: andl $3, %edi |
| ; SSE2-NEXT: shll $4, %edi |
| ; SSE2-NEXT: orq %rsi, %rdi |
| ; SSE2-NEXT: movzbl %bl, %r9d |
| ; SSE2-NEXT: andl $3, %r9d |
| ; SSE2-NEXT: shll $6, %r9d |
| ; SSE2-NEXT: orq %rdi, %r9 |
| ; SSE2-NEXT: movzbl %cl, %esi |
| ; SSE2-NEXT: andl $3, %esi |
| ; SSE2-NEXT: shll $8, %esi |
| ; SSE2-NEXT: orq %r9, %rsi |
| ; SSE2-NEXT: movzbl %dl, %ecx |
| ; SSE2-NEXT: movzbl %r10b, %edx |
| ; SSE2-NEXT: andl $3, %edx |
| ; SSE2-NEXT: shll $10, %edx |
| ; SSE2-NEXT: andl $3, %ecx |
| ; SSE2-NEXT: shll $12, %ecx |
| ; SSE2-NEXT: orq %rdx, %rcx |
| ; SSE2-NEXT: movzbl %r11b, %edx |
| ; SSE2-NEXT: andl $3, %edx |
| ; SSE2-NEXT: shll $14, %edx |
| ; SSE2-NEXT: orq %rcx, %rdx |
| ; SSE2-NEXT: movzbl %r8b, %ecx |
| ; SSE2-NEXT: andl $3, %ecx |
| ; SSE2-NEXT: shll $16, %ecx |
| ; SSE2-NEXT: orq %rdx, %rcx |
| ; SSE2-NEXT: orq %rsi, %rcx |
| ; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; SSE2-NEXT: andl $3, %esi |
| ; SSE2-NEXT: shll $18, %esi |
| ; SSE2-NEXT: andl $3, %edx |
| ; SSE2-NEXT: shll $20, %edx |
| ; SSE2-NEXT: orq %rsi, %rdx |
| ; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; SSE2-NEXT: andl $3, %esi |
| ; SSE2-NEXT: shll $22, %esi |
| ; SSE2-NEXT: orq %rdx, %rsi |
| ; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; SSE2-NEXT: andl $3, %edx |
| ; SSE2-NEXT: shll $24, %edx |
| ; SSE2-NEXT: orq %rsi, %rdx |
| ; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; SSE2-NEXT: andl $3, %esi |
| ; SSE2-NEXT: shlq $26, %rsi |
| ; SSE2-NEXT: orq %rdx, %rsi |
| ; SSE2-NEXT: orq %rcx, %rsi |
| ; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; SSE2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; SSE2-NEXT: andl $3, %edx |
| ; SSE2-NEXT: shlq $28, %rdx |
| ; SSE2-NEXT: andl $3, %ecx |
| ; SSE2-NEXT: shlq $30, %rcx |
| ; SSE2-NEXT: orq %rdx, %rcx |
| ; SSE2-NEXT: orq %rsi, %rcx |
| ; SSE2-NEXT: movl %ecx, (%rax) |
| ; SSE2-NEXT: addq $88, %rsp |
| ; SSE2-NEXT: popq %rbx |
| ; SSE2-NEXT: popq %r12 |
| ; SSE2-NEXT: popq %r13 |
| ; SSE2-NEXT: popq %r14 |
| ; SSE2-NEXT: popq %r15 |
| ; SSE2-NEXT: popq %rbp |
| ; SSE2-NEXT: retq |
| ; |
| ; AVX2-LABEL: ucmp_uncommon_vectors: |
| ; AVX2: # %bb.0: |
| ; AVX2-NEXT: pushq %rbp |
| ; AVX2-NEXT: pushq %r15 |
| ; AVX2-NEXT: pushq %r14 |
| ; AVX2-NEXT: pushq %r13 |
| ; AVX2-NEXT: pushq %r12 |
| ; AVX2-NEXT: pushq %rbx |
| ; AVX2-NEXT: subq $88, %rsp |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: andl $127, %r8d |
| ; AVX2-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: andl $127, %edx |
| ; AVX2-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r15 |
| ; AVX2-NEXT: andl $127, %r15d |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: andl $127, %eax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; AVX2-NEXT: andl $127, %r14d |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: andl $127, %edx |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbp |
| ; AVX2-NEXT: andl $127, %ebp |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; AVX2-NEXT: andl $127, %r8d |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r12 |
| ; AVX2-NEXT: andl $127, %r12d |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r13 |
| ; AVX2-NEXT: andl $127, %r13d |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX2-NEXT: cmpq %rbx, %r11 |
| ; AVX2-NEXT: movq %r13, %r10 |
| ; AVX2-NEXT: sbbq %r12, %r10 |
| ; AVX2-NEXT: setb %r10b |
| ; AVX2-NEXT: cmpq %r11, %rbx |
| ; AVX2-NEXT: sbbq %r13, %r12 |
| ; AVX2-NEXT: sbbb $0, %r10b |
| ; AVX2-NEXT: movb %r10b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX2-NEXT: cmpq %r10, %r11 |
| ; AVX2-NEXT: movq %r8, %rbx |
| ; AVX2-NEXT: sbbq %rbp, %rbx |
| ; AVX2-NEXT: setb %bl |
| ; AVX2-NEXT: cmpq %r11, %r10 |
| ; AVX2-NEXT: sbbq %r8, %rbp |
| ; AVX2-NEXT: sbbb $0, %bl |
| ; AVX2-NEXT: movb %bl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX2-NEXT: cmpq %r8, %r10 |
| ; AVX2-NEXT: movq %rdx, %r11 |
| ; AVX2-NEXT: sbbq %r14, %r11 |
| ; AVX2-NEXT: setb %r11b |
| ; AVX2-NEXT: cmpq %r10, %r8 |
| ; AVX2-NEXT: sbbq %rdx, %r14 |
| ; AVX2-NEXT: sbbb $0, %r11b |
| ; AVX2-NEXT: movb %r11b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; AVX2-NEXT: cmpq %rdx, %r8 |
| ; AVX2-NEXT: movq %rax, %r10 |
| ; AVX2-NEXT: sbbq %r15, %r10 |
| ; AVX2-NEXT: setb %r10b |
| ; AVX2-NEXT: cmpq %r8, %rdx |
| ; AVX2-NEXT: sbbq %rax, %r15 |
| ; AVX2-NEXT: sbbb $0, %r10b |
| ; AVX2-NEXT: movb %r10b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: cmpq %rax, %rdx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; AVX2-NEXT: movq %r11, %r8 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r10, %r8 |
| ; AVX2-NEXT: setb %r8b |
| ; AVX2-NEXT: cmpq %rdx, %rax |
| ; AVX2-NEXT: sbbq %r11, %r10 |
| ; AVX2-NEXT: sbbb $0, %r8b |
| ; AVX2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: cmpq %rax, %rdx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; AVX2-NEXT: movq %r11, %r8 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r10, %r8 |
| ; AVX2-NEXT: setb %r8b |
| ; AVX2-NEXT: cmpq %rdx, %rax |
| ; AVX2-NEXT: sbbq %r11, %r10 |
| ; AVX2-NEXT: sbbb $0, %r8b |
| ; AVX2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: cmpq %rax, %rdx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; AVX2-NEXT: movq %r11, %r8 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r10, %r8 |
| ; AVX2-NEXT: setb %r8b |
| ; AVX2-NEXT: cmpq %rdx, %rax |
| ; AVX2-NEXT: sbbq %r11, %r10 |
| ; AVX2-NEXT: sbbb $0, %r8b |
| ; AVX2-NEXT: movb %r8b, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: cmpq %rax, %rdx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; AVX2-NEXT: movq %r11, %r8 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r10, %r8 |
| ; AVX2-NEXT: setb %r12b |
| ; AVX2-NEXT: cmpq %rdx, %rax |
| ; AVX2-NEXT: sbbq %r11, %r10 |
| ; AVX2-NEXT: sbbb $0, %r12b |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX2-NEXT: cmpq %rax, %rdx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; AVX2-NEXT: movq %r11, %r8 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r10, %r8 |
| ; AVX2-NEXT: setb %r8b |
| ; AVX2-NEXT: cmpq %rdx, %rax |
| ; AVX2-NEXT: sbbq %r11, %r10 |
| ; AVX2-NEXT: sbbb $0, %r8b |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX2-NEXT: cmpq %rax, %r10 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload |
| ; AVX2-NEXT: movq %rbx, %rdx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r11, %rdx |
| ; AVX2-NEXT: setb %dl |
| ; AVX2-NEXT: cmpq %r10, %rax |
| ; AVX2-NEXT: sbbq %rbx, %r11 |
| ; AVX2-NEXT: sbbb $0, %dl |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX2-NEXT: cmpq %rax, %r11 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; AVX2-NEXT: movq %r14, %r10 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload |
| ; AVX2-NEXT: sbbq %rbx, %r10 |
| ; AVX2-NEXT: setb %r10b |
| ; AVX2-NEXT: cmpq %r11, %rax |
| ; AVX2-NEXT: sbbq %r14, %rbx |
| ; AVX2-NEXT: sbbb $0, %r10b |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rbx |
| ; AVX2-NEXT: cmpq %rax, %rbx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; AVX2-NEXT: movq %r15, %r11 |
| ; AVX2-NEXT: movq (%rsp), %r14 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r14, %r11 |
| ; AVX2-NEXT: setb %r11b |
| ; AVX2-NEXT: cmpq %rbx, %rax |
| ; AVX2-NEXT: sbbq %r15, %r14 |
| ; AVX2-NEXT: sbbb $0, %r11b |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; AVX2-NEXT: cmpq %rax, %r14 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload |
| ; AVX2-NEXT: movq %r13, %rbx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r15, %rbx |
| ; AVX2-NEXT: setb %bl |
| ; AVX2-NEXT: cmpq %r14, %rax |
| ; AVX2-NEXT: sbbq %r13, %r15 |
| ; AVX2-NEXT: sbbb $0, %bl |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: cmpq %r9, %rax |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload |
| ; AVX2-NEXT: movq %r13, %r14 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r15, %r14 |
| ; AVX2-NEXT: setb %bpl |
| ; AVX2-NEXT: cmpq %rax, %r9 |
| ; AVX2-NEXT: sbbq %r13, %r15 |
| ; AVX2-NEXT: sbbb $0, %bpl |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: cmpq %rsi, %rax |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; AVX2-NEXT: movq %r15, %r9 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r14, %r9 |
| ; AVX2-NEXT: setb %r9b |
| ; AVX2-NEXT: cmpq %rax, %rsi |
| ; AVX2-NEXT: sbbq %r15, %r14 |
| ; AVX2-NEXT: sbbb $0, %r9b |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: cmpq %rcx, %rax |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; AVX2-NEXT: movq %r15, %rsi |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r14, %rsi |
| ; AVX2-NEXT: setb %sil |
| ; AVX2-NEXT: cmpq %rax, %rcx |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX2-NEXT: sbbq %r15, %r14 |
| ; AVX2-NEXT: sbbb $0, %sil |
| ; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX2-NEXT: cmpq %rax, %rcx |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload |
| ; AVX2-NEXT: movq %r13, %r14 |
| ; AVX2-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; AVX2-NEXT: sbbq %r15, %r14 |
| ; AVX2-NEXT: setb %r14b |
| ; AVX2-NEXT: cmpq %rcx, %rax |
| ; AVX2-NEXT: sbbq %r13, %r15 |
| ; AVX2-NEXT: movq %rdi, %rax |
| ; AVX2-NEXT: sbbb $0, %r14b |
| ; AVX2-NEXT: movzbl %r14b, %ecx |
| ; AVX2-NEXT: andl $3, %ecx |
| ; AVX2-NEXT: movb %cl, 4(%rdi) |
| ; AVX2-NEXT: movzbl %sil, %ecx |
| ; AVX2-NEXT: andl $3, %ecx |
| ; AVX2-NEXT: movzbl %r9b, %esi |
| ; AVX2-NEXT: andl $3, %esi |
| ; AVX2-NEXT: leaq (%rsi,%rcx,4), %rcx |
| ; AVX2-NEXT: movzbl %bpl, %esi |
| ; AVX2-NEXT: andl $3, %esi |
| ; AVX2-NEXT: shll $4, %esi |
| ; AVX2-NEXT: orq %rcx, %rsi |
| ; AVX2-NEXT: movzbl %bl, %ecx |
| ; AVX2-NEXT: andl $3, %ecx |
| ; AVX2-NEXT: shll $6, %ecx |
| ; AVX2-NEXT: orq %rsi, %rcx |
| ; AVX2-NEXT: movzbl %r11b, %esi |
| ; AVX2-NEXT: andl $3, %esi |
| ; AVX2-NEXT: shll $8, %esi |
| ; AVX2-NEXT: orq %rcx, %rsi |
| ; AVX2-NEXT: movzbl %r10b, %ecx |
| ; AVX2-NEXT: andl $3, %ecx |
| ; AVX2-NEXT: shll $10, %ecx |
| ; AVX2-NEXT: movzbl %dl, %edx |
| ; AVX2-NEXT: andl $3, %edx |
| ; AVX2-NEXT: shll $12, %edx |
| ; AVX2-NEXT: orq %rcx, %rdx |
| ; AVX2-NEXT: movzbl %r8b, %edi |
| ; AVX2-NEXT: andl $3, %edi |
| ; AVX2-NEXT: shll $14, %edi |
| ; AVX2-NEXT: orq %rdx, %rdi |
| ; AVX2-NEXT: movzbl %r12b, %ecx |
| ; AVX2-NEXT: andl $3, %ecx |
| ; AVX2-NEXT: shll $16, %ecx |
| ; AVX2-NEXT: orq %rdi, %rcx |
| ; AVX2-NEXT: orq %rsi, %rcx |
| ; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; AVX2-NEXT: andl $3, %edx |
| ; AVX2-NEXT: shll $18, %edx |
| ; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; AVX2-NEXT: andl $3, %esi |
| ; AVX2-NEXT: shll $20, %esi |
| ; AVX2-NEXT: orq %rdx, %rsi |
| ; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; AVX2-NEXT: andl $3, %edx |
| ; AVX2-NEXT: shll $22, %edx |
| ; AVX2-NEXT: orq %rsi, %rdx |
| ; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; AVX2-NEXT: andl $3, %esi |
| ; AVX2-NEXT: shll $24, %esi |
| ; AVX2-NEXT: orq %rdx, %rsi |
| ; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; AVX2-NEXT: andl $3, %edx |
| ; AVX2-NEXT: shlq $26, %rdx |
| ; AVX2-NEXT: orq %rsi, %rdx |
| ; AVX2-NEXT: orq %rcx, %rdx |
| ; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; AVX2-NEXT: andl $3, %ecx |
| ; AVX2-NEXT: shlq $28, %rcx |
| ; AVX2-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; AVX2-NEXT: andl $3, %esi |
| ; AVX2-NEXT: shlq $30, %rsi |
| ; AVX2-NEXT: orq %rcx, %rsi |
| ; AVX2-NEXT: orq %rdx, %rsi |
| ; AVX2-NEXT: movl %esi, (%rax) |
| ; AVX2-NEXT: addq $88, %rsp |
| ; AVX2-NEXT: popq %rbx |
| ; AVX2-NEXT: popq %r12 |
| ; AVX2-NEXT: popq %r13 |
| ; AVX2-NEXT: popq %r14 |
| ; AVX2-NEXT: popq %r15 |
| ; AVX2-NEXT: popq %rbp |
| ; AVX2-NEXT: retq |
| ; |
| ; AVX512-LABEL: ucmp_uncommon_vectors: |
| ; AVX512: # %bb.0: |
| ; AVX512-NEXT: pushq %rbp |
| ; AVX512-NEXT: pushq %r15 |
| ; AVX512-NEXT: pushq %r14 |
| ; AVX512-NEXT: pushq %r13 |
| ; AVX512-NEXT: pushq %r12 |
| ; AVX512-NEXT: pushq %rbx |
| ; AVX512-NEXT: subq $88, %rsp |
| ; AVX512-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: andl $127, %r8d |
| ; AVX512-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: andl $127, %edx |
| ; AVX512-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, (%rsp) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbp |
| ; AVX512-NEXT: andl $127, %ebp |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r12 |
| ; AVX512-NEXT: andl $127, %r12d |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r13 |
| ; AVX512-NEXT: andl $127, %r13d |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r15 |
| ; AVX512-NEXT: andl $127, %r15d |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512-NEXT: andl $127, %r10d |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rbx |
| ; AVX512-NEXT: andl $127, %ebx |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; AVX512-NEXT: andl $127, %r8d |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9 |
| ; AVX512-NEXT: andl $127, %r9d |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi |
| ; AVX512-NEXT: andl $127, %esi |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi |
| ; AVX512-NEXT: andl $127, %edi |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: andl $127, %eax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX512-NEXT: andl $127, %edx |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX512-NEXT: cmpq %r14, %r11 |
| ; AVX512-NEXT: movq %rdx, %rcx |
| ; AVX512-NEXT: sbbq %rax, %rcx |
| ; AVX512-NEXT: setb %cl |
| ; AVX512-NEXT: cmpq %r11, %r14 |
| ; AVX512-NEXT: sbbq %rdx, %rax |
| ; AVX512-NEXT: sbbb $0, %cl |
| ; AVX512-NEXT: movb %cl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: cmpq %rax, %rcx |
| ; AVX512-NEXT: movq %rdi, %rdx |
| ; AVX512-NEXT: sbbq %rsi, %rdx |
| ; AVX512-NEXT: setb %dl |
| ; AVX512-NEXT: cmpq %rcx, %rax |
| ; AVX512-NEXT: sbbq %rdi, %rsi |
| ; AVX512-NEXT: sbbb $0, %dl |
| ; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: cmpq %rax, %rcx |
| ; AVX512-NEXT: movq %r9, %rdx |
| ; AVX512-NEXT: sbbq %r8, %rdx |
| ; AVX512-NEXT: setb %dl |
| ; AVX512-NEXT: cmpq %rcx, %rax |
| ; AVX512-NEXT: sbbq %r9, %r8 |
| ; AVX512-NEXT: sbbb $0, %dl |
| ; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: cmpq %rax, %rcx |
| ; AVX512-NEXT: movq %rbx, %rdx |
| ; AVX512-NEXT: sbbq %r10, %rdx |
| ; AVX512-NEXT: setb %dl |
| ; AVX512-NEXT: cmpq %rcx, %rax |
| ; AVX512-NEXT: sbbq %rbx, %r10 |
| ; AVX512-NEXT: sbbb $0, %dl |
| ; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: cmpq %rax, %rcx |
| ; AVX512-NEXT: movq %r15, %rdx |
| ; AVX512-NEXT: sbbq %r13, %rdx |
| ; AVX512-NEXT: setb %dl |
| ; AVX512-NEXT: cmpq %rcx, %rax |
| ; AVX512-NEXT: sbbq %r15, %r13 |
| ; AVX512-NEXT: sbbb $0, %dl |
| ; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: cmpq %rax, %rcx |
| ; AVX512-NEXT: movq %r12, %rdx |
| ; AVX512-NEXT: sbbq %rbp, %rdx |
| ; AVX512-NEXT: setb %dl |
| ; AVX512-NEXT: cmpq %rcx, %rax |
| ; AVX512-NEXT: sbbq %r12, %rbp |
| ; AVX512-NEXT: sbbb $0, %dl |
| ; AVX512-NEXT: movb %dl, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: cmpq %rax, %rcx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload |
| ; AVX512-NEXT: movq %rdi, %rdx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rsi, %rdx |
| ; AVX512-NEXT: setb %r13b |
| ; AVX512-NEXT: cmpq %rcx, %rax |
| ; AVX512-NEXT: sbbq %rdi, %rsi |
| ; AVX512-NEXT: sbbb $0, %r13b |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: cmpq %rax, %rcx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload |
| ; AVX512-NEXT: movq %rdi, %rdx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rsi, %rdx |
| ; AVX512-NEXT: setb %bpl |
| ; AVX512-NEXT: cmpq %rcx, %rax |
| ; AVX512-NEXT: sbbq %rdi, %rsi |
| ; AVX512-NEXT: sbbb $0, %bpl |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rcx |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX512-NEXT: cmpq %rcx, %rdx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload |
| ; AVX512-NEXT: movq %rdi, %rax |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rsi, %rax |
| ; AVX512-NEXT: setb %r9b |
| ; AVX512-NEXT: cmpq %rdx, %rcx |
| ; AVX512-NEXT: sbbq %rdi, %rsi |
| ; AVX512-NEXT: sbbb $0, %r9b |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi |
| ; AVX512-NEXT: cmpq %rdx, %rsi |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload |
| ; AVX512-NEXT: movq %rdi, %rcx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rax, %rcx |
| ; AVX512-NEXT: setb %cl |
| ; AVX512-NEXT: cmpq %rsi, %rdx |
| ; AVX512-NEXT: sbbq %rdi, %rax |
| ; AVX512-NEXT: sbbb $0, %cl |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rsi |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi |
| ; AVX512-NEXT: cmpq %rsi, %rdi |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload |
| ; AVX512-NEXT: movq %r8, %rdx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rax, %rdx |
| ; AVX512-NEXT: setb %dl |
| ; AVX512-NEXT: cmpq %rdi, %rsi |
| ; AVX512-NEXT: sbbq %r8, %rax |
| ; AVX512-NEXT: sbbb $0, %dl |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdi |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; AVX512-NEXT: cmpq %rdi, %r8 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload |
| ; AVX512-NEXT: movq %r10, %rsi |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rax, %rsi |
| ; AVX512-NEXT: setb %sil |
| ; AVX512-NEXT: cmpq %r8, %rdi |
| ; AVX512-NEXT: sbbq %r10, %rax |
| ; AVX512-NEXT: sbbb $0, %sil |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r8 |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512-NEXT: cmpq %r8, %r10 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload |
| ; AVX512-NEXT: movq %r11, %rdi |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rax, %rdi |
| ; AVX512-NEXT: setb %dil |
| ; AVX512-NEXT: cmpq %r10, %r8 |
| ; AVX512-NEXT: sbbq %r11, %rax |
| ; AVX512-NEXT: sbbb $0, %dil |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: cmpq %rax, %r10 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload |
| ; AVX512-NEXT: movq %rbx, %r8 |
| ; AVX512-NEXT: movq (%rsp), %r11 # 8-byte Reload |
| ; AVX512-NEXT: sbbq %r11, %r8 |
| ; AVX512-NEXT: setb %r8b |
| ; AVX512-NEXT: cmpq %r10, %rax |
| ; AVX512-NEXT: sbbq %rbx, %r11 |
| ; AVX512-NEXT: sbbb $0, %r8b |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload |
| ; AVX512-NEXT: cmpq %rbx, %r11 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; AVX512-NEXT: movq %r14, %r10 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: sbbq %rax, %r10 |
| ; AVX512-NEXT: setb %r10b |
| ; AVX512-NEXT: cmpq %r11, %rbx |
| ; AVX512-NEXT: sbbq %r14, %rax |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX512-NEXT: sbbb $0, %r10b |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload |
| ; AVX512-NEXT: cmpq %r15, %r11 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: movq %rax, %rbx |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; AVX512-NEXT: sbbq %r14, %rbx |
| ; AVX512-NEXT: setb %bl |
| ; AVX512-NEXT: cmpq %r11, %r15 |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r11 |
| ; AVX512-NEXT: sbbq %rax, %r14 |
| ; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r14 |
| ; AVX512-NEXT: sbbb $0, %bl |
| ; AVX512-NEXT: cmpq %r11, %r14 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload |
| ; AVX512-NEXT: movq %rax, %r15 |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload |
| ; AVX512-NEXT: sbbq %r12, %r15 |
| ; AVX512-NEXT: setb %r15b |
| ; AVX512-NEXT: cmpq %r14, %r11 |
| ; AVX512-NEXT: sbbq %rax, %r12 |
| ; AVX512-NEXT: sbbb $0, %r15b |
| ; AVX512-NEXT: movzbl %r15b, %r11d |
| ; AVX512-NEXT: andl $3, %r11d |
| ; AVX512-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload |
| ; AVX512-NEXT: movb %r11b, 4(%r14) |
| ; AVX512-NEXT: movzbl %bl, %r11d |
| ; AVX512-NEXT: andl $3, %r11d |
| ; AVX512-NEXT: movzbl %r10b, %r10d |
| ; AVX512-NEXT: andl $3, %r10d |
| ; AVX512-NEXT: leaq (%r10,%r11,4), %r10 |
| ; AVX512-NEXT: movzbl %r8b, %r8d |
| ; AVX512-NEXT: andl $3, %r8d |
| ; AVX512-NEXT: shll $4, %r8d |
| ; AVX512-NEXT: orq %r10, %r8 |
| ; AVX512-NEXT: movzbl %dil, %edi |
| ; AVX512-NEXT: andl $3, %edi |
| ; AVX512-NEXT: shll $6, %edi |
| ; AVX512-NEXT: orq %r8, %rdi |
| ; AVX512-NEXT: movzbl %sil, %esi |
| ; AVX512-NEXT: andl $3, %esi |
| ; AVX512-NEXT: shll $8, %esi |
| ; AVX512-NEXT: orq %rdi, %rsi |
| ; AVX512-NEXT: movzbl %dl, %edx |
| ; AVX512-NEXT: andl $3, %edx |
| ; AVX512-NEXT: shll $10, %edx |
| ; AVX512-NEXT: movzbl %cl, %ecx |
| ; AVX512-NEXT: andl $3, %ecx |
| ; AVX512-NEXT: shll $12, %ecx |
| ; AVX512-NEXT: orq %rdx, %rcx |
| ; AVX512-NEXT: movzbl %r9b, %edx |
| ; AVX512-NEXT: andl $3, %edx |
| ; AVX512-NEXT: shll $14, %edx |
| ; AVX512-NEXT: orq %rcx, %rdx |
| ; AVX512-NEXT: movzbl %bpl, %eax |
| ; AVX512-NEXT: andl $3, %eax |
| ; AVX512-NEXT: shll $16, %eax |
| ; AVX512-NEXT: orq %rdx, %rax |
| ; AVX512-NEXT: orq %rsi, %rax |
| ; AVX512-NEXT: movzbl %r13b, %ecx |
| ; AVX512-NEXT: andl $3, %ecx |
| ; AVX512-NEXT: shll $18, %ecx |
| ; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; AVX512-NEXT: andl $3, %edx |
| ; AVX512-NEXT: shll $20, %edx |
| ; AVX512-NEXT: orq %rcx, %rdx |
| ; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; AVX512-NEXT: andl $3, %ecx |
| ; AVX512-NEXT: shll $22, %ecx |
| ; AVX512-NEXT: orq %rdx, %rcx |
| ; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; AVX512-NEXT: andl $3, %edx |
| ; AVX512-NEXT: shll $24, %edx |
| ; AVX512-NEXT: orq %rcx, %rdx |
| ; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; AVX512-NEXT: andl $3, %ecx |
| ; AVX512-NEXT: shlq $26, %rcx |
| ; AVX512-NEXT: orq %rdx, %rcx |
| ; AVX512-NEXT: orq %rax, %rcx |
| ; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload |
| ; AVX512-NEXT: andl $3, %eax |
| ; AVX512-NEXT: shlq $28, %rax |
| ; AVX512-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; AVX512-NEXT: andl $3, %edx |
| ; AVX512-NEXT: shlq $30, %rdx |
| ; AVX512-NEXT: orq %rax, %rdx |
| ; AVX512-NEXT: orq %rcx, %rdx |
| ; AVX512-NEXT: movq %r14, %rax |
| ; AVX512-NEXT: movl %edx, (%r14) |
| ; AVX512-NEXT: addq $88, %rsp |
| ; AVX512-NEXT: popq %rbx |
| ; AVX512-NEXT: popq %r12 |
| ; AVX512-NEXT: popq %r13 |
| ; AVX512-NEXT: popq %r14 |
| ; AVX512-NEXT: popq %r15 |
| ; AVX512-NEXT: popq %rbp |
| ; AVX512-NEXT: retq |
| ; |
| ; X86-LABEL: ucmp_uncommon_vectors: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %ebp |
| ; X86-NEXT: pushl %ebx |
| ; X86-NEXT: pushl %edi |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: subl $132, %esp |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: andl $127, %ebp |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: andl $127, %eax |
| ; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: andl $127, %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: andl $127, %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill |
| ; X86-NEXT: cmpl %eax, {{[0-9]+}}(%esp) |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebx |
| ; X86-NEXT: movl %ebx, %esi |
| ; X86-NEXT: sbbl %eax, %esi |
| ; X86-NEXT: movl %edi, %esi |
| ; X86-NEXT: sbbl %edx, %esi |
| ; X86-NEXT: movl $0, %esi |
| ; X86-NEXT: sbbl %esi, %esi |
| ; X86-NEXT: setb %cl |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: sbbl %ebx, %eax |
| ; X86-NEXT: sbbl %edi, %edx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: movl %eax, %edi |
| ; X86-NEXT: sbbl %ebp, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %eax, %ebp |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: movl %eax, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: sbbl %ebp, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %eax, %ebp |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: movl %eax, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: sbbl %ebp, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %eax, %ebp |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: movl %eax, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: sbbl %ebp, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %eax, %ebp |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: movl %eax, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: sbbl %ebp, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %eax, %ebp |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: movl %eax, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: sbbl %ebp, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %eax, %ebp |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: movl %ebp, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload |
| ; X86-NEXT: sbbl %ebx, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %cl |
| ; X86-NEXT: cmpl %eax, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %ebp, %ebx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: movl %ebp, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload |
| ; X86-NEXT: sbbl %ebx, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %cl |
| ; X86-NEXT: cmpl %eax, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %ebp, %ebx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: movl %ebp, %edi |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload |
| ; X86-NEXT: sbbl %ebx, %edi |
| ; X86-NEXT: movl $0, %edi |
| ; X86-NEXT: sbbl %edi, %edi |
| ; X86-NEXT: setb %cl |
| ; X86-NEXT: cmpl %eax, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: sbbl %ebp, %ebx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl %eax, %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl %edi, %edx |
| ; X86-NEXT: sbbl %esi, %edx |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: movl %ebp, %edx |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload |
| ; X86-NEXT: sbbl %ebx, %edx |
| ; X86-NEXT: movl $0, %edx |
| ; X86-NEXT: sbbl %edx, %edx |
| ; X86-NEXT: setb %dl |
| ; X86-NEXT: cmpl %ecx, %eax |
| ; X86-NEXT: sbbl %edi, %esi |
| ; X86-NEXT: sbbl %ebp, %ebx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %dl |
| ; X86-NEXT: movb %dl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl %eax, %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl %edi, %ebx |
| ; X86-NEXT: sbbl %esi, %ebx |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: movl %ebp, %ebx |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload |
| ; X86-NEXT: sbbl %edx, %ebx |
| ; X86-NEXT: movl $0, %ebx |
| ; X86-NEXT: sbbl %ebx, %ebx |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, %eax |
| ; X86-NEXT: sbbl %edi, %esi |
| ; X86-NEXT: sbbl %ebp, %edx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: cmpl %eax, %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl %edi, %ebx |
| ; X86-NEXT: sbbl %esi, %ebx |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload |
| ; X86-NEXT: movl %ebp, %ebx |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload |
| ; X86-NEXT: sbbl %edx, %ebx |
| ; X86-NEXT: movl $0, %ebx |
| ; X86-NEXT: sbbl %ebx, %ebx |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %ecx, %eax |
| ; X86-NEXT: sbbl %edi, %esi |
| ; X86-NEXT: sbbl %ebp, %edx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movb %bl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl %edi, %ebp |
| ; X86-NEXT: sbbl %esi, %ebp |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload |
| ; X86-NEXT: movl %ebx, %ebp |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload |
| ; X86-NEXT: sbbl %edx, %ebp |
| ; X86-NEXT: movl $0, %ebp |
| ; X86-NEXT: sbbl %ebp, %ebp |
| ; X86-NEXT: setb %cl |
| ; X86-NEXT: cmpl %eax, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %edi, %esi |
| ; X86-NEXT: sbbl %ebx, %edx |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movb %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: movl %ebp, %eax |
| ; X86-NEXT: sbbl %edi, %eax |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload |
| ; X86-NEXT: movl %ecx, %eax |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload |
| ; X86-NEXT: sbbl %edx, %eax |
| ; X86-NEXT: movl $0, %eax |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: setb %bl |
| ; X86-NEXT: cmpl %esi, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %ebp, %edi |
| ; X86-NEXT: sbbl %ecx, %edx |
| ; X86-NEXT: movl $0, %ecx |
| ; X86-NEXT: sbbl %ecx, %ecx |
| ; X86-NEXT: sbbb $0, %bl |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl %ecx, %ebp |
| ; X86-NEXT: sbbl %edi, %ebp |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: movl %eax, %ebp |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload |
| ; X86-NEXT: sbbl %edx, %ebp |
| ; X86-NEXT: movl $0, %ebp |
| ; X86-NEXT: sbbl %ebp, %ebp |
| ; X86-NEXT: setb %bh |
| ; X86-NEXT: cmpl %esi, {{[0-9]+}}(%esp) |
| ; X86-NEXT: sbbl %ecx, %edi |
| ; X86-NEXT: sbbl %eax, %edx |
| ; X86-NEXT: movl $0, %ecx |
| ; X86-NEXT: sbbl %ecx, %ecx |
| ; X86-NEXT: sbbb $0, %bh |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: cmpl %eax, {{[0-9]+}}(%esp) |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %esi |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movl %esi, %ebp |
| ; X86-NEXT: sbbl %edi, %ebp |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload |
| ; X86-NEXT: movl %edx, %ebp |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: sbbl %eax, %ebp |
| ; X86-NEXT: movl $0, %ebp |
| ; X86-NEXT: sbbl %ebp, %ebp |
| ; X86-NEXT: setb %cl |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: cmpl {{[0-9]+}}(%esp), %ebp |
| ; X86-NEXT: sbbl %esi, %edi |
| ; X86-NEXT: sbbl %edx, %eax |
| ; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload |
| ; X86-NEXT: sbbl %eax, %eax |
| ; X86-NEXT: sbbb $0, %cl |
| ; X86-NEXT: movzbl %cl, %ecx |
| ; X86-NEXT: andl $3, %ecx |
| ; X86-NEXT: movl {{[0-9]+}}(%esp), %edi |
| ; X86-NEXT: movb %cl, 4(%edi) |
| ; X86-NEXT: movzbl %bh, %ebp |
| ; X86-NEXT: movzbl %bl, %ecx |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; X86-NEXT: andl $3, %ebp |
| ; X86-NEXT: andl $3, %ecx |
| ; X86-NEXT: leal (%ecx,%ebp,4), %ecx |
| ; X86-NEXT: andl $3, %eax |
| ; X86-NEXT: shll $4, %eax |
| ; X86-NEXT: orl %ecx, %eax |
| ; X86-NEXT: andl $3, %ebx |
| ; X86-NEXT: shll $6, %ebx |
| ; X86-NEXT: orl %eax, %ebx |
| ; X86-NEXT: andl $3, %esi |
| ; X86-NEXT: shll $8, %esi |
| ; X86-NEXT: orl %ebx, %esi |
| ; X86-NEXT: andl $3, %edx |
| ; X86-NEXT: shll $10, %edx |
| ; X86-NEXT: orl %esi, %edx |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload |
| ; X86-NEXT: andl $3, %eax |
| ; X86-NEXT: shll $12, %eax |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: andl $3, %ecx |
| ; X86-NEXT: shll $14, %ecx |
| ; X86-NEXT: orl %eax, %ecx |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload |
| ; X86-NEXT: andl $3, %eax |
| ; X86-NEXT: shll $16, %eax |
| ; X86-NEXT: orl %ecx, %eax |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; X86-NEXT: andl $3, %esi |
| ; X86-NEXT: shll $18, %esi |
| ; X86-NEXT: orl %eax, %esi |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: andl $3, %eax |
| ; X86-NEXT: shll $20, %eax |
| ; X86-NEXT: orl %esi, %eax |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 1-byte Folded Reload |
| ; X86-NEXT: orl %edx, %eax |
| ; X86-NEXT: andl $3, %ecx |
| ; X86-NEXT: shll $22, %ecx |
| ; X86-NEXT: andl $3, %esi |
| ; X86-NEXT: shll $24, %esi |
| ; X86-NEXT: orl %ecx, %esi |
| ; X86-NEXT: andl $3, %ebx |
| ; X86-NEXT: shll $26, %ebx |
| ; X86-NEXT: orl %esi, %ebx |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload |
| ; X86-NEXT: andl $3, %ecx |
| ; X86-NEXT: shll $28, %ecx |
| ; X86-NEXT: orl %ebx, %ecx |
| ; X86-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 1-byte Folded Reload |
| ; X86-NEXT: shll $30, %edx |
| ; X86-NEXT: orl %ecx, %edx |
| ; X86-NEXT: orl %eax, %edx |
| ; X86-NEXT: movl %edx, (%edi) |
| ; X86-NEXT: movl %edi, %eax |
| ; X86-NEXT: addl $132, %esp |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: popl %edi |
| ; X86-NEXT: popl %ebx |
| ; X86-NEXT: popl %ebp |
| ; X86-NEXT: retl $4 |
| %1 = call <17 x i2> @llvm.ucmp(<17 x i71> %x, <17 x i71> %y) |
| ret <17 x i2> %1 |
| } |