| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,NOBMI |
| ; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,BMI |
| |
| ; Scalar i64: computes (~(y & x) & w) & (~z | y). With BMI, each not+and |
| ; pair lowers to a single ANDN; without BMI, explicit not/or/and are used. |
| define i64 @test_i64(i64 %w, i64 %x, i64 %y, i64 %z) { |
| ; NOBMI-LABEL: test_i64: |
| ; NOBMI: # %bb.0: # %Entry |
| ; NOBMI-NEXT: movq %rcx, %rax |
| ; NOBMI-NEXT: andq %rdx, %rsi |
| ; NOBMI-NEXT: notq %rsi |
| ; NOBMI-NEXT: andq %rdi, %rsi |
| ; NOBMI-NEXT: notq %rax |
| ; NOBMI-NEXT: orq %rdx, %rax |
| ; NOBMI-NEXT: andq %rsi, %rax |
| ; NOBMI-NEXT: retq |
| ; |
| ; BMI-LABEL: test_i64: |
| ; BMI: # %bb.0: # %Entry |
| ; BMI-NEXT: andq %rdx, %rsi |
| ; BMI-NEXT: andnq %rdi, %rsi, %rax |
| ; BMI-NEXT: andnq %rcx, %rdx, %rcx |
| ; BMI-NEXT: andnq %rax, %rcx, %rax |
| ; BMI-NEXT: retq |
| Entry: |
| %and1 = and i64 %y, %x |
| %xor1 = xor i64 %and1, -1 ; ~(y & x) |
| %and2 = and i64 %xor1, %w |
| %.not = xor i64 %z, -1 ; ~z |
| %or1 = or i64 %.not, %y |
| %and3 = and i64 %and2, %or1 |
| ret i64 %and3 |
| } |
| |
| ; Scalar i32: same (~(y & x) & w) & (~z | y) pattern as test_i64, at 32 bits |
| ; (andnl with BMI, not/or/and without). |
| define i32 @test_i32(i32 %w, i32 %x, i32 %y, i32 %z) { |
| ; NOBMI-LABEL: test_i32: |
| ; NOBMI: # %bb.0: # %Entry |
| ; NOBMI-NEXT: movl %ecx, %eax |
| ; NOBMI-NEXT: andl %edx, %esi |
| ; NOBMI-NEXT: notl %esi |
| ; NOBMI-NEXT: andl %edi, %esi |
| ; NOBMI-NEXT: notl %eax |
| ; NOBMI-NEXT: orl %edx, %eax |
| ; NOBMI-NEXT: andl %esi, %eax |
| ; NOBMI-NEXT: retq |
| ; |
| ; BMI-LABEL: test_i32: |
| ; BMI: # %bb.0: # %Entry |
| ; BMI-NEXT: andl %edx, %esi |
| ; BMI-NEXT: andnl %edi, %esi, %eax |
| ; BMI-NEXT: andnl %ecx, %edx, %ecx |
| ; BMI-NEXT: andnl %eax, %ecx, %eax |
| ; BMI-NEXT: retq |
| Entry: |
| %and1 = and i32 %y, %x |
| %xor1 = xor i32 %and1, -1 ; ~(y & x) |
| %and2 = and i32 %xor1, %w |
| %.not = xor i32 %z, -1 ; ~z |
| %or1 = or i32 %.not, %y |
| %and3 = and i32 %and2, %or1 |
| ret i32 %and3 |
| } |
| |
| ; Scalar i16: same pattern; the ops are performed in 32-bit registers with a |
| ; final truncation to $ax. Note the BMI output only converts the first |
| ; not+and pair to ANDN — the second inversion stays as notl/orl/andl. |
| define i16 @test_i16(i16 %w, i16 %x, i16 %y, i16 %z) { |
| ; NOBMI-LABEL: test_i16: |
| ; NOBMI: # %bb.0: # %Entry |
| ; NOBMI-NEXT: movl %ecx, %eax |
| ; NOBMI-NEXT: andl %edx, %esi |
| ; NOBMI-NEXT: notl %esi |
| ; NOBMI-NEXT: andl %edi, %esi |
| ; NOBMI-NEXT: notl %eax |
| ; NOBMI-NEXT: orl %edx, %eax |
| ; NOBMI-NEXT: andl %esi, %eax |
| ; NOBMI-NEXT: # kill: def $ax killed $ax killed $eax |
| ; NOBMI-NEXT: retq |
| ; |
| ; BMI-LABEL: test_i16: |
| ; BMI: # %bb.0: # %Entry |
| ; BMI-NEXT: andl %edx, %esi |
| ; BMI-NEXT: andnl %edi, %esi, %eax |
| ; BMI-NEXT: notl %ecx |
| ; BMI-NEXT: orl %edx, %ecx |
| ; BMI-NEXT: andl %ecx, %eax |
| ; BMI-NEXT: # kill: def $ax killed $ax killed $eax |
| ; BMI-NEXT: retq |
| Entry: |
| %and1 = and i16 %y, %x |
| %xor1 = xor i16 %and1, -1 ; ~(y & x) |
| %and2 = and i16 %xor1, %w |
| %.not = xor i16 %z, -1 ; ~z |
| %or1 = or i16 %.not, %y |
| %and3 = and i16 %and2, %or1 |
| ret i16 %and3 |
| } |
| |
| ; Scalar i8: same pattern; both CPUs emit the identical byte-op sequence |
| ; (no ANDN used), hence the shared CHECK prefix instead of NOBMI/BMI. |
| define i8 @test_i8(i8 %w, i8 %x, i8 %y, i8 %z) { |
| ; CHECK-LABEL: test_i8: |
| ; CHECK: # %bb.0: # %Entry |
| ; CHECK-NEXT: movl %edx, %eax |
| ; CHECK-NEXT: andl %edx, %esi |
| ; CHECK-NEXT: notb %sil |
| ; CHECK-NEXT: andb %dil, %sil |
| ; CHECK-NEXT: notb %cl |
| ; CHECK-NEXT: orb %cl, %al |
| ; CHECK-NEXT: andb %sil, %al |
| ; CHECK-NEXT: # kill: def $al killed $al killed $eax |
| ; CHECK-NEXT: retq |
| Entry: |
| %and1 = and i8 %y, %x |
| %xor1 = xor i8 %and1, -1 ; ~(y & x) |
| %and2 = and i8 %xor1, %w |
| %.not = xor i8 %z, -1 ; ~z |
| %or1 = or i8 %.not, %y |
| %and3 = and i8 %and2, %or1 |
| ret i8 %and3 |
| } |
| |
| ; Vector (128-bit): same (~(y & x) & w) & (~z | y) pattern. Both targets |
| ; have a vector and-not: SSE andnps on x86-64-v2, AVX vandnps on x86-64-v3. |
| define <16 x i8> @test_v16i8(<16 x i8> %w, <16 x i8> %x, <16 x i8> %y, <16 x i8> %z) { |
| ; NOBMI-LABEL: test_v16i8: |
| ; NOBMI: # %bb.0: # %Entry |
| ; NOBMI-NEXT: andps %xmm2, %xmm1 |
| ; NOBMI-NEXT: andnps %xmm0, %xmm1 |
| ; NOBMI-NEXT: andnps %xmm3, %xmm2 |
| ; NOBMI-NEXT: andnps %xmm1, %xmm2 |
| ; NOBMI-NEXT: movaps %xmm2, %xmm0 |
| ; NOBMI-NEXT: retq |
| ; |
| ; BMI-LABEL: test_v16i8: |
| ; BMI: # %bb.0: # %Entry |
| ; BMI-NEXT: vandps %xmm1, %xmm2, %xmm1 |
| ; BMI-NEXT: vandnps %xmm0, %xmm1, %xmm0 |
| ; BMI-NEXT: vandnps %xmm3, %xmm2, %xmm1 |
| ; BMI-NEXT: vandnps %xmm0, %xmm1, %xmm0 |
| ; BMI-NEXT: retq |
| Entry: |
| %and1 = and <16 x i8> %y, %x |
| %xor1 = xor <16 x i8> %and1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; ~(y & x) |
| %and2 = and <16 x i8> %xor1, %w |
| %.not = xor <16 x i8> %z, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; ~z |
| %or1 = or <16 x i8> %.not, %y |
| %and3 = and <16 x i8> %and2, %or1 |
| ret <16 x i8> %and3 |
| } |
| |
| ; Vector (256-bit): same pattern. x86-64-v2 (no AVX) splits each value into |
| ; two xmm halves; x86-64-v3 uses single ymm vandps/vandnps ops. |
| define <32 x i8> @test_v32i8(<32 x i8> %w, <32 x i8> %x, <32 x i8> %y, <32 x i8> %z) { |
| ; NOBMI-LABEL: test_v32i8: |
| ; NOBMI: # %bb.0: # %Entry |
| ; NOBMI-NEXT: andps %xmm4, %xmm2 |
| ; NOBMI-NEXT: andps %xmm5, %xmm3 |
| ; NOBMI-NEXT: andnps %xmm1, %xmm3 |
| ; NOBMI-NEXT: andnps %xmm0, %xmm2 |
| ; NOBMI-NEXT: andnps %xmm6, %xmm4 |
| ; NOBMI-NEXT: andnps %xmm2, %xmm4 |
| ; NOBMI-NEXT: andnps %xmm7, %xmm5 |
| ; NOBMI-NEXT: andnps %xmm3, %xmm5 |
| ; NOBMI-NEXT: movaps %xmm4, %xmm0 |
| ; NOBMI-NEXT: movaps %xmm5, %xmm1 |
| ; NOBMI-NEXT: retq |
| ; |
| ; BMI-LABEL: test_v32i8: |
| ; BMI: # %bb.0: # %Entry |
| ; BMI-NEXT: vandps %ymm1, %ymm2, %ymm1 |
| ; BMI-NEXT: vandnps %ymm0, %ymm1, %ymm0 |
| ; BMI-NEXT: vandnps %ymm3, %ymm2, %ymm1 |
| ; BMI-NEXT: vandnps %ymm0, %ymm1, %ymm0 |
| ; BMI-NEXT: retq |
| Entry: |
| %and1 = and <32 x i8> %y, %x |
| %xor1 = xor <32 x i8> %and1, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; ~(y & x) |
| %and2 = and <32 x i8> %xor1, %w |
| %.not = xor <32 x i8> %z, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1> ; ~z |
| %or1 = or <32 x i8> %.not, %y |
| %and3 = and <32 x i8> %and2, %or1 |
| ret <32 x i8> %and3 |
| } |
| |
| ; PR112347 - don't fold if we'd be inverting a constant, as demorgan normalisation will invert it back again. |
| define void @PR112347(ptr %p0, ptr %p1, ptr %p2) { |
| ; CHECK-LABEL: PR112347: |
| ; CHECK: # %bb.0: |
| ; CHECK-NEXT: movl (%rdi), %eax |
| ; CHECK-NEXT: notl %eax |
| ; CHECK-NEXT: orl $-16777204, %eax # imm = 0xFF00000C |
| ; CHECK-NEXT: andl (%rsi), %eax |
| ; CHECK-NEXT: movl %eax, (%rdx) |
| ; CHECK-NEXT: retq |
| %load0 = load i32, ptr %p0, align 1 |
| %load1 = load i32, ptr %p1, align 4 |
| %not = xor i32 %load0, -1 |
| %top = or i32 %not, -16777204 ; ~load0 | 0xFF00000C — constant stays uninverted (see note above) |
| %mask = and i32 %load1, %top |
| store i32 %mask, ptr %p2, align 4 |
| ret void |
| } |
| |
| ; Regression test for PR113240: a & ~(a & C) with a constant mask. |
| ; NOTE(review): %bf.value (and of the mask with 0) is unused — presumably |
| ; retained from the original reduced reproducer; verify against the PR. |
| define void @PR113240(i64 %a) { |
| ; CHECK-LABEL: PR113240: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: movq %rdi, %rax |
| ; CHECK-NEXT: notq %rax |
| ; CHECK-NEXT: movabsq $8796093022206, %rcx # imm = 0x7FFFFFFFFFE |
| ; CHECK-NEXT: notq %rcx |
| ; CHECK-NEXT: orq %rax, %rcx |
| ; CHECK-NEXT: andq %rdi, %rcx |
| ; CHECK-NEXT: movq %rcx, 0 |
| ; CHECK-NEXT: retq |
| entry: |
| %and = and i64 %a, 8796093022206 |
| %bf.value = and i64 8796093022206, 0 |
| %not = xor i64 %and, -1 ; ~(a & 0x7FFFFFFFFFE) |
| %and4 = and i64 %a, %not |
| store i64 %and4, ptr null, align 8 ; store to null is intentional in the reduced test |
| ret void |
| } |
| |