; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512
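; x86-64-v2 guarantees SSE4.2, x86-64-v3 adds AVX2, and x86-64-v4 adds
; AVX-512 (F/BW/CD/DQ/VL), hence the SSE/AVX2/AVX512 check prefixes.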

; Ensure canonicalizeShuffleWithBinOps doesn't merge binops with different types

; Don't merge PCMPGT nodes of different types: a signed compare's result
; depends on the element width, so folding the shuffle through the bitcast
; would change the result.
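; For example, the dword 0xFF000001 is negative as an i32 (icmp sgt 0 gives
; 0x00000000), but its low byte 0x01 is positive as an i8 (a byte-wise compare
; gives 0x000000FF for that dword), so the two compares must stay separate.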
define <4 x i32> @dont_merge_pcmpgt(<16 x i8> %0, <4 x i32> %1) {
; SSE-LABEL: dont_merge_pcmpgt:
; SSE:       # %bb.0:
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pcmpgtb %xmm2, %xmm0
; SSE-NEXT:    pcmpgtd %xmm2, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: dont_merge_pcmpgt:
; AVX:       # %bb.0:
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpcmpgtb %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vpcmpgtd %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX-NEXT:    retq
  %3 = icmp sgt <16 x i8> %0, zeroinitializer
  %4 = sext <16 x i1> %3 to <16 x i8>
  %5 = bitcast <16 x i8> %4 to <4 x i32>
  %6 = icmp sgt <4 x i32> %1, zeroinitializer
  %7 = sext <4 x i1> %6 to <4 x i32>
  %8 = shufflevector <4 x i32> %5, <4 x i32> %7, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
  ret <4 x i32> %8
}

; FIXME: It's OK to merge bitwise logic nodes of different types: AND is
; lane-agnostic, so the shuffle could be performed first and the two constant
; masks folded into a single blended constant.
define <4 x i32> @merge_and(<16 x i8> %0, <4 x i32> %1) {
; SSE-LABEL: merge_and:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT:    andps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; SSE-NEXT:    retq
;
; AVX2-LABEL: merge_and:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vbroadcastss {{.*#+}} xmm2 = [1,1,1,1]
; AVX2-NEXT:    vandps %xmm2, %xmm1, %xmm1
; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2-NEXT:    retq
;
; AVX512-LABEL: merge_and:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm0, %xmm0
; AVX512-NEXT:    vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm1, %xmm1
; AVX512-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX512-NEXT:    retq
  %3 = and <16 x i8> %0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %4 = bitcast <16 x i8> %3 to <4 x i32>
  %5 = and <4 x i32> %1, <i32 1, i32 1, i32 1, i32 1>
  %6 = shufflevector <4 x i32> %4, <4 x i32> %5, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
  ret <4 x i32> %6
}
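
; A merged form of @merge_and could shuffle first and fold the two masks into
; a single blended constant. Hypothetical IR sketch (illustrative names, not
; part of the checked output); 16843009 = 0x01010101:
;   %bc = bitcast <16 x i8> %0 to <4 x i32>
;   %s = shufflevector <4 x i32> %bc, <4 x i32> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
;   %m = and <4 x i32> %s, <i32 16843009, i32 16843009, i32 16843009, i32 1>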