Merging r370404:
------------------------------------------------------------------------
r370404 | rksimon | 2019-08-29 22:22:08 +0200 (Thu, 29 Aug 2019) | 3 lines

[X86][SSE] combinePMULDQ - pmuldq(x, 0) -> zero vector (PR43159)

ISD::isBuildVectorAllZeros permits undef elements to be present, so the matched operand can't simply be returned as the zero vector: its undef lanes would survive in the result. PMULDQ/PMULUDQ is an extending multiply, so a multiply by zero in the lower 32 bits must produce a zero 64-bit element.
------------------------------------------------------------------------

llvm-svn: 370445
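
As a concrete illustration of the extending-multiply semantics described
above (a standalone sketch, not part of the patch, using the SSE2 intrinsic
_mm_mul_epu32, which lowers to PMULUDQ):

#include <cassert>
#include <cstdint>
#include <emmintrin.h> // SSE2

int main() {
  // Each 64-bit lane of PMULUDQ multiplies only the low 32 bits of its
  // operands; the high halves are ignored.
  __m128i X = _mm_set_epi32(/*ignored*/ -1, 0x12345678,
                            /*ignored*/ -1, 0x1abcdef0);
  __m128i Zero = _mm_setzero_si128();

  // pmuludq(x, 0): a zero low half forces each 64-bit product to zero,
  // regardless of what the ignored high halves contain.
  __m128i P = _mm_mul_epu32(X, Zero);

  uint64_t Lanes[2];
  _mm_storeu_si128(reinterpret_cast<__m128i *>(Lanes), P);
  assert(Lanes[0] == 0 && Lanes[1] == 0);
  return 0;
}
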
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6f8240e..eecf349 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -44104,7 +44104,8 @@
 
 // Simplify PMULDQ and PMULUDQ operations.
 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
-                             TargetLowering::DAGCombinerInfo &DCI) {
+                             TargetLowering::DAGCombinerInfo &DCI,
+                             const X86Subtarget &Subtarget) {
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
 
@@ -44114,8 +44115,9 @@
     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
 
   // Multiply by zero.
+  // Don't return RHS as it may contain UNDEFs.
   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
-    return RHS;
+    return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
 
   // Aggressively peek through ops to get at the demanded low bits.
   APInt DemandedMask = APInt::getLowBitsSet(64, 32);
@@ -44323,7 +44325,7 @@
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
   case X86ISD::PMULDQ:
-  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI);
+  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
   }
 
   return SDValue();
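
For context, ISD::isBuildVectorAllZeros deliberately treats undef elements
as zero, so a node can satisfy the predicate while still carrying undef
lanes. The following standalone sketch (plain C++ modeling the predicate,
not LLVM code; the Lane type is illustrative) shows why the matched operand
cannot stand in for the folded result:

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

// One BUILD_VECTOR lane; nullopt models an undef element.
using Lane = std::optional<uint32_t>;

// Mirrors the isBuildVectorAllZeros contract: undef lanes are accepted.
static bool allZerosAllowingUndef(const std::vector<Lane> &V) {
  for (const Lane &L : V)
    if (L && *L != 0)
      return false;
  return true;
}

int main() {
  // RHS satisfies the predicate even though lanes 1 and 3 are undef, so
  // returning RHS itself would leak undef where the fold promises zero.
  std::vector<Lane> RHS = {Lane(0), std::nullopt, Lane(0), std::nullopt};
  assert(allZerosAllowingUndef(RHS));

  // The extending multiply, by contrast, is unconditionally zero: a zero
  // low 32 bits forces the whole 64-bit product to zero.
  uint32_t AnyLow = 0xdeadbeef;
  assert(uint64_t(AnyLow) * uint64_t(0) == 0);
  return 0;
}
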
diff --git a/llvm/test/CodeGen/X86/combine-pmuldq.ll b/llvm/test/CodeGen/X86/combine-pmuldq.ll
index 5b5ae38..82387c9 100644
--- a/llvm/test/CodeGen/X86/combine-pmuldq.ll
+++ b/llvm/test/CodeGen/X86/combine-pmuldq.ll
@@ -172,3 +172,118 @@
   %tmp35 = add <4 x i64> %tmp29, %tmp28
   ret void
 }
+
+define i32 @PR43159(<4 x i32>* %a0) {
+; SSE-LABEL: PR43159:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movdqa (%rdi), %xmm0
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE-NEXT:    pmuludq %xmm2, %xmm3
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrld $1, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT:    pmuludq %xmm1, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT:    psubd %xmm1, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    pmuludq {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT:    paddd %xmm1, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    psrld $7, %xmm0
+; SSE-NEXT:    psrld $6, %xmm2
+; SSE-NEXT:    movd %xmm2, %edi
+; SSE-NEXT:    pextrd $1, %xmm0, %esi
+; SSE-NEXT:    pextrd $2, %xmm2, %edx
+; SSE-NEXT:    pextrd $3, %xmm0, %ecx
+; SSE-NEXT:    jmp foo # TAILCALL
+;
+; AVX2-LABEL: PR43159:
+; AVX2:       # %bb.0: # %entry
+; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm4, %xmm2
+; AVX2-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX2-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX2-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT:    vmovd %xmm0, %edi
+; AVX2-NEXT:    vpextrd $1, %xmm0, %esi
+; AVX2-NEXT:    vpextrd $2, %xmm0, %edx
+; AVX2-NEXT:    vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT:    jmp foo # TAILCALL
+;
+; AVX512VL-LABEL: PR43159:
+; AVX512VL:       # %bb.0: # %entry
+; AVX512VL-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm4, %xmm2
+; AVX512VL-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX512VL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX512VL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX512VL-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX512VL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT:    vmovd %xmm0, %edi
+; AVX512VL-NEXT:    vpextrd $1, %xmm0, %esi
+; AVX512VL-NEXT:    vpextrd $2, %xmm0, %edx
+; AVX512VL-NEXT:    vpextrd $3, %xmm0, %ecx
+; AVX512VL-NEXT:    jmp foo # TAILCALL
+;
+; AVX512DQVL-LABEL: PR43159:
+; AVX512DQVL:       # %bb.0: # %entry
+; AVX512DQVL-NEXT:    vmovdqa (%rdi), %xmm0
+; AVX512DQVL-NEXT:    vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512DQVL-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512DQVL-NEXT:    vpmuludq %xmm2, %xmm4, %xmm2
+; AVX512DQVL-NEXT:    vpmuludq %xmm1, %xmm3, %xmm1
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX512DQVL-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX512DQVL-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX512DQVL-NEXT:    vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX512DQVL-NEXT:    vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512DQVL-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX512DQVL-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT:    vmovd %xmm0, %edi
+; AVX512DQVL-NEXT:    vpextrd $1, %xmm0, %esi
+; AVX512DQVL-NEXT:    vpextrd $2, %xmm0, %edx
+; AVX512DQVL-NEXT:    vpextrd $3, %xmm0, %ecx
+; AVX512DQVL-NEXT:    jmp foo # TAILCALL
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a0, align 16
+  %div = udiv <4 x i32> %0, <i32 167, i32 237, i32 254, i32 177>
+  %ext0 = extractelement <4 x i32> %div, i32 0
+  %ext1 = extractelement <4 x i32> %div, i32 1
+  %ext2 = extractelement <4 x i32> %div, i32 2
+  %ext3 = extractelement <4 x i32> %div, i32 3
+  %call = tail call i32 @foo(i32 %ext0, i32 %ext1, i32 %ext2, i32 %ext3)
+  ret i32 %call
+}
+declare dso_local i32 @foo(i32, i32, i32, i32)
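
The vector constants in the CHECK lines above are the magic multipliers for
the unsigned divisors <167, 237, 254, 177>. As an illustrative aside (not
part of the test), the first lane can be spot-checked directly: it needs no
fixup step, its multiplier 1645975491 equals ceil(2^38 / 167), and the
final psrld $6 supplies the 6 extra bits beyond the implicit 32-bit high
half taken by pmuludq, i.e. a shift of 38 overall:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  const uint64_t Magic = 1645975491; // ceil(2^38 / 167)
  // mulhu-style sequence: widen, multiply, shift right by 32 + 6 = 38.
  for (uint32_t X : {0u, 1u, 166u, 167u, 168u, 1000000u, 0xffffffffu})
    assert(X / 167 == uint32_t((X * Magic) >> 38));
  return 0;
}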