; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64

;; Use cttz to test whether we correctly prove operands never-zero. There is
;; a very simple transform from cttz -> cttz_zero_undef when its operand is
;; known to be non-zero.
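;;
;; As a sketch of the fold being exercised (illustration only, not itself one
;; of the tests below): if the operand can be proven non-zero, e.g.
;;   %nz = or i32 %x, 1
;;   %r  = call i32 @llvm.cttz.i32(i32 %nz, i1 false)
;; then the zero-is-defined form may be replaced with the zero-is-poison form:
;;   %r  = call i32 @llvm.cttz.i32(i32 %nz, i1 true)
;; In the x86 checks this shows up as a bare "rep bsf" when the proof
;; succeeds, versus "bsf" plus a cmov (or a preloaded result of 32) to handle
;; a possibly-zero input when it does not.
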
declare i32 @llvm.cttz.i32(i32, i1)
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare i32 @llvm.umax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
declare i32 @llvm.smin.i32(i32, i32)
declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.smax.i32(i32, i32)
declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare i32 @llvm.bswap.i32(i32)
declare i32 @llvm.bitreverse.i32(i32)
declare i32 @llvm.ctpop.i32(i32)
declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
declare i32 @llvm.abs.i32(i32, i1)
declare i32 @llvm.fshl.i32(i32, i32, i32)
declare i32 @llvm.fshr.i32(i32, i32, i32)

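;; OR with the constant 1 forces the low bit on, so %z below is provably
;; non-zero; OR of two unconstrained values is not.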
define i32 @or_known_nonzero(i32 %x) {
; X86-LABEL: or_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $1, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: or_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: orl $1, %edi
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %z = or i32 %x, 1
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @or_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: or_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: or_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: orl %esi, %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %z = or i32 %x, %y
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

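;; A select is non-zero only when both arms are: (%x | 1) and 122 are both
;; non-zero, but a 0 arm defeats the proof.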
define i32 @select_known_nonzero(i1 %c, i32 %x) {
; X86-LABEL: select_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $1, %eax
; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-NEXT: movl $122, %ecx
; X86-NEXT: cmovnel %eax, %ecx
; X86-NEXT: rep bsfl %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: select_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: orl $1, %esi
; X64-NEXT: testb $1, %dil
; X64-NEXT: movl $122, %eax
; X64-NEXT: cmovnel %esi, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %y = or i32 %x, 1
  %z = select i1 %c, i32 %y, i32 122
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @select_maybe_zero(i1 %c, i32 %x) {
; X86-LABEL: select_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $1, %eax
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: testb $1, {{[0-9]+}}(%esp)
; X86-NEXT: cmovnel %eax, %ecx
; X86-NEXT: bsfl %ecx, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: select_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: orl $1, %esi
; X64-NEXT: xorl %ecx, %ecx
; X64-NEXT: testb $1, %dil
; X64-NEXT: cmovnel %esi, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %y = or i32 %x, 1
  %z = select i1 %c, i32 %y, i32 0
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

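;; Left shifts: 123 has bit 0 set, so a set bit survives any in-range shift;
;; with nsw or nuw, no set bits may be discarded, so a non-zero operand stays
;; non-zero. If the shifted operand itself may be zero, no flags help.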
define i32 @shl_known_nonzero_1s_bit_set(i32 %x) {
; X86-LABEL: shl_known_nonzero_1s_bit_set:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $123, %eax
; X86-NEXT: shll %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: shl_known_nonzero_1s_bit_set:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $123, %eax
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %z = shl i32 123, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @shl_known_nonzero_nsw(i32 %x, i32 %yy) {
; X86-LABEL: shl_known_nonzero_nsw:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: shl_known_nonzero_nsw:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: orl $256, %esi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %esi
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %y = or i32 %yy, 256
  %z = shl nsw i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @shl_known_nonzero_nuw(i32 %x, i32 %yy) {
; X86-LABEL: shl_known_nonzero_nuw:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: shl_known_nonzero_nuw:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: orl $256, %esi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %esi
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %y = or i32 %yy, 256
  %z = shl nuw i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @shl_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: shl_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shll %cl, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: shl_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %esi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %z = shl nuw nsw i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

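;; uadd.sat(%x, 1) either increments or saturates at -1; neither path can
;; produce 0. With two unconstrained operands, 0 + 0 = 0 is possible.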
define i32 @uaddsat_known_nonzero(i32 %x) {
; X86-LABEL: uaddsat_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: incl %eax
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: cmovnel %eax, %ecx
; X86-NEXT: rep bsfl %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: uaddsat_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: incl %edi
; X64-NEXT: movl $-1, %eax
; X64-NEXT: cmovnel %edi, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 1)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @uaddsat_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: uaddsat_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: cmovael %eax, %ecx
; X86-NEXT: bsfl %ecx, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: uaddsat_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: addl %esi, %edi
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: cmovael %edi, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

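;; umax is non-zero if either operand is non-zero (here 4 << %y with nuw).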
define i32 @umax_known_nonzero(i32 %x, i32 %y) {
; X86-LABEL: umax_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $4, %edx
; X86-NEXT: shll %cl, %edx
; X86-NEXT: cmpl %edx, %eax
; X86-NEXT: cmoval %eax, %edx
; X86-NEXT: rep bsfl %edx, %eax
; X86-NEXT: retl
;
; X64-LABEL: umax_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: movl $4, %eax
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: cmpl %eax, %edi
; X64-NEXT: cmoval %edi, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %yy = shl nuw i32 4, %y
  %z = call i32 @llvm.umax.i32(i32 %x, i32 %yy)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @umax_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: umax_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: cmpl %eax, %ecx
; X86-NEXT: cmoval %ecx, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: umax_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: cmpl %esi, %edi
; X64-NEXT: cmoval %edi, %esi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.umax.i32(i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

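;; umin requires both operands non-zero: 4 << %xx (nuw) and %yy + 4 (nuw)
;; are, but umin(%x, 54) is 0 whenever %x is.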
define i32 @umin_known_nonzero(i32 %xx, i32 %yy) {
; X86-LABEL: umin_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $4, %edx
; X86-NEXT: shll %cl, %edx
; X86-NEXT: addl $4, %eax
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: cmovbl %edx, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: umin_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $4, %eax
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: addl $4, %esi
; X64-NEXT: cmpl %esi, %eax
; X64-NEXT: cmovbl %eax, %esi
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %x = shl nuw i32 4, %xx
  %y = add nuw nsw i32 %yy, 4
  %z = call i32 @llvm.umin.i32(i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @umin_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: umin_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $54, %eax
; X86-NEXT: movl $54, %ecx
; X86-NEXT: cmovbl %eax, %ecx
; X86-NEXT: bsfl %ecx, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: umin_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: cmpl $54, %edi
; X64-NEXT: movl $54, %ecx
; X64-NEXT: cmovbl %edi, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.umin.i32(i32 %x, i32 54)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

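;; smin of two known non-zero operands is non-zero; smin(%x, -54) is at most
;; -54, hence negative and (despite the test name) also provably non-zero.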
define i32 @smin_known_nonzero(i32 %xx, i32 %yy) {
; X86-LABEL: smin_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $4, %edx
; X86-NEXT: shll %cl, %edx
; X86-NEXT: addl $4, %eax
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: cmovll %edx, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: smin_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $4, %eax
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: addl $4, %esi
; X64-NEXT: cmpl %esi, %eax
; X64-NEXT: cmovll %eax, %esi
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %x = shl nuw i32 4, %xx
  %y = add nuw nsw i32 %yy, 4
  %z = call i32 @llvm.smin.i32(i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @smin_known_zero(i32 %x, i32 %y) {
; X86-LABEL: smin_known_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $-54, %eax
; X86-NEXT: movl $-54, %ecx
; X86-NEXT: cmovll %eax, %ecx
; X86-NEXT: rep bsfl %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: smin_known_zero:
; X64: # %bb.0:
; X64-NEXT: cmpl $-54, %edi
; X64-NEXT: movl $-54, %eax
; X64-NEXT: cmovll %edi, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.smin.i32(i32 %x, i32 -54)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

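;; The vector tests exercise a related fold: ctpop(%z) == 1 (power-of-two
;; test) may become (%z & (%z - 1)) == 0, which is only valid when %z is
;; known non-zero; the all-negative smin bound provides that here.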
define <4 x i32> @smin_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
; X86-LABEL: smin_known_zero_vec:
; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967242,4294967273,4294967284,4294967295]
; X86-NEXT: movdqa %xmm1, %xmm2
; X86-NEXT: pcmpgtd %xmm0, %xmm2
; X86-NEXT: pand %xmm2, %xmm0
; X86-NEXT: pandn %xmm1, %xmm2
; X86-NEXT: por %xmm2, %xmm0
; X86-NEXT: pcmpeqd %xmm1, %xmm1
; X86-NEXT: paddd %xmm0, %xmm1
; X86-NEXT: pand %xmm1, %xmm0
; X86-NEXT: pxor %xmm1, %xmm1
; X86-NEXT: pcmpeqd %xmm1, %xmm0
; X86-NEXT: psrld $31, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: smin_known_zero_vec:
; X64: # %bb.0:
; X64-NEXT: vpminsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X64-NEXT: vpsrld $31, %xmm0, %xmm0
; X64-NEXT: retq
  %z = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %x, <4 x i32> <i32 -54, i32 -23, i32 -12, i32 -1>)
  %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
  %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
  %ret = zext <4 x i1> %3 to <4 x i32>
  ret <4 x i32> %ret
}

define i32 @smin_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: smin_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $54, %eax
; X86-NEXT: movl $54, %ecx
; X86-NEXT: cmovll %eax, %ecx
; X86-NEXT: bsfl %ecx, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: smin_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: cmpl $54, %edi
; X64-NEXT: movl $54, %ecx
; X64-NEXT: cmovll %edi, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.smin.i32(i32 %x, i32 54)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

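;; smax of two known non-zero operands is non-zero.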
define i32 @smax_known_nonzero(i32 %xx, i32 %yy) {
; X86-LABEL: smax_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $4, %edx
; X86-NEXT: shll %cl, %edx
; X86-NEXT: addl $4, %eax
; X86-NEXT: cmpl %eax, %edx
; X86-NEXT: cmovgl %edx, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: smax_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $4, %eax
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: addl $4, %esi
; X64-NEXT: cmpl %esi, %eax
; X64-NEXT: cmovgl %eax, %esi
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %x = shl nuw i32 4, %xx
  %y = add nuw nsw i32 %yy, 4
  %z = call i32 @llvm.smax.i32(i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

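;; Despite the name, smax(%x, 54) is at least 54, so this is still proven
;; non-zero; smax(%x, -1) further below genuinely may be zero.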
define i32 @smax_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: smax_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cmpl $55, %eax
; X86-NEXT: movl $54, %ecx
; X86-NEXT: cmovgel %eax, %ecx
; X86-NEXT: rep bsfl %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: smax_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: cmpl $55, %edi
; X64-NEXT: movl $54, %eax
; X64-NEXT: cmovgel %edi, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.smax.i32(i32 %x, i32 54)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define <4 x i32> @smax_known_zero_vec(<4 x i32> %x, <4 x i32> %y) {
; X86-LABEL: smax_known_zero_vec:
; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [54,23,12,1]
; X86-NEXT: movdqa %xmm0, %xmm2
; X86-NEXT: pcmpgtd %xmm1, %xmm2
; X86-NEXT: pand %xmm2, %xmm0
; X86-NEXT: pandn %xmm1, %xmm2
; X86-NEXT: por %xmm2, %xmm0
; X86-NEXT: pcmpeqd %xmm1, %xmm1
; X86-NEXT: paddd %xmm0, %xmm1
; X86-NEXT: pand %xmm1, %xmm0
; X86-NEXT: pxor %xmm1, %xmm1
; X86-NEXT: pcmpeqd %xmm1, %xmm0
; X86-NEXT: psrld $31, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: smax_known_zero_vec:
; X64: # %bb.0:
; X64-NEXT: vpmaxsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-NEXT: vpaddd %xmm1, %xmm0, %xmm1
; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
; X64-NEXT: vpsrld $31, %xmm0, %xmm0
; X64-NEXT: retq
  %z = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %x, <4 x i32> <i32 54, i32 23, i32 12, i32 1>)
  %r = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %z)
  %3 = icmp eq <4 x i32> %r, <i32 1, i32 1, i32 1, i32 1>
  %ret = zext <4 x i1> %3 to <4 x i32>
  ret <4 x i32> %ret
}

define i32 @smax_known_zero(i32 %x, i32 %y) {
; X86-LABEL: smax_known_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: testl %eax, %eax
; X86-NEXT: movl $-1, %ecx
; X86-NEXT: cmovnsl %eax, %ecx
; X86-NEXT: bsfl %ecx, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: smax_known_zero:
; X64: # %bb.0:
; X64-NEXT: testl %edi, %edi
; X64-NEXT: movl $-1, %ecx
; X64-NEXT: cmovnsl %edi, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.smax.i32(i32 %x, i32 -1)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

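;; Rotates only permute bits, so a non-zero input (here %xx | 256) stays
;; non-zero, whether written as shifts plus or, or as fshl/fshr; an
;; unconstrained input may be zero.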
define i32 @rotr_known_nonzero(i32 %xx, i32 %y) {
; X86-LABEL: rotr_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rorl %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotr_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: orl $256, %edi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: rorl %cl, %edi
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 256
  %shr = lshr i32 %x, %y
  %sub = sub i32 32, %y
  %shl = shl i32 %x, %sub
  %z = or i32 %shl, %shr
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @rotr_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: rotr_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rorl %cl, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotr_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: rorl %cl, %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %shr = lshr i32 %x, %y
  %sub = sub i32 32, %y
  %shl = shl i32 %x, %sub
  %z = or i32 %shl, %shr
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @rotr_with_fshr_known_nonzero(i32 %xx, i32 %y) {
; X86-LABEL: rotr_with_fshr_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rorl %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotr_with_fshr_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: orl $256, %edi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: rorl %cl, %edi
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 256
  %z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @rotr_with_fshr_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: rotr_with_fshr_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rorl %cl, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotr_with_fshr_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: rorl %cl, %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @rotl_known_nonzero(i32 %xx, i32 %y) {
; X86-LABEL: rotl_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: roll %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotl_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: orl $256, %edi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: roll %cl, %edi
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 256
  %shl = shl i32 %x, %y
  %sub = sub i32 32, %y
  %shr = lshr i32 %x, %sub
  %z = or i32 %shr, %shl
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @rotl_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: rotl_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: roll %cl, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotl_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: roll %cl, %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %shl = shl i32 %x, %y
  %sub = sub i32 32, %y
  %shr = lshr i32 %x, %sub
  %z = or i32 %shr, %shl
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @rotl_with_fshl_known_nonzero(i32 %xx, i32 %y) {
; X86-LABEL: rotl_with_fshl_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: roll %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotl_with_fshl_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: orl $256, %edi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: roll %cl, %edi
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 256
  %z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @rotl_with_fshl_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: rotl_with_fshl_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: roll %cl, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: rotl_with_fshl_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %esi, %ecx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: roll %cl, %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %z = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %y)
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

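;; Right shifts: a constant with the sign bit set keeps a set bit under any
;; in-range shift amount, and an "exact" shift drops no set bits, so a
;; non-zero operand stays non-zero; an unconstrained operand may already be
;; zero.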
define i32 @sra_known_nonzero_sign_bit_set(i32 %x) {
; X86-LABEL: sra_known_nonzero_sign_bit_set:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
; X86-NEXT: sarl %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: sra_known_nonzero_sign_bit_set:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %z = ashr i32 2147606891, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sra_known_nonzero_exact(i32 %x, i32 %yy) {
; X86-LABEL: sra_known_nonzero_exact:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sarl %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: sra_known_nonzero_exact:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: orl $256, %esi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %esi
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %y = or i32 %yy, 256
  %z = ashr exact i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sra_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: sra_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sarl %cl, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: sra_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %esi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %z = ashr exact i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @srl_known_nonzero_sign_bit_set(i32 %x) {
; X86-LABEL: srl_known_nonzero_sign_bit_set:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: srl_known_nonzero_sign_bit_set:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $-2147360405, %eax # imm = 0x8001E16B
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %z = lshr i32 2147606891, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @srl_known_nonzero_exact(i32 %x, i32 %yy) {
; X86-LABEL: srl_known_nonzero_exact:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: srl_known_nonzero_exact:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: orl $256, %esi # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrl %cl, %esi
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %y = or i32 %yy, 256
  %z = lshr exact i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @srl_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: srl_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: srl_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrl %cl, %esi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %z = lshr exact i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

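;; An "exact" udiv/sdiv of a non-zero value (%xx | 64) is non-zero, since the
;; quotient times the divisor reconstructs it; an unconstrained dividend may
;; be zero.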
define i32 @udiv_known_nonzero(i32 %xx, i32 %y) {
; X86-LABEL: udiv_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $64, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl {{[0-9]+}}(%esp)
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: udiv_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: orl $64, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divl %esi
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 64
  %z = udiv exact i32 %x, %y
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @udiv_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: udiv_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: divl {{[0-9]+}}(%esp)
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: udiv_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: divl %esi
; X64-NEXT: movl $32, %ecx
; X64-NEXT: rep bsfl %eax, %ecx
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: retq
  %z = udiv exact i32 %x, %y
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sdiv_known_nonzero(i32 %xx, i32 %y) {
; X86-LABEL: sdiv_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $64, %eax
; X86-NEXT: cltd
; X86-NEXT: idivl {{[0-9]+}}(%esp)
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: sdiv_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: orl $64, %eax
; X64-NEXT: cltd
; X64-NEXT: idivl %esi
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 64
  %z = sdiv exact i32 %x, %y
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sdiv_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: sdiv_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: cltd
; X86-NEXT: idivl {{[0-9]+}}(%esp)
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: sdiv_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: cltd
; X64-NEXT: idivl %esi
; X64-NEXT: movl $32, %ecx
; X64-NEXT: rep bsfl %eax, %ecx
; X64-NEXT: movl %ecx, %eax
; X64-NEXT: retq
  %z = sdiv exact i32 %x, %y
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

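;; add nuw of a non-zero value cannot wrap back to 0; add nsw may still wrap
;; unsigned, so it can.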
define i32 @add_known_nonzero(i32 %xx, i32 %y) {
; X86-LABEL: add_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $1, %eax
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: add_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: orl $1, %edi
; X64-NEXT: addl %esi, %edi
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 1
  %z = add nuw i32 %x, %y
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @add_maybe_zero(i32 %xx, i32 %y) {
; X86-LABEL: add_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: orl $1, %eax
; X86-NEXT: addl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: add_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: orl $1, %edi
; X64-NEXT: addl %esi, %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 1
  %z = add nsw i32 %x, %y
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

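;; sub is non-zero when the operands are provably unequal: the negation of a
;; non-zero value, or (%xx | 64) vs. (%xx & -65), which must differ in bit 6.
;; By contrast, (%x | 64) - %x is 0 whenever bit 6 of %x is already set.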
define i32 @sub_known_nonzero_neg_case(i32 %xx) {
; X86-LABEL: sub_known_nonzero_neg_case:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: shll %cl, %eax
; X86-NEXT: negl %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: sub_known_nonzero_neg_case:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $256, %eax # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: negl %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %x = shl nuw nsw i32 256, %xx
  %z = sub i32 0, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sub_known_nonzero_ne_case(i32 %xx, i32 %yy) {
; X86-LABEL: sub_known_nonzero_ne_case:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: orl $64, %ecx
; X86-NEXT: andl $-65, %eax
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: sub_known_nonzero_ne_case:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: orl $64, %eax
; X64-NEXT: andl $-65, %edi
; X64-NEXT: subl %eax, %edi
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %x = or i32 %xx, 64
  %y = and i32 %xx, -65
  %z = sub i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sub_maybe_zero(i32 %x) {
; X86-LABEL: sub_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: orl $64, %ecx
; X86-NEXT: subl %eax, %ecx
; X86-NEXT: bsfl %ecx, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: sub_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: orl $64, %ecx
; X64-NEXT: subl %edi, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %y = or i32 %x, 64
  %z = sub i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sub_maybe_zero2(i32 %x) {
; X86-LABEL: sub_maybe_zero2:
; X86: # %bb.0:
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: subl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: sub_maybe_zero2:
; X64: # %bb.0:
; X64-NEXT: negl %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %z = sub i32 0, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

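;; mul nsw/nuw needs *both* operands known non-zero; %x is unconstrained
;; here, so none of these fold.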
define i32 @mul_known_nonzero_nsw(i32 %x, i32 %yy) {
; X86-LABEL: mul_known_nonzero_nsw:
; X86: # %bb.0:
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: mul_known_nonzero_nsw:
; X64: # %bb.0:
; X64-NEXT: orl $256, %esi # imm = 0x100
; X64-NEXT: imull %edi, %esi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %y = or i32 %yy, 256
  %z = mul nsw i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @mul_known_nonzero_nuw(i32 %x, i32 %yy) {
; X86-LABEL: mul_known_nonzero_nuw:
; X86: # %bb.0:
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: orl {{[0-9]+}}(%esp), %eax
; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: mul_known_nonzero_nuw:
; X64: # %bb.0:
; X64-NEXT: orl $256, %esi # imm = 0x100
; X64-NEXT: imull %edi, %esi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %esi, %eax
; X64-NEXT: retq
  %y = or i32 %yy, 256
  %z = mul nuw i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @mul_maybe_zero(i32 %x, i32 %y) {
; X86-LABEL: mul_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: mul_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: imull %esi, %edi
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %edi, %eax
; X64-NEXT: retq
  %z = mul nuw nsw i32 %y, %x
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

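;; The scalar value is non-zero (both i16 lanes are), but as the checks show
;; the proof is not currently carried through the vector-to-scalar bitcast.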
define i32 @bitcast_known_nonzero(<2 x i16> %xx) {
; X86-LABEL: bitcast_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; X86-NEXT: pslld $23, %xmm0
; X86-NEXT: paddd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-NEXT: cvttps2dq %xmm0, %xmm0
; X86-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; X86-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0 # [256,256,u,u,u,u,u,u]
; X86-NEXT: movd %xmm0, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: bitcast_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: vpslld $23, %xmm0, %xmm0
; X64-NEXT: vpaddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-NEXT: vcvttps2dq %xmm0, %xmm0
; X64-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
; X64-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0 # [256,256,u,u,u,u,u,u]
; X64-NEXT: vmovd %xmm0, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %x = shl nuw nsw <2 x i16> <i16 256, i16 256>, %xx
  %z = bitcast <2 x i16> %x to i32
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @bitcast_maybe_zero(<2 x i16> %x) {
; X86-LABEL: bitcast_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movd %xmm0, %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: bitcast_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: vmovd %xmm0, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = bitcast <2 x i16> %x to i32
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @bitcast_from_float(float %x) {
; X86-LABEL: bitcast_from_float:
; X86: # %bb.0:
; X86-NEXT: bsfl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: bitcast_from_float:
; X64: # %bb.0:
; X64-NEXT: vmovd %xmm0, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = bitcast float %x to i32
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

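;; zext and sext introduce no new zero values, so a non-zero narrow value
;; extends to a non-zero wide one; an unconstrained i16 may be zero.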
define i32 @zext_known_nonzero(i16 %xx) {
; X86-LABEL: zext_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: shll %cl, %eax
; X86-NEXT: movzwl %ax, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: zext_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $256, %eax # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %x = shl nuw nsw i16 256, %xx
  %z = zext i16 %x to i32
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @zext_maybe_zero(i16 %x) {
; X86-LABEL: zext_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: zext_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movzwl %di, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = zext i16 %x to i32
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sext_known_nonzero(i16 %xx) {
; X86-LABEL: sext_known_nonzero:
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl $256, %eax # imm = 0x100
; X86-NEXT: shll %cl, %eax
; X86-NEXT: movzwl %ax, %eax
; X86-NEXT: rep bsfl %eax, %eax
; X86-NEXT: retl
;
; X64-LABEL: sext_known_nonzero:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: movl $256, %eax # imm = 0x100
; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: movzwl %ax, %eax
; X64-NEXT: rep bsfl %eax, %eax
; X64-NEXT: retq
  %x = shl nuw nsw i16 256, %xx
  %z = sext i16 %x to i32
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}

define i32 @sext_maybe_zero(i16 %x) {
; X86-LABEL: sext_maybe_zero:
; X86: # %bb.0:
; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax
; X86-NEXT: bsfl %eax, %ecx
; X86-NEXT: movl $32, %eax
; X86-NEXT: cmovnel %ecx, %eax
; X86-NEXT: retl
;
; X64-LABEL: sext_maybe_zero:
; X64: # %bb.0:
; X64-NEXT: movswl %di, %ecx
; X64-NEXT: movl $32, %eax
; X64-NEXT: rep bsfl %ecx, %eax
; X64-NEXT: retq
  %z = sext i16 %x to i32
  %r = call i32 @llvm.cttz.i32(i32 %z, i1 false)
  ret i32 %r
}