; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_64LE
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64-ibm-aix \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_64
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc-ibm-aix \
; RUN: -ppc-asm-full-reg-names --ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=POWERPC_32
define i32 @test_Greater_than(ptr %colauths) {
; This test covers the special case of comparing a vector against the zero vector.
; Currently the generated code performs an equality comparison (vcmpequh) followed by a
; negation (xxlnor) to produce the "not equal" mask.
; This pattern is expected to be folded into a single instruction by a future patch.
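; NOTE: The sketch below is an illustration, not autogenerated output. On ISA 3.0
; (Power9) the vcmpequh/xxlnor pair could in principle be replaced by a single
; not-equal compare, e.g.:
;   vcmpneh v2, v2, v3
; The exact sequence a future patch selects may differ.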
; POWERPC_64LE-LABEL: test_Greater_than:
; POWERPC_64LE: # %bb.0: # %entry
; POWERPC_64LE-NEXT: lfd f0, 0(r3)
; POWERPC_64LE-NEXT: xxlxor v3, v3, v3
; POWERPC_64LE-NEXT: li r4, 0
; POWERPC_64LE-NEXT: li r3, 4
; POWERPC_64LE-NEXT: xxswapd v2, f0
; POWERPC_64LE-NEXT: vcmpequh v2, v2, v3
; POWERPC_64LE-NEXT: xxlnor v2, v2, v2
; POWERPC_64LE-NEXT: vmrglh v3, v2, v2
; POWERPC_64LE-NEXT: vextuwrx r4, r4, v2
; POWERPC_64LE-NEXT: vextuwrx r3, r3, v3
; POWERPC_64LE-NEXT: clrlwi r4, r4, 31
; POWERPC_64LE-NEXT: rlwimi r4, r3, 1, 30, 30
; POWERPC_64LE-NEXT: mfvsrwz r3, v3
; POWERPC_64LE-NEXT: rlwimi r4, r3, 2, 29, 29
; POWERPC_64LE-NEXT: li r3, 12
; POWERPC_64LE-NEXT: vextuwrx r3, r3, v3
; POWERPC_64LE-NEXT: rlwimi r4, r3, 3, 28, 28
; POWERPC_64LE-NEXT: stb r4, -1(r1)
; POWERPC_64LE-NEXT: lbz r3, -1(r1)
; POWERPC_64LE-NEXT: popcntd r3, r3
; POWERPC_64LE-NEXT: blr
;
; POWERPC_64-LABEL: test_Greater_than:
; POWERPC_64: # %bb.0: # %entry
; POWERPC_64-NEXT: lxsd v2, 0(r3)
; POWERPC_64-NEXT: xxlxor v3, v3, v3
; POWERPC_64-NEXT: li r4, 12
; POWERPC_64-NEXT: li r3, 8
; POWERPC_64-NEXT: vcmpequh v2, v2, v3
; POWERPC_64-NEXT: xxlnor v2, v2, v2
; POWERPC_64-NEXT: vmrghh v2, v2, v2
; POWERPC_64-NEXT: vextuwlx r4, r4, v2
; POWERPC_64-NEXT: vextuwlx r3, r3, v2
; POWERPC_64-NEXT: clrlwi r4, r4, 31
; POWERPC_64-NEXT: rlwimi r4, r3, 1, 30, 30
; POWERPC_64-NEXT: mfvsrwz r3, v2
; POWERPC_64-NEXT: rlwimi r4, r3, 2, 29, 29
; POWERPC_64-NEXT: li r3, 0
; POWERPC_64-NEXT: vextuwlx r3, r3, v2
; POWERPC_64-NEXT: rlwimi r4, r3, 3, 28, 28
; POWERPC_64-NEXT: stb r4, -1(r1)
; POWERPC_64-NEXT: lbz r3, -1(r1)
; POWERPC_64-NEXT: popcntd r3, r3
; POWERPC_64-NEXT: blr
;
; POWERPC_32-LABEL: test_Greater_than:
; POWERPC_32: # %bb.0: # %entry
; POWERPC_32-NEXT: li r4, 4
; POWERPC_32-NEXT: lxvwsx vs1, 0, r3
; POWERPC_32-NEXT: xxlxor v3, v3, v3
; POWERPC_32-NEXT: lxvwsx vs0, r3, r4
; POWERPC_32-NEXT: xxmrghw v2, vs1, vs0
; POWERPC_32-NEXT: vcmpequh v2, v2, v3
; POWERPC_32-NEXT: xxlnor v2, v2, v2
; POWERPC_32-NEXT: vmrghh v2, v2, v2
; POWERPC_32-NEXT: stxv v2, -32(r1)
; POWERPC_32-NEXT: lwz r3, -20(r1)
; POWERPC_32-NEXT: lwz r4, -24(r1)
; POWERPC_32-NEXT: clrlwi r3, r3, 31
; POWERPC_32-NEXT: rlwimi r3, r4, 1, 30, 30
; POWERPC_32-NEXT: lwz r4, -28(r1)
; POWERPC_32-NEXT: rlwimi r3, r4, 2, 29, 29
; POWERPC_32-NEXT: lwz r4, -32(r1)
; POWERPC_32-NEXT: rlwimi r3, r4, 3, 28, 28
; POWERPC_32-NEXT: popcntw r3, r3
; POWERPC_32-NEXT: blr
entry:
%0 = load <4 x i16>, ptr %colauths, align 2, !tbaa !5
%1 = icmp ne <4 x i16> %0, zeroinitializer
%2 = bitcast <4 x i1> %1 to i4
%3 = tail call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 %2)
%4 = zext nneg i4 %3 to i32
ret i32 %4
}
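; A rough C equivalent of the IR above (a hypothetical reconstruction of the
; original source; only the function and parameter names come from the IR):
;   int test_Greater_than(short *colauths) {
;     int count = 0;
;     for (int i = 0; i < 4; i++)
;       count += (colauths[i] != 0);   /* icmp ne + ctpop of the i4 mask */
;     return count;
;   }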
declare i4 @llvm.ctpop.i4(i4)
!5 = !{!6, !6, i64 0}
!6 = !{!"short", !7, i64 0}
!7 = !{!"omnipotent char", !8, i64 0}
!8 = !{!"Simple C/C++ TBAA"}