# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
name: slt_to_sle_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; x slt c => x sle c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 itself is not encodable as an arithmetic immediate, but
    ; 4097 - 1 == 4096 == 1 << 12 is, hence the SUBSWri with imm = 1 and
    ; shift = 12 below instead of a MOV + SUBSWrr.)
    ; CHECK-LABEL: name: slt_to_sle_s32
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
    ; CHECK: $wzr = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 4097
    %4:gpr(s32) = G_ICMP intpred(slt), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: slt_to_sle_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; x slt c => x sle c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 itself is not encodable as an arithmetic immediate, but
    ; 4097 - 1 == 4096 == 1 << 12 is, hence the SUBSXri with imm = 1 and
    ; shift = 12 below instead of a MOV + SUBSXrr.)
    ; CHECK-LABEL: name: slt_to_sle_s64
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: $xzr = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 4097
    %4:gpr(s32) = G_ICMP intpred(slt), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: sge_to_sgt_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; x sge c => x sgt c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 - 1 == 4096 == 1 << 12 is encodable as SUBSWri imm = 1,
    ; shift = 12; 4097 itself is not.)
    ; CHECK-LABEL: name: sge_to_sgt_s32
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
    ; CHECK: $wzr = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 4097
    %4:gpr(s32) = G_ICMP intpred(sge), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: sge_to_sgt_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; x sge c => x sgt c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 - 1 == 4096 == 1 << 12 is encodable as SUBSXri imm = 1,
    ; shift = 12; 4097 itself is not.)
    ; CHECK-LABEL: name: sge_to_sgt_s64
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: $xzr = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 4097
    %4:gpr(s32) = G_ICMP intpred(sge), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: ult_to_ule_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; x ult c => x ule c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 - 1 == 4096 == 1 << 12 is encodable as SUBSWri imm = 1,
    ; shift = 12; 4097 itself is not.)
    ; CHECK-LABEL: name: ult_to_ule_s32
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
    ; CHECK: $wzr = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 4097
    %4:gpr(s32) = G_ICMP intpred(ult), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: ult_to_ule_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; x ult c => x ule c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 - 1 == 4096 == 1 << 12 is encodable as SUBSXri imm = 1,
    ; shift = 12; 4097 itself is not.)
    ; CHECK-LABEL: name: ult_to_ule_s64
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: $xzr = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 8, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 4097
    %4:gpr(s32) = G_ICMP intpred(ult), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: uge_to_ugt_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; x uge c => x ugt c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 - 1 == 4096 == 1 << 12 is encodable as SUBSWri imm = 1,
    ; shift = 12; 4097 itself is not.)
    ; CHECK-LABEL: name: uge_to_ugt_s32
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
    ; CHECK: $wzr = SUBSWri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 4097
    %4:gpr(s32) = G_ICMP intpred(uge), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: uge_to_ugt_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; x uge c => x ugt c - 1
    ;
    ; We should not have a MOV here. We can subtract 1 from the constant and
    ; change the condition code.
    ;
    ; log_2(4096) == 12, so we can represent this as a 12 bit value with a
    ; left shift.
    ;
    ; (4097 - 1 == 4096 == 1 << 12 is encodable as SUBSXri imm = 1,
    ; shift = 12; 4097 itself is not.)
    ; CHECK-LABEL: name: uge_to_ugt_s64
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: $xzr = SUBSXri [[COPY]], 1, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 9, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 4097
    %4:gpr(s32) = G_ICMP intpred(uge), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: sle_to_slt_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; x sle c => x slt c + 1
    ;
    ; We should not have a MOV here. We can add 1 to the constant and change
    ; the condition code.
    ;
    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
    ; left shift: 8191 + 1 == 8192 == 2 << 12, i.e. SUBSWri imm = 2,
    ; shift = 12.
    ;
    ; (We can't use 4095 here, because that's a legal arithmetic immediate.)
    ; CHECK-LABEL: name: sle_to_slt_s32
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
    ; CHECK: $wzr = SUBSWri [[COPY]], 2, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 8191
    %4:gpr(s32) = G_ICMP intpred(sle), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: sle_to_slt_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; x sle c => x slt c + 1
    ;
    ; We should not have a MOV here. We can add 1 to the constant and change
    ; the condition code.
    ;
    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
    ; left shift: 8191 + 1 == 8192 == 2 << 12, i.e. SUBSXri imm = 2,
    ; shift = 12.
    ;
    ; (We can't use 4095 here, because that's a legal arithmetic immediate.)
    ; CHECK-LABEL: name: sle_to_slt_s64
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: $xzr = SUBSXri [[COPY]], 2, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8191
    %4:gpr(s32) = G_ICMP intpred(sle), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: sgt_to_sge_s32
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; x sgt c => x sge c + 1
    ;
    ; We should not have a MOV here. We can add 1 to the constant and change
    ; the condition code.
    ;
    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
    ; left shift: 8191 + 1 == 8192 == 2 << 12, i.e. SUBSWri imm = 2,
    ; shift = 12.
    ; CHECK-LABEL: name: sgt_to_sge_s32
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
    ; CHECK: $wzr = SUBSWri [[COPY]], 2, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 8191
    %4:gpr(s32) = G_ICMP intpred(sgt), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: sgt_to_sge_s64
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; x sgt c => x sge c + 1
    ;
    ; We should not have a MOV here. We can add 1 to the constant and change
    ; the condition code.
    ;
    ; log_2(8192) == 13, so we can represent this as a 12 bit value with a
    ; left shift: 8191 + 1 == 8192 == 2 << 12, i.e. SUBSXri imm = 2,
    ; shift = 12.
    ; CHECK-LABEL: name: sgt_to_sge_s64
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: $xzr = SUBSXri [[COPY]], 2, 12, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 8191
    %4:gpr(s32) = G_ICMP intpred(sgt), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: no_opt_int32_min
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; This one should contain a MOV.
    ;
    ; If we subtract 1 from the constant (INT32_MIN), it will wrap around,
    ; and so it's not true that
    ;
    ; x slt c => x sle c - 1
    ; x sge c => x sgt c - 1
    ;
    ; The constant must therefore be materialized with MOVi32imm and compared
    ; with SUBSWrr, keeping the original condition code.
    ; CHECK-LABEL: name: no_opt_int32_min
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm -2147483648
    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 -2147483648
    %4:gpr(s32) = G_ICMP intpred(slt), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: no_opt_int64_min
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; This one should contain a MOV.
    ;
    ; If we subtract 1 from the constant (INT64_MIN), it will wrap around,
    ; and so it's not true that
    ;
    ; x slt c => x sle c - 1
    ; x sge c => x sgt c - 1
    ;
    ; The constant must therefore be materialized with MOVi64imm and compared
    ; with SUBSXrr, keeping the original condition code.
    ; CHECK-LABEL: name: no_opt_int64_min
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm -9223372036854775808
    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 -9223372036854775808
    %4:gpr(s32) = G_ICMP intpred(slt), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: no_opt_int32_max
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $w0
    ; This one should contain a MOV.
    ;
    ; If we add 1 to the constant (INT32_MAX), it will wrap around, and so
    ; it's not true that
    ;
    ; x sle c => x slt c + 1
    ; x sgt c => x sge c + 1
    ; CHECK-LABEL: name: no_opt_int32_max
    ; CHECK: liveins: $w0
    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 2147483647
    ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY]], [[MOVi32imm]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; CHECK: [[ANDWri:%[0-9]+]]:gpr32sp = ANDWri [[CSINCWr]], 0
    ; CHECK: $w0 = COPY [[ANDWri]]
    ; CHECK: RET_ReallyLR implicit $w0
    %0:gpr(s32) = COPY $w0
    %1:gpr(s32) = G_CONSTANT i32 2147483647
    %4:gpr(s32) = G_ICMP intpred(sle), %0(s32), %1
    %5:gpr(s32) = G_CONSTANT i32 1
    %3:gpr(s32) = G_AND %4, %5
    $w0 = COPY %3(s32)
    RET_ReallyLR implicit $w0
...
---
name: no_opt_int64_max
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; This one should contain a MOV.
    ;
    ; If we add 1 to the constant (INT64_MAX), it will wrap around, and so
    ; it's not true that
    ;
    ; x sle c => x slt c + 1
    ; x sgt c => x sge c + 1
    ; CHECK-LABEL: name: no_opt_int64_max
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
    ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 9223372036854775807
    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 9223372036854775807
    %4:gpr(s32) = G_ICMP intpred(sle), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: no_opt_zero
alignment: 4
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0
    ; This one should contain a MOV.
    ;
    ; This is an unsigned comparison, so when the constant is 0, subtracting
    ; 1 wraps around to UINT64_MAX and the following does not hold:
    ;
    ; x ult c => x ule c - 1
    ; x uge c => x ugt c - 1
    ; CHECK-LABEL: name: no_opt_zero
    ; CHECK: liveins: $x0
    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
    ; CHECK: $xzr = SUBSXri [[COPY]], 0, 0, implicit-def $nzcv
    ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 2, implicit $nzcv
    ; CHECK: [[DEF:%[0-9]+]]:gpr64all = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gpr64 = INSERT_SUBREG [[DEF]], [[CSINCWr]], %subreg.sub_32
    ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[INSERT_SUBREG]], 4096
    ; CHECK: $x0 = COPY [[ANDXri]]
    ; CHECK: RET_ReallyLR implicit $x0
    %0:gpr(s64) = COPY $x0
    %1:gpr(s64) = G_CONSTANT i64 0
    %4:gpr(s32) = G_ICMP intpred(ult), %0(s64), %1
    %6:gpr(s64) = G_ANYEXT %4(s32)
    %5:gpr(s64) = G_CONSTANT i64 1
    %3:gpr(s64) = G_AND %6, %5
    $x0 = COPY %3(s64)
    RET_ReallyLR implicit $x0
...
---
name: more_than_one_use_select
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1, $x2
    ; Both of these selects use the same compare.
    ;
    ; They should both be optimized in the same way, so the SUBS produced for
    ; each CSEL should be the same.
    ;
    ; (sle -1 becomes slt 0; -1 + 1 == 0 is directly encodable, so each
    ; compare selects to SUBSXri %a, 0, 0.)
    ; CHECK-LABEL: name: more_than_one_use_select
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: %a:gpr64common = COPY $x0
    ; CHECK: %b:gpr64 = COPY $x1
    ; CHECK: %c:gpr64 = COPY $x2
    ; CHECK: $xzr = SUBSXri %a, 0, 0, implicit-def $nzcv
    ; CHECK: %select1:gpr64 = CSELXr %a, %b, 11, implicit $nzcv
    ; CHECK: $xzr = SUBSXri %a, 0, 0, implicit-def $nzcv
    ; CHECK: %select2:gpr64 = CSELXr %b, %c, 11, implicit $nzcv
    ; CHECK: %add:gpr64 = ADDXrr %select1, %select2
    ; CHECK: $x0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $x0
    %a:gpr(s64) = COPY $x0
    %b:gpr(s64) = COPY $x1
    %c:gpr(s64) = COPY $x2
    %cst:gpr(s64) = G_CONSTANT i64 -1
    %cmp:gpr(s32) = G_ICMP intpred(sle), %a(s64), %cst
    %trunc_cmp:gpr(s1) = G_TRUNC %cmp(s32)
    %select1:gpr(s64) = G_SELECT %trunc_cmp(s1), %a, %b
    %select2:gpr(s64) = G_SELECT %trunc_cmp(s1), %b, %c
    %add:gpr(s64) = G_ADD %select1, %select2
    $x0 = COPY %add(s64)
    RET_ReallyLR implicit $x0
...
---
name: more_than_one_use_select_no_opt
legalized: true
regBankSelected: true
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $x0, $x1, $x2
    ; When we don't end up doing the optimization, we should not change the
    ; predicate.
    ;
    ; In this case, the CSELXrs should both have predicate code 13, and the
    ; constant stays in a register (MOVi64imm + SUBSXrr) for both compares.
    ; CHECK-LABEL: name: more_than_one_use_select_no_opt
    ; CHECK: liveins: $x0, $x1, $x2
    ; CHECK: %a:gpr64 = COPY $x0
    ; CHECK: %b:gpr64 = COPY $x1
    ; CHECK: %c:gpr64 = COPY $x2
    ; CHECK: %cst:gpr64 = MOVi64imm 922337203685477580
    ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %a, %cst, implicit-def $nzcv
    ; CHECK: %select1:gpr64 = CSELXr %a, %b, 13, implicit $nzcv
    ; CHECK: [[SUBSXrr1:%[0-9]+]]:gpr64 = SUBSXrr %a, %cst, implicit-def $nzcv
    ; CHECK: %select2:gpr64 = CSELXr %b, %c, 13, implicit $nzcv
    ; CHECK: %add:gpr64 = ADDXrr %select1, %select2
    ; CHECK: $x0 = COPY %add
    ; CHECK: RET_ReallyLR implicit $x0
    %a:gpr(s64) = COPY $x0
    %b:gpr(s64) = COPY $x1
    %c:gpr(s64) = COPY $x2
    %cst:gpr(s64) = G_CONSTANT i64 922337203685477580
    %cmp:gpr(s32) = G_ICMP intpred(sle), %a(s64), %cst
    %trunc_cmp:gpr(s1) = G_TRUNC %cmp(s32)
    %select1:gpr(s64) = G_SELECT %trunc_cmp(s1), %a, %b
    %select2:gpr(s64) = G_SELECT %trunc_cmp(s1), %b, %c
    %add:gpr(s64) = G_ADD %select1, %select2
    $x0 = COPY %add(s64)
    RET_ReallyLR implicit $x0
...