| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=loongarch32 < %s | FileCheck %s -check-prefix=LA32 |
| ; RUN: llc -mtriple=loongarch64 < %s | FileCheck %s -check-prefix=LA64 |
| |
| ; Check that the PreserveMost calling convention works: a preserve_mostcc |
| ; callee is expected to preserve all general-purpose registers except |
| ; $t0-$t3 and $t8; floating-point registers are not preserved. |
| |
| declare void @standard_cc_func() |
| declare preserve_mostcc void @preserve_mostcc_func() |
| |
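| ; A preserve_mostcc function that calls a normal (standard CC) function has to |
| ; save $ra for the call plus every register it must preserve for its own caller |
| ; but that the standard callee may clobber ($a0-$a7 and $t4-$t7). |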
| define preserve_mostcc void @preserve_mostcc1() nounwind { |
| ; LA32-LABEL: preserve_mostcc1: |
| ; LA32: # %bb.0: # %entry |
| ; LA32-NEXT: addi.w $sp, $sp, -64 |
| ; LA32-NEXT: st.w $ra, $sp, 60 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a0, $sp, 56 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a1, $sp, 52 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a2, $sp, 48 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a3, $sp, 44 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a4, $sp, 40 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a5, $sp, 36 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a6, $sp, 32 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $a7, $sp, 28 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $t4, $sp, 24 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $t5, $sp, 20 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $t6, $sp, 16 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $t7, $sp, 12 # 4-byte Folded Spill |
| ; LA32-NEXT: bl standard_cc_func |
| ; LA32-NEXT: ld.w $t7, $sp, 12 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $t6, $sp, 16 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $t5, $sp, 20 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $t4, $sp, 24 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a7, $sp, 28 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a6, $sp, 32 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a5, $sp, 36 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a4, $sp, 40 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a3, $sp, 44 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a2, $sp, 48 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a1, $sp, 52 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $a0, $sp, 56 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $ra, $sp, 60 # 4-byte Folded Reload |
| ; LA32-NEXT: addi.w $sp, $sp, 64 |
| ; LA32-NEXT: ret |
| ; |
| ; LA64-LABEL: preserve_mostcc1: |
| ; LA64: # %bb.0: # %entry |
| ; LA64-NEXT: addi.d $sp, $sp, -112 |
| ; LA64-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a0, $sp, 96 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a1, $sp, 88 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a2, $sp, 80 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a3, $sp, 72 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a4, $sp, 64 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a5, $sp, 56 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a6, $sp, 48 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $a7, $sp, 40 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $t4, $sp, 32 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $t5, $sp, 24 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $t6, $sp, 16 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $t7, $sp, 8 # 8-byte Folded Spill |
| ; LA64-NEXT: pcaddu18i $ra, %call36(standard_cc_func) |
| ; LA64-NEXT: jirl $ra, $ra, 0 |
| ; LA64-NEXT: ld.d $t7, $sp, 8 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $t6, $sp, 16 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $t5, $sp, 24 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $t4, $sp, 32 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a7, $sp, 40 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a6, $sp, 48 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a5, $sp, 56 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a4, $sp, 64 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a3, $sp, 72 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a2, $sp, 80 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a1, $sp, 88 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $a0, $sp, 96 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload |
| ; LA64-NEXT: addi.d $sp, $sp, 112 |
| ; LA64-NEXT: ret |
| entry: |
| call void @standard_cc_func() |
| ret void |
| } |
| |
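| ; A preserve_mostcc function that calls another preserve_mostcc function only |
| ; needs to save $ra for the call itself; the callee preserves the rest. |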
| define preserve_mostcc void @preserve_mostcc2() nounwind { |
| ; LA32-LABEL: preserve_mostcc2: |
| ; LA32: # %bb.0: |
| ; LA32-NEXT: addi.w $sp, $sp, -16 |
| ; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill |
| ; LA32-NEXT: bl preserve_mostcc_func |
| ; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload |
| ; LA32-NEXT: addi.w $sp, $sp, 16 |
| ; LA32-NEXT: ret |
| ; |
| ; LA64-LABEL: preserve_mostcc2: |
| ; LA64: # %bb.0: |
| ; LA64-NEXT: addi.d $sp, $sp, -16 |
| ; LA64-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill |
| ; LA64-NEXT: pcaddu18i $ra, %call36(preserve_mostcc_func) |
| ; LA64-NEXT: jirl $ra, $ra, 0 |
| ; LA64-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload |
| ; LA64-NEXT: addi.d $sp, $sp, 16 |
| ; LA64-NEXT: ret |
| call preserve_mostcc void @preserve_mostcc_func() |
| ret void |
| } |
| |
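| ; A standard CC function calling a preserve_mostcc function: values that live |
| ; across the call in registers the callee does not preserve ($t0-$t3 and $t8) |
| ; are moved into preserved argument registers, while the values in $s0/$s1 can |
| ; stay where they are. On LA64 the floating-point callee-saved registers must |
| ; also be spilled, since the preserve_mostcc callee does not preserve them. |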
| define void @preserve_mostcc3() nounwind { |
| ; LA32-LABEL: preserve_mostcc3: |
| ; LA32: # %bb.0: |
| ; LA32-NEXT: addi.w $sp, $sp, -16 |
| ; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s0, $sp, 8 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s1, $sp, 4 # 4-byte Folded Spill |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $a0, $t0 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $a1, $t1 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $a2, $t2 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $a3, $t3 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $a4, $t8 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: bl preserve_mostcc_func |
| ; LA32-NEXT: move $t0, $a0 |
| ; LA32-NEXT: move $t1, $a1 |
| ; LA32-NEXT: move $t2, $a2 |
| ; LA32-NEXT: move $t3, $a3 |
| ; LA32-NEXT: move $t8, $a4 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: ld.w $s1, $sp, 4 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s0, $sp, 8 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload |
| ; LA32-NEXT: addi.w $sp, $sp, 16 |
| ; LA32-NEXT: ret |
| ; |
| ; LA64-LABEL: preserve_mostcc3: |
| ; LA64: # %bb.0: |
| ; LA64-NEXT: addi.d $sp, $sp, -96 |
| ; LA64-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s0, $sp, 80 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s1, $sp, 72 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs0, $sp, 64 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs1, $sp, 56 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs2, $sp, 48 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs3, $sp, 40 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs4, $sp, 32 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs5, $sp, 24 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs6, $sp, 16 # 8-byte Folded Spill |
| ; LA64-NEXT: fst.d $fs7, $sp, 8 # 8-byte Folded Spill |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $a0, $t0 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $a1, $t1 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $a2, $t2 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $a3, $t3 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $a4, $t8 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: pcaddu18i $ra, %call36(preserve_mostcc_func) |
| ; LA64-NEXT: jirl $ra, $ra, 0 |
| ; LA64-NEXT: move $t0, $a0 |
| ; LA64-NEXT: move $t1, $a1 |
| ; LA64-NEXT: move $t2, $a2 |
| ; LA64-NEXT: move $t3, $a3 |
| ; LA64-NEXT: move $t8, $a4 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: fld.d $fs7, $sp, 8 # 8-byte Folded Reload |
| ; LA64-NEXT: fld.d $fs6, $sp, 16 # 8-byte Folded Reload |
| ; LA64-NEXT: fld.d $fs5, $sp, 24 # 8-byte Folded Reload |
| ; LA64-NEXT: fld.d $fs4, $sp, 32 # 8-byte Folded Reload |
| ; LA64-NEXT: fld.d $fs3, $sp, 40 # 8-byte Folded Reload |
| ; LA64-NEXT: fld.d $fs2, $sp, 48 # 8-byte Folded Reload |
| ; LA64-NEXT: fld.d $fs1, $sp, 56 # 8-byte Folded Reload |
| ; LA64-NEXT: fld.d $fs0, $sp, 64 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $s1, $sp, 72 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $s0, $sp, 80 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload |
| ; LA64-NEXT: addi.d $sp, $sp, 96 |
| ; LA64-NEXT: ret |
| %1 = call i32 asm sideeffect "", "={r12}"() nounwind |
| %2 = call i32 asm sideeffect "", "={r13}"() nounwind |
| %3 = call i32 asm sideeffect "", "={r14}"() nounwind |
| %4 = call i32 asm sideeffect "", "={r15}"() nounwind |
| %5 = call i32 asm sideeffect "", "={r20}"() nounwind |
| %6 = call i32 asm sideeffect "", "={r23}"() nounwind |
| %7 = call i32 asm sideeffect "", "={r24}"() nounwind |
| call preserve_mostcc void @preserve_mostcc_func() |
| call void asm sideeffect "", "{r12},{r13},{r14},{r15},{r20},{r23},{r24}"(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) |
| ret void |
| } |
| |
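| ; Same pattern as preserve_mostcc3, but the callee uses the standard CC, so all |
| ; of the inline-asm values have to be moved into callee-saved registers |
| ; ($fp, $s2-$s5) to survive the call. |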
| define void @preserve_mostcc4() nounwind { |
| ; LA32-LABEL: preserve_mostcc4: |
| ; LA32: # %bb.0: |
| ; LA32-NEXT: addi.w $sp, $sp, -32 |
| ; LA32-NEXT: st.w $ra, $sp, 28 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $fp, $sp, 24 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s0, $sp, 20 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s1, $sp, 16 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s2, $sp, 12 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s3, $sp, 8 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s4, $sp, 4 # 4-byte Folded Spill |
| ; LA32-NEXT: st.w $s5, $sp, 0 # 4-byte Folded Spill |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $fp, $t0 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $s2, $t1 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $s3, $t2 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $s4, $t3 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: move $s5, $t8 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: bl standard_cc_func |
| ; LA32-NEXT: move $t0, $fp |
| ; LA32-NEXT: move $t1, $s2 |
| ; LA32-NEXT: move $t2, $s3 |
| ; LA32-NEXT: move $t3, $s4 |
| ; LA32-NEXT: move $t8, $s5 |
| ; LA32-NEXT: #APP |
| ; LA32-NEXT: #NO_APP |
| ; LA32-NEXT: ld.w $s5, $sp, 0 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s4, $sp, 4 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s3, $sp, 8 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s2, $sp, 12 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s1, $sp, 16 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $s0, $sp, 20 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $fp, $sp, 24 # 4-byte Folded Reload |
| ; LA32-NEXT: ld.w $ra, $sp, 28 # 4-byte Folded Reload |
| ; LA32-NEXT: addi.w $sp, $sp, 32 |
| ; LA32-NEXT: ret |
| ; |
| ; LA64-LABEL: preserve_mostcc4: |
| ; LA64: # %bb.0: |
| ; LA64-NEXT: addi.d $sp, $sp, -80 |
| ; LA64-NEXT: st.d $ra, $sp, 72 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $fp, $sp, 64 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s0, $sp, 56 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s1, $sp, 48 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s2, $sp, 40 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s3, $sp, 32 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s4, $sp, 24 # 8-byte Folded Spill |
| ; LA64-NEXT: st.d $s5, $sp, 16 # 8-byte Folded Spill |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $fp, $t0 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $s2, $t1 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $s3, $t2 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $s4, $t3 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: move $s5, $t8 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: pcaddu18i $ra, %call36(standard_cc_func) |
| ; LA64-NEXT: jirl $ra, $ra, 0 |
| ; LA64-NEXT: move $t0, $fp |
| ; LA64-NEXT: move $t1, $s2 |
| ; LA64-NEXT: move $t2, $s3 |
| ; LA64-NEXT: move $t3, $s4 |
| ; LA64-NEXT: move $t8, $s5 |
| ; LA64-NEXT: #APP |
| ; LA64-NEXT: #NO_APP |
| ; LA64-NEXT: ld.d $s5, $sp, 16 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $s4, $sp, 24 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $s3, $sp, 32 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $s2, $sp, 40 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $s1, $sp, 48 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $s0, $sp, 56 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $fp, $sp, 64 # 8-byte Folded Reload |
| ; LA64-NEXT: ld.d $ra, $sp, 72 # 8-byte Folded Reload |
| ; LA64-NEXT: addi.d $sp, $sp, 80 |
| ; LA64-NEXT: ret |
| %1 = call i32 asm sideeffect "", "={r12}"() nounwind |
| %2 = call i32 asm sideeffect "", "={r13}"() nounwind |
| %3 = call i32 asm sideeffect "", "={r14}"() nounwind |
| %4 = call i32 asm sideeffect "", "={r15}"() nounwind |
| %5 = call i32 asm sideeffect "", "={r20}"() nounwind |
| %6 = call i32 asm sideeffect "", "={r23}"() nounwind |
| %7 = call i32 asm sideeffect "", "={r24}"() nounwind |
| call void @standard_cc_func() |
| call void asm sideeffect "", "{r12},{r13},{r14},{r15},{r20},{r23},{r24}"(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) |
| ret void |
| } |