; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,ILP32 %s
; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32 %s
; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32f \
; RUN: -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32F %s
; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -mattr=+d -target-abi ilp32d \
; RUN: -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32D-ILP32D %s
; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,LP64 %s
; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64f \
; RUN: -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,LP64F %s
; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -mattr=+d -target-abi lp64d \
; RUN: -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,LP64D %s

; The same vararg calling convention is used for ilp32/ilp32f/ilp32d and for
; lp64/lp64f/lp64d. Different CHECK lines are required due to slight codegen
; differences in the way the f64 load operations are lowered and because the
; PseudoCALL specifies the calling convention.
; The nounwind attribute is omitted for some of the tests to check that CFI
; directives are correctly generated.

declare void @llvm.va_start(ptr)
declare void @llvm.va_end(ptr)

declare void @notdead(ptr)

; Although frontends are recommended not to generate va_arg due to the lack of
; support for aggregate types, we test simple cases here to ensure they are
; lowered correctly.
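;
; As a rough C-level illustration (not part of the test; the function name is
; hypothetical), the simple va_arg cases below correspond to source along
; these lines:
;
;   #include <stdarg.h>
;
;   int first_vararg(const char *fmt, ...) {
;     va_list va;
;     va_start(va, fmt);
;     int x = va_arg(va, int); /* fetch one i32 from the vararg save area */
;     va_end(va);
;     return x;
;   }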

define i32 @va1(ptr %fmt, ...) {
; RV32-LABEL: name: va1
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C1]](s32)
; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
; RV32-NEXT: $x10 = COPY [[LOAD1]](s32)
; RV32-NEXT: PseudoRET implicit $x10
;
; RV64-LABEL: name: va1
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (p0) from %ir.va, align 4)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C1]](s64)
; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur)
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32)
; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64-NEXT: PseudoRET implicit $x10
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va, align 4
  %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
  store ptr %argp.next, ptr %va, align 4
  %1 = load i32, ptr %argp.cur, align 4
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}

; Ensure the adjustment when restoring the stack pointer using the frame
; pointer is correct.
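;
; A hedged C sketch of this case (illustrative only; the function name is
; hypothetical, @notdead is declared above): the VLA corresponds to the
; dynamic alloca sized by the first vararg.
;
;   #include <stdarg.h>
;   void notdead(void *);
;
;   int vararg_sized_vla(const char *fmt, ...) {
;     va_list va;
;     va_start(va, fmt);
;     int n = va_arg(va, int);
;     char buf[n];   /* dynamic stack allocation sized by the vararg */
;     notdead(buf);
;     va_end(va);
;     return n;
;   }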
define i32 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; ILP32-LABEL: name: va1_va_arg_alloca
; ILP32: bb.1 (%ir-block.0):
; ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; ILP32-NEXT: {{ $}}
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; ILP32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
; ILP32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
; ILP32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
; ILP32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
; ILP32-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: $x10 = COPY [[VAARG]](s32)
; ILP32-NEXT: PseudoRET implicit $x10
;
; RV32D-ILP32-LABEL: name: va1_va_arg_alloca
; RV32D-ILP32: bb.1 (%ir-block.0):
; RV32D-ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32D-ILP32-NEXT: {{ $}}
; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32D-ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32D-ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32D-ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32D-ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32D-ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32D-ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32D-ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32D-ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32D-ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32D-ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32D-ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32D-ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32D-ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32D-ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32D-ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32D-ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32D-ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32D-ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32D-ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32D-ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32D-ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32D-ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32D-ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV32D-ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32D-ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV32D-ILP32-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
; RV32D-ILP32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32D-ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
; RV32D-ILP32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
; RV32D-ILP32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
; RV32D-ILP32-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: $x10 = COPY [[VAARG]](s32)
; RV32D-ILP32-NEXT: PseudoRET implicit $x10
;
; RV32D-ILP32F-LABEL: name: va1_va_arg_alloca
; RV32D-ILP32F: bb.1 (%ir-block.0):
; RV32D-ILP32F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32D-ILP32F-NEXT: {{ $}}
; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32D-ILP32F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32D-ILP32F-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32D-ILP32F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32D-ILP32F-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32D-ILP32F-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32D-ILP32F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32D-ILP32F-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32D-ILP32F-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32D-ILP32F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32D-ILP32F-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32D-ILP32F-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32D-ILP32F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32D-ILP32F-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32D-ILP32F-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32D-ILP32F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32D-ILP32F-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32D-ILP32F-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32D-ILP32F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32D-ILP32F-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32D-ILP32F-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32D-ILP32F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32D-ILP32F-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32D-ILP32F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV32D-ILP32F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32D-ILP32F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV32D-ILP32F-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
; RV32D-ILP32F-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32D-ILP32F-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
; RV32D-ILP32F-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
; RV32D-ILP32F-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
; RV32D-ILP32F-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: $x10 = COPY [[VAARG]](s32)
; RV32D-ILP32F-NEXT: PseudoRET implicit $x10
;
; RV32D-ILP32D-LABEL: name: va1_va_arg_alloca
; RV32D-ILP32D: bb.1 (%ir-block.0):
; RV32D-ILP32D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32D-ILP32D-NEXT: {{ $}}
; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32D-ILP32D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32D-ILP32D-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32D-ILP32D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32D-ILP32D-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32D-ILP32D-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32D-ILP32D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32D-ILP32D-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32D-ILP32D-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32D-ILP32D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32D-ILP32D-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32D-ILP32D-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32D-ILP32D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32D-ILP32D-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32D-ILP32D-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32D-ILP32D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32D-ILP32D-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32D-ILP32D-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32D-ILP32D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32D-ILP32D-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32D-ILP32D-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32D-ILP32D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32D-ILP32D-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; RV32D-ILP32D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV32D-ILP32D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32D-ILP32D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV32D-ILP32D-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[VAARG]], [[C1]]
; RV32D-ILP32D-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; RV32D-ILP32D-NEXT: [[ADD:%[0-9]+]]:_(s32) = nuw G_ADD [[MUL]], [[C2]]
; RV32D-ILP32D-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -16
; RV32D-ILP32D-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C3]]
; RV32D-ILP32D-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s32), 1
; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: $x10 = COPY [[VAARG]](s32)
; RV32D-ILP32D-NEXT: PseudoRET implicit $x10
;
; LP64-LABEL: name: va1_va_arg_alloca
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; LP64-NEXT: {{ $}}
; LP64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; LP64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; LP64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; LP64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; LP64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; LP64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; LP64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; LP64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; LP64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; LP64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; LP64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; LP64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; LP64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; LP64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; LP64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; LP64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; LP64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; LP64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; LP64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; LP64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; LP64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; LP64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; LP64-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
; LP64-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C1]]
; LP64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; LP64-NEXT: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C2]]
; LP64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
; LP64-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C3]]
; LP64-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64-NEXT: PseudoRET implicit $x10
;
; LP64F-LABEL: name: va1_va_arg_alloca
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; LP64F-NEXT: {{ $}}
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; LP64F-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64F-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; LP64F-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; LP64F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; LP64F-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; LP64F-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; LP64F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; LP64F-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; LP64F-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; LP64F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; LP64F-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; LP64F-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; LP64F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; LP64F-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; LP64F-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; LP64F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; LP64F-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; LP64F-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; LP64F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; LP64F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; LP64F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; LP64F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; LP64F-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
; LP64F-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C1]]
; LP64F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C2]]
; LP64F-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
; LP64F-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C3]]
; LP64F-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64F-NEXT: PseudoRET implicit $x10
;
; LP64D-LABEL: name: va1_va_arg_alloca
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; LP64D-NEXT: {{ $}}
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; LP64D-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; LP64D-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; LP64D-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; LP64D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; LP64D-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; LP64D-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; LP64D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; LP64D-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; LP64D-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; LP64D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; LP64D-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; LP64D-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; LP64D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; LP64D-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; LP64D-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; LP64D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; LP64D-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; LP64D-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; LP64D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; LP64D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; LP64D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; LP64D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; LP64D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; LP64D-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[VAARG]](s32)
; LP64D-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[ZEXT]], [[C1]]
; LP64D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s64) = nuw G_ADD [[MUL]], [[C2]]
; LP64D-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 -16
; LP64D-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C3]]
; LP64D-NEXT: [[DYN_STACKALLOC:%[0-9]+]]:_(p0) = G_DYN_STACKALLOC [[AND]](s64), 1
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: $x10 = COPY [[DYN_STACKALLOC]](p0)
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64)
; LP64D-NEXT: PseudoRET implicit $x10
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i32
  %2 = alloca i8, i32 %1
  call void @notdead(ptr %2)
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}


define i32 @va1_va_arg(ptr %fmt, ...) nounwind {
; RV32-LABEL: name: va1_va_arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV32-NEXT: $x10 = COPY [[VAARG]](s32)
; RV32-NEXT: PseudoRET implicit $x10
;
; RV64-LABEL: name: va1_va_arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4
; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32)
; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64)
; RV64-NEXT: PseudoRET implicit $x10
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i32
  call void @llvm.va_end(ptr %va)
  ret i32 %1
}

define void @va1_caller() nounwind {
; ILP32-LABEL: name: va1_caller
; ILP32: bb.1 (%ir-block.0):
; ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; ILP32-NEXT: $x10 = COPY [[DEF]](p0)
; ILP32-NEXT: $x12 = COPY [[UV]](s32)
; ILP32-NEXT: $x13 = COPY [[UV1]](s32)
; ILP32-NEXT: $x14 = COPY [[C1]](s32)
; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: PseudoRET
;
; RV32D-ILP32-LABEL: name: va1_caller
; RV32D-ILP32: bb.1 (%ir-block.0):
; RV32D-ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; RV32D-ILP32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; RV32D-ILP32-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32-NEXT: $x14 = COPY [[C1]](s32)
; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32-NEXT: PseudoRET
;
; RV32D-ILP32F-LABEL: name: va1_caller
; RV32D-ILP32F: bb.1 (%ir-block.0):
; RV32D-ILP32F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; RV32D-ILP32F-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; RV32D-ILP32F-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32F-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32F-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32F-NEXT: $x14 = COPY [[C1]](s32)
; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32F-NEXT: PseudoRET
;
; RV32D-ILP32D-LABEL: name: va1_caller
; RV32D-ILP32D: bb.1 (%ir-block.0):
; RV32D-ILP32D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; RV32D-ILP32D-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; RV32D-ILP32D-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32D-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32D-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32D-NEXT: $x14 = COPY [[C1]](s32)
; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit $x14, implicit-def $x10
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32D-NEXT: PseudoRET
;
; LP64-LABEL: name: va1_caller
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; LP64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; LP64-NEXT: $x10 = COPY [[DEF]](p0)
; LP64-NEXT: $x11 = COPY [[C]](s64)
; LP64-NEXT: $x12 = COPY [[ANYEXT]](s64)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; LP64-NEXT: PseudoRET
;
; LP64F-LABEL: name: va1_caller
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; LP64F-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; LP64F-NEXT: $x10 = COPY [[DEF]](p0)
; LP64F-NEXT: $x11 = COPY [[C]](s64)
; LP64F-NEXT: $x12 = COPY [[ANYEXT]](s64)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; LP64F-NEXT: PseudoRET
;
; LP64D-LABEL: name: va1_caller
; LP64D: bb.1 (%ir-block.0):
; LP64D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; LP64D-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C1]](s32)
; LP64D-NEXT: $x10 = COPY [[DEF]](p0)
; LP64D-NEXT: $x11 = COPY [[C]](s64)
; LP64D-NEXT: $x12 = COPY [[ANYEXT]](s64)
; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va1, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10
; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; LP64D-NEXT: PseudoRET
  %1 = call i32 (ptr, ...) @va1(ptr undef, double 1.0, i32 2)
  ret void
}

; Ensure that varargs with 2*xlen size and alignment are accessed via an
; "aligned" register pair (where the first register is even-numbered).
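; For example, in the va2_caller checks below the ILP32 caller passes the
; double in the even/odd pair a2/a3 ($x12/$x13) and skips a1. A hedged C
; sketch of the callee (the function name is hypothetical):
;
;   #include <stdarg.h>
;   #include <string.h>
;
;   long long double_vararg_bits(const char *fmt, ...) {
;     va_list va;
;     va_start(va, fmt);
;     double d = va_arg(va, double); /* 8-byte size and alignment */
;     va_end(va);
;     long long bits;
;     memcpy(&bits, &d, sizeof bits); /* bitcast double -> i64 */
;     return bits;
;   }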

define i64 @va2(ptr %fmt, ...) nounwind {
; RV32-LABEL: name: va2
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va)
; RV32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]]
; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C3]](s32)
; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va)
; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD1]](s64)
; RV32-NEXT: $x10 = COPY [[UV]](s32)
; RV32-NEXT: $x11 = COPY [[UV1]](s32)
; RV32-NEXT: PseudoRET implicit $x10, implicit $x11
;
; RV64-LABEL: name: va2
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7
; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; RV64-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va)
; RV64-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]]
; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]]
; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32)
; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C3]](s64)
; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4)
; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32)
; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3)
; RV64-NEXT: $x10 = COPY [[LOAD1]](s64)
; RV64-NEXT: PseudoRET implicit $x10
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %argp.cur = load i32, ptr %va, align 4
  %1 = add i32 %argp.cur, 7
  %2 = and i32 %1, -8
  %argp.cur.aligned = inttoptr i32 %1 to ptr
  %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8
  store ptr %argp.next, ptr %va, align 4
  %3 = inttoptr i32 %2 to ptr
  %4 = load double, ptr %3, align 8
  %5 = bitcast double %4 to i64
  call void @llvm.va_end(ptr %va)
  ret i64 %5
}

define i64 @va2_va_arg(ptr %fmt, ...) nounwind {
; RV32-LABEL: name: va2_va_arg
; RV32: bb.1 (%ir-block.0):
; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV32-NEXT: {{ $}}
; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1)
; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32)
; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4)
; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32)
; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8)
; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32)
; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12)
; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32)
; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16)
; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32)
; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20)
; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32)
; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24)
; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32)
; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va)
; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8
; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[VAARG]](s64)
; RV32-NEXT: $x10 = COPY [[UV]](s32)
; RV32-NEXT: $x11 = COPY [[UV1]](s32)
; RV32-NEXT: PseudoRET implicit $x10, implicit $x11
;
; RV64-LABEL: name: va2_va_arg
; RV64: bb.1 (%ir-block.0):
; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
; RV64-NEXT: {{ $}}
; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1)
; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64)
; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8)
; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64)
; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16)
; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64)
; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24)
; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64)
; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32)
; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64)
; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40)
; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64)
; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48)
; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64)
; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va
; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va)
; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8
; RV64-NEXT: $x10 = COPY [[VAARG]](s64)
; RV64-NEXT: PseudoRET implicit $x10
  %va = alloca ptr
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, double
  call void @llvm.va_end(ptr %va)
  %2 = bitcast double %1 to i64
  ret i64 %2
}

define void @va2_caller() nounwind {
; ILP32-LABEL: name: va2_caller
; ILP32: bb.1 (%ir-block.0):
; ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; ILP32-NEXT: $x10 = COPY [[DEF]](p0)
; ILP32-NEXT: $x12 = COPY [[UV]](s32)
; ILP32-NEXT: $x13 = COPY [[UV1]](s32)
; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; ILP32-NEXT: PseudoRET
;
; RV32D-ILP32-LABEL: name: va2_caller
; RV32D-ILP32: bb.1 (%ir-block.0):
; RV32D-ILP32-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; RV32D-ILP32-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32D-ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; RV32D-ILP32-NEXT: PseudoRET
;
; RV32D-ILP32F-LABEL: name: va2_caller
; RV32D-ILP32F: bb.1 (%ir-block.0):
; RV32D-ILP32F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; RV32D-ILP32F-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32F-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32F-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32D-ILP32F-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; RV32D-ILP32F-NEXT: PseudoRET
;
; RV32D-ILP32D-LABEL: name: va2_caller
; RV32D-ILP32D: bb.1 (%ir-block.0):
; RV32D-ILP32D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; RV32D-ILP32D-NEXT: $x10 = COPY [[DEF]](p0)
; RV32D-ILP32D-NEXT: $x12 = COPY [[UV]](s32)
; RV32D-ILP32D-NEXT: $x13 = COPY [[UV1]](s32)
; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x12, implicit $x13, implicit-def $x10, implicit-def $x11
; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
; RV32D-ILP32D-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
; RV32D-ILP32D-NEXT: PseudoRET
;
; LP64-LABEL: name: va2_caller
; LP64: bb.1 (%ir-block.0):
; LP64-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: $x10 = COPY [[DEF]](p0)
; LP64-NEXT: $x11 = COPY [[C]](s64)
; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; LP64-NEXT: PseudoRET
;
; LP64F-LABEL: name: va2_caller
; LP64F: bb.1 (%ir-block.0):
; LP64F-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00
; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2
; LP64F-NEXT: $x10 = COPY [[DEF]](p0)
; LP64F-NEXT: $x11 = COPY [[C]](s64)
; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10
; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2
| ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64F-NEXT: PseudoRET |
| ; |
| ; LP64D-LABEL: name: va2_caller |
| ; LP64D: bb.1 (%ir-block.0): |
| ; LP64D-NEXT: [[DEF:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF |
| ; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 1.000000e+00 |
| ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64D-NEXT: $x10 = COPY [[DEF]](p0) |
| ; LP64D-NEXT: $x11 = COPY [[C]](s64) |
| ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va2, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit-def $x10 |
| ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64D-NEXT: PseudoRET |
| %1 = call i64 (ptr, ...) @va2(ptr undef, double 1.000000e+00) |
| ret void |
| } |
| |
| ; On RV32, ensure a named 2*xlen argument is passed in a1 and a2, while the |
| ; vararg double is passed in a4 and a5 (rather than a3 and a4). |
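| ; This is visible in va3_caller further below: the double's halves are |
| ; copied to $x14/$x15 (a4/a5) and a3 ($x13) is left unused. |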
| |
| define i64 @va3(i32 %a, i64 %b, ...) nounwind { |
| ; RV32-LABEL: name: va3 |
| ; RV32: bb.1 (%ir-block.0): |
| ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV32-NEXT: {{ $}} |
| ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32) |
| ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1) |
| ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4) |
| ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8) |
| ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12) |
| ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16) |
| ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 |
| ; RV32-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8 |
| ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va |
| ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va) |
| ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va) |
| ; RV32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]] |
| ; RV32-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]] |
| ; RV32-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32) |
| ; RV32-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 |
| ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C3]](s32) |
| ; RV32-NEXT: G_STORE [[PTR_ADD5]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va) |
| ; RV32-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32) |
| ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3) |
| ; RV32-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[LOAD1]] |
| ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ADD1]](s64) |
| ; RV32-NEXT: $x10 = COPY [[UV]](s32) |
| ; RV32-NEXT: $x11 = COPY [[UV1]](s32) |
| ; RV32-NEXT: PseudoRET implicit $x10, implicit $x11 |
| ; |
| ; RV64-LABEL: name: va3 |
| ; RV64: bb.1 (%ir-block.0): |
| ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV64-NEXT: {{ $}} |
| ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) |
| ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 |
| ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0 |
| ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 |
| ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16) |
| ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) |
| ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 |
| ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8) |
| ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64) |
| ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14 |
| ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16) |
| ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) |
| ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15 |
| ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24) |
| ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64) |
| ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16 |
| ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16) |
| ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) |
| ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17 |
| ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40) |
| ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64) |
| ; RV64-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 7 |
| ; RV64-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -8 |
| ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va |
| ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va) |
| ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (dereferenceable load (s32) from %ir.va) |
| ; RV64-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[C1]] |
| ; RV64-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C2]] |
| ; RV64-NEXT: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[ADD]](s32) |
| ; RV64-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[INTTOPTR]], [[C3]](s64) |
| ; RV64-NEXT: G_STORE [[PTR_ADD6]](p0), [[FRAME_INDEX1]](p0) :: (store (p0) into %ir.va, align 4) |
| ; RV64-NEXT: [[INTTOPTR1:%[0-9]+]]:_(p0) = G_INTTOPTR [[AND]](s32) |
| ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[INTTOPTR1]](p0) :: (load (s64) from %ir.3) |
| ; RV64-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[LOAD1]] |
| ; RV64-NEXT: $x10 = COPY [[ADD1]](s64) |
| ; RV64-NEXT: PseudoRET implicit $x10 |
| %va = alloca ptr |
| call void @llvm.va_start(ptr %va) |
| %argp.cur = load i32, ptr %va, align 4 |
| %1 = add i32 %argp.cur, 7 |
| %2 = and i32 %1, -8 |
| %argp.cur.aligned = inttoptr i32 %1 to ptr |
| %argp.next = getelementptr inbounds i8, ptr %argp.cur.aligned, i32 8 |
| store ptr %argp.next, ptr %va, align 4 |
| %3 = inttoptr i32 %2 to ptr |
| %4 = load double, ptr %3, align 8 |
| call void @llvm.va_end(ptr %va) |
| %5 = bitcast double %4 to i64 |
| %6 = add i64 %b, %5 |
| ret i64 %6 |
| } |
| |
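| ; va3_va_arg covers the same argument layout as va3, but uses the va_arg |
| ; instruction, which the IRTranslator lowers to G_VAARG instead of the |
| ; explicit pointer arithmetic seen above. |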
| define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind { |
| ; RV32-LABEL: name: va3_va_arg |
| ; RV32: bb.1 (%ir-block.0): |
| ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV32-NEXT: {{ $}} |
| ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; RV32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32) |
| ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; RV32-NEXT: G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1) |
| ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4) |
| ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8) |
| ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12) |
| ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16) |
| ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va |
| ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va) |
| ; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8 |
| ; RV32-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MV]], [[VAARG]] |
| ; RV32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ADD]](s64) |
| ; RV32-NEXT: $x10 = COPY [[UV]](s32) |
| ; RV32-NEXT: $x11 = COPY [[UV1]](s32) |
| ; RV32-NEXT: PseudoRET implicit $x10, implicit $x11 |
| ; |
| ; RV64-LABEL: name: va3_va_arg |
| ; RV64: bb.1 (%ir-block.0): |
| ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV64-NEXT: {{ $}} |
| ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; RV64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) |
| ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 |
| ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0 |
| ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 |
| ; RV64-NEXT: G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16) |
| ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) |
| ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 |
| ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8) |
| ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64) |
| ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14 |
| ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16) |
| ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) |
| ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15 |
| ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24) |
| ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64) |
| ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16 |
| ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16) |
| ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) |
| ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17 |
| ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40) |
| ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64) |
| ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va |
| ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va) |
| ; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s64) = G_VAARG [[FRAME_INDEX1]](p0), 8 |
| ; RV64-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY1]], [[VAARG]] |
| ; RV64-NEXT: $x10 = COPY [[ADD]](s64) |
| ; RV64-NEXT: PseudoRET implicit $x10 |
| %va = alloca ptr |
| call void @llvm.va_start(ptr %va) |
| %1 = va_arg ptr %va, double |
| call void @llvm.va_end(ptr %va) |
| %2 = bitcast double %1 to i64 |
| %3 = add i64 %b, %2 |
| ret i64 %3 |
| } |
| |
| define void @va3_caller() nounwind { |
| ; ILP32-LABEL: name: va3_caller |
| ; ILP32: bb.1 (%ir-block.0): |
| ; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 |
| ; ILP32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111 |
| ; ILP32-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 |
| ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64) |
| ; ILP32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64) |
| ; ILP32-NEXT: $x10 = COPY [[C]](s32) |
| ; ILP32-NEXT: $x11 = COPY [[UV]](s32) |
| ; ILP32-NEXT: $x12 = COPY [[UV1]](s32) |
| ; ILP32-NEXT: $x14 = COPY [[UV2]](s32) |
| ; ILP32-NEXT: $x15 = COPY [[UV3]](s32) |
| ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11 |
| ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) |
| ; ILP32-NEXT: PseudoRET |
| ; |
| ; RV32D-ILP32-LABEL: name: va3_caller |
| ; RV32D-ILP32: bb.1 (%ir-block.0): |
| ; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 |
| ; RV32D-ILP32-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111 |
| ; RV32D-ILP32-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 |
| ; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64) |
| ; RV32D-ILP32-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64) |
| ; RV32D-ILP32-NEXT: $x10 = COPY [[C]](s32) |
| ; RV32D-ILP32-NEXT: $x11 = COPY [[UV]](s32) |
| ; RV32D-ILP32-NEXT: $x12 = COPY [[UV1]](s32) |
| ; RV32D-ILP32-NEXT: $x14 = COPY [[UV2]](s32) |
| ; RV32D-ILP32-NEXT: $x15 = COPY [[UV3]](s32) |
| ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11 |
| ; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32D-ILP32-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) |
| ; RV32D-ILP32-NEXT: PseudoRET |
| ; |
| ; RV32D-ILP32F-LABEL: name: va3_caller |
| ; RV32D-ILP32F: bb.1 (%ir-block.0): |
| ; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 |
| ; RV32D-ILP32F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111 |
| ; RV32D-ILP32F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 |
| ; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32F-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64) |
| ; RV32D-ILP32F-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64) |
| ; RV32D-ILP32F-NEXT: $x10 = COPY [[C]](s32) |
| ; RV32D-ILP32F-NEXT: $x11 = COPY [[UV]](s32) |
| ; RV32D-ILP32F-NEXT: $x12 = COPY [[UV1]](s32) |
| ; RV32D-ILP32F-NEXT: $x14 = COPY [[UV2]](s32) |
| ; RV32D-ILP32F-NEXT: $x15 = COPY [[UV3]](s32) |
| ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11 |
| ; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32D-ILP32F-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) |
| ; RV32D-ILP32F-NEXT: PseudoRET |
| ; |
| ; RV32D-ILP32D-LABEL: name: va3_caller |
| ; RV32D-ILP32D: bb.1 (%ir-block.0): |
| ; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 |
| ; RV32D-ILP32D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111 |
| ; RV32D-ILP32D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 |
| ; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32D-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64) |
| ; RV32D-ILP32D-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64) |
| ; RV32D-ILP32D-NEXT: $x10 = COPY [[C]](s32) |
| ; RV32D-ILP32D-NEXT: $x11 = COPY [[UV]](s32) |
| ; RV32D-ILP32D-NEXT: $x12 = COPY [[UV1]](s32) |
| ; RV32D-ILP32D-NEXT: $x14 = COPY [[UV2]](s32) |
| ; RV32D-ILP32D-NEXT: $x15 = COPY [[UV3]](s32) |
| ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit $x14, implicit $x15, implicit-def $x10, implicit-def $x11 |
| ; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32D-ILP32D-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) |
| ; RV32D-ILP32D-NEXT: PseudoRET |
| ; |
| ; LP64-LABEL: name: va3_caller |
| ; LP64: bb.1 (%ir-block.0): |
| ; LP64-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 |
| ; LP64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111 |
| ; LP64-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 |
| ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) |
| ; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; LP64-NEXT: $x11 = COPY [[C1]](s64) |
| ; LP64-NEXT: $x12 = COPY [[C2]](s64) |
| ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32_lp64, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10 |
| ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64-NEXT: PseudoRET |
| ; |
| ; LP64F-LABEL: name: va3_caller |
| ; LP64F: bb.1 (%ir-block.0): |
| ; LP64F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 |
| ; LP64F-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111 |
| ; LP64F-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 |
| ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) |
| ; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; LP64F-NEXT: $x11 = COPY [[C1]](s64) |
| ; LP64F-NEXT: $x12 = COPY [[C2]](s64) |
| ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10 |
| ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64F-NEXT: PseudoRET |
| ; |
| ; LP64D-LABEL: name: va3_caller |
| ; LP64D: bb.1 (%ir-block.0): |
| ; LP64D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 |
| ; LP64D-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1111 |
| ; LP64D-NEXT: [[C2:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00 |
| ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[C]](s32) |
| ; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; LP64D-NEXT: $x11 = COPY [[C1]](s64) |
| ; LP64D-NEXT: $x12 = COPY [[C2]](s64) |
| ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @va3, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10, implicit $x11, implicit $x12, implicit-def $x10 |
| ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64D-NEXT: PseudoRET |
| %1 = call i64 (i32, i64, ...) @va3(i32 2, i64 1111, double 2.000000e+00) |
| ret void |
| } |
| |
| declare void @llvm.va_copy(ptr, ptr) |
| |
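| ; va4_va_copy checks that va_copy is translated to a |
| ; G_INTRINSIC_W_SIDE_EFFECTS of @llvm.va_copy, and that the copied list can |
| ; be reloaded and passed to @notdead before further va_arg reads. |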
| define i32 @va4_va_copy(i32 %argno, ...) nounwind { |
| ; ILP32-LABEL: name: va4_va_copy |
| ; ILP32: bb.1 (%ir-block.0): |
| ; ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; ILP32-NEXT: {{ $}} |
| ; ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1) |
| ; ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4) |
| ; ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8) |
| ; ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12) |
| ; ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16) |
| ; ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20) |
| ; ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32) |
| ; ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24) |
| ; ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) |
| ; ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs |
| ; ILP32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs |
| ; ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs) |
| ; ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; ILP32-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0) |
| ; ILP32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs) |
| ; ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; ILP32-NEXT: $x10 = COPY [[LOAD]](p0) |
| ; ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10 |
| ; ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; ILP32-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; ILP32-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; ILP32-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]] |
| ; ILP32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]] |
| ; ILP32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]] |
| ; ILP32-NEXT: $x10 = COPY [[ADD2]](s32) |
| ; ILP32-NEXT: PseudoRET implicit $x10 |
| ; |
| ; RV32D-ILP32-LABEL: name: va4_va_copy |
| ; RV32D-ILP32: bb.1 (%ir-block.0): |
| ; RV32D-ILP32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV32D-ILP32-NEXT: {{ $}} |
| ; RV32D-ILP32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32D-ILP32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; RV32D-ILP32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32D-ILP32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32D-ILP32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1) |
| ; RV32D-ILP32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; RV32D-ILP32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; RV32D-ILP32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4) |
| ; RV32D-ILP32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; RV32D-ILP32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; RV32D-ILP32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8) |
| ; RV32D-ILP32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; RV32D-ILP32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; RV32D-ILP32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12) |
| ; RV32D-ILP32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; RV32D-ILP32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; RV32D-ILP32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16) |
| ; RV32D-ILP32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; RV32D-ILP32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; RV32D-ILP32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20) |
| ; RV32D-ILP32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32) |
| ; RV32D-ILP32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; RV32D-ILP32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24) |
| ; RV32D-ILP32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) |
| ; RV32D-ILP32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs |
| ; RV32D-ILP32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs |
| ; RV32D-ILP32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs) |
| ; RV32D-ILP32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0) |
| ; RV32D-ILP32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs) |
| ; RV32D-ILP32-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32-NEXT: $x10 = COPY [[LOAD]](p0) |
| ; RV32D-ILP32-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10 |
| ; RV32D-ILP32-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]] |
| ; RV32D-ILP32-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]] |
| ; RV32D-ILP32-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]] |
| ; RV32D-ILP32-NEXT: $x10 = COPY [[ADD2]](s32) |
| ; RV32D-ILP32-NEXT: PseudoRET implicit $x10 |
| ; |
| ; RV32D-ILP32F-LABEL: name: va4_va_copy |
| ; RV32D-ILP32F: bb.1 (%ir-block.0): |
| ; RV32D-ILP32F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV32D-ILP32F-NEXT: {{ $}} |
| ; RV32D-ILP32F-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32D-ILP32F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; RV32D-ILP32F-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32D-ILP32F-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32D-ILP32F-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1) |
| ; RV32D-ILP32F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; RV32D-ILP32F-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; RV32D-ILP32F-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4) |
| ; RV32D-ILP32F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; RV32D-ILP32F-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; RV32D-ILP32F-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8) |
| ; RV32D-ILP32F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; RV32D-ILP32F-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; RV32D-ILP32F-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12) |
| ; RV32D-ILP32F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; RV32D-ILP32F-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; RV32D-ILP32F-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16) |
| ; RV32D-ILP32F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; RV32D-ILP32F-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; RV32D-ILP32F-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20) |
| ; RV32D-ILP32F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32) |
| ; RV32D-ILP32F-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; RV32D-ILP32F-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24) |
| ; RV32D-ILP32F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) |
| ; RV32D-ILP32F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs |
| ; RV32D-ILP32F-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs |
| ; RV32D-ILP32F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs) |
| ; RV32D-ILP32F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32F-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0) |
| ; RV32D-ILP32F-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs) |
| ; RV32D-ILP32F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32F-NEXT: $x10 = COPY [[LOAD]](p0) |
| ; RV32D-ILP32F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10 |
| ; RV32D-ILP32F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32F-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32F-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32F-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32F-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]] |
| ; RV32D-ILP32F-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]] |
| ; RV32D-ILP32F-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]] |
| ; RV32D-ILP32F-NEXT: $x10 = COPY [[ADD2]](s32) |
| ; RV32D-ILP32F-NEXT: PseudoRET implicit $x10 |
| ; |
| ; RV32D-ILP32D-LABEL: name: va4_va_copy |
| ; RV32D-ILP32D: bb.1 (%ir-block.0): |
| ; RV32D-ILP32D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV32D-ILP32D-NEXT: {{ $}} |
| ; RV32D-ILP32D-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32D-ILP32D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; RV32D-ILP32D-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32D-ILP32D-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32D-ILP32D-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1) |
| ; RV32D-ILP32D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; RV32D-ILP32D-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; RV32D-ILP32D-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4) |
| ; RV32D-ILP32D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; RV32D-ILP32D-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; RV32D-ILP32D-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8) |
| ; RV32D-ILP32D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; RV32D-ILP32D-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; RV32D-ILP32D-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12) |
| ; RV32D-ILP32D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; RV32D-ILP32D-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; RV32D-ILP32D-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16) |
| ; RV32D-ILP32D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; RV32D-ILP32D-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; RV32D-ILP32D-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20) |
| ; RV32D-ILP32D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32) |
| ; RV32D-ILP32D-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; RV32D-ILP32D-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24) |
| ; RV32D-ILP32D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) |
| ; RV32D-ILP32D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs |
| ; RV32D-ILP32D-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs |
| ; RV32D-ILP32D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.vargs) |
| ; RV32D-ILP32D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32D-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0) |
| ; RV32D-ILP32D-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs) |
| ; RV32D-ILP32D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32D-NEXT: $x10 = COPY [[LOAD]](p0) |
| ; RV32D-ILP32D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10 |
| ; RV32D-ILP32D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; RV32D-ILP32D-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32D-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32D-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32D-ILP32D-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]] |
| ; RV32D-ILP32D-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]] |
| ; RV32D-ILP32D-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]] |
| ; RV32D-ILP32D-NEXT: $x10 = COPY [[ADD2]](s32) |
| ; RV32D-ILP32D-NEXT: PseudoRET implicit $x10 |
| ; |
| ; LP64-LABEL: name: va4_va_copy |
| ; LP64: bb.1 (%ir-block.0): |
| ; LP64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; LP64-NEXT: {{ $}} |
| ; LP64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) |
| ; LP64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; LP64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; LP64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 |
| ; LP64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1) |
| ; LP64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) |
| ; LP64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 |
| ; LP64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8) |
| ; LP64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64) |
| ; LP64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 |
| ; LP64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16) |
| ; LP64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) |
| ; LP64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14 |
| ; LP64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24) |
| ; LP64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64) |
| ; LP64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15 |
| ; LP64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32) |
| ; LP64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) |
| ; LP64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16 |
| ; LP64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40) |
| ; LP64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64) |
| ; LP64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17 |
| ; LP64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48) |
| ; LP64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) |
| ; LP64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs |
| ; LP64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs |
| ; LP64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.vargs) |
| ; LP64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0) |
| ; LP64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4) |
| ; LP64-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64-NEXT: $x10 = COPY [[LOAD]](p0) |
| ; LP64-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32_lp64, implicit-def $x1, implicit $x10 |
| ; LP64-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]] |
| ; LP64-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]] |
| ; LP64-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]] |
| ; LP64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32) |
| ; LP64-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; LP64-NEXT: PseudoRET implicit $x10 |
| ; |
| ; LP64F-LABEL: name: va4_va_copy |
| ; LP64F: bb.1 (%ir-block.0): |
| ; LP64F-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; LP64F-NEXT: {{ $}} |
| ; LP64F-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64F-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) |
| ; LP64F-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; LP64F-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; LP64F-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 |
| ; LP64F-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1) |
| ; LP64F-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) |
| ; LP64F-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 |
| ; LP64F-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8) |
| ; LP64F-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64) |
| ; LP64F-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 |
| ; LP64F-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16) |
| ; LP64F-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) |
| ; LP64F-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14 |
| ; LP64F-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24) |
| ; LP64F-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64) |
| ; LP64F-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15 |
| ; LP64F-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32) |
| ; LP64F-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) |
| ; LP64F-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16 |
| ; LP64F-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40) |
| ; LP64F-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64) |
| ; LP64F-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17 |
| ; LP64F-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48) |
| ; LP64F-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) |
| ; LP64F-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs |
| ; LP64F-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs |
| ; LP64F-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.vargs) |
| ; LP64F-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64F-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0) |
| ; LP64F-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4) |
| ; LP64F-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64F-NEXT: $x10 = COPY [[LOAD]](p0) |
| ; LP64F-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32f_lp64f, implicit-def $x1, implicit $x10 |
| ; LP64F-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64F-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64F-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64F-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64F-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]] |
| ; LP64F-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]] |
| ; LP64F-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]] |
| ; LP64F-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32) |
| ; LP64F-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; LP64F-NEXT: PseudoRET implicit $x10 |
| ; |
| ; LP64D-LABEL: name: va4_va_copy |
| ; LP64D: bb.1 (%ir-block.0): |
| ; LP64D-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; LP64D-NEXT: {{ $}} |
| ; LP64D-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; LP64D-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64) |
| ; LP64D-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; LP64D-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; LP64D-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 |
| ; LP64D-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1) |
| ; LP64D-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) |
| ; LP64D-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 |
| ; LP64D-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8) |
| ; LP64D-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64) |
| ; LP64D-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 |
| ; LP64D-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16) |
| ; LP64D-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) |
| ; LP64D-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14 |
| ; LP64D-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24) |
| ; LP64D-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64) |
| ; LP64D-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15 |
| ; LP64D-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32) |
| ; LP64D-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) |
| ; LP64D-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16 |
| ; LP64D-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40) |
| ; LP64D-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64) |
| ; LP64D-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17 |
| ; LP64D-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48) |
| ; LP64D-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) |
| ; LP64D-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.vargs |
| ; LP64D-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.wargs |
| ; LP64D-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.vargs) |
| ; LP64D-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64D-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.va_copy), [[FRAME_INDEX2]](p0), [[FRAME_INDEX1]](p0) |
| ; LP64D-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.wargs, align 4) |
| ; LP64D-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64D-NEXT: $x10 = COPY [[LOAD]](p0) |
| ; LP64D-NEXT: PseudoCALL target-flags(riscv-call) @notdead, csr_ilp32d_lp64d, implicit-def $x1, implicit $x10 |
| ; LP64D-NEXT: ADJCALLSTACKUP 0, 0, implicit-def $x2, implicit $x2 |
| ; LP64D-NEXT: [[VAARG1:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64D-NEXT: [[VAARG2:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64D-NEXT: [[VAARG3:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; LP64D-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[VAARG1]], [[VAARG]] |
| ; LP64D-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[VAARG2]] |
| ; LP64D-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[VAARG3]] |
| ; LP64D-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[ADD2]](s32) |
| ; LP64D-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; LP64D-NEXT: PseudoRET implicit $x10 |
| %vargs = alloca ptr |
| %wargs = alloca ptr |
| call void @llvm.va_start(ptr %vargs) |
| %1 = va_arg ptr %vargs, i32 |
| call void @llvm.va_copy(ptr %wargs, ptr %vargs) |
| %2 = load ptr, ptr %wargs, align 4 |
| call void @notdead(ptr %2) |
| %3 = va_arg ptr %vargs, i32 |
| %4 = va_arg ptr %vargs, i32 |
| %5 = va_arg ptr %vargs, i32 |
| call void @llvm.va_end(ptr %vargs) |
| call void @llvm.va_end(ptr %wargs) |
| %add1 = add i32 %3, %1 |
| %add2 = add i32 %add1, %4 |
| %add3 = add i32 %add2, %5 |
| ret i32 %add3 |
| } |
| |
| ; A function with no fixed arguments is not valid C (prior to C23), but can |
| ; be specified in LLVM IR. We must ensure the vararg save area is still set |
| ; up correctly. |
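| ; With no named arguments, all eight GPR argument registers (a0-a7, i.e. |
| ; x10-x17) must be spilled to the save area, as the stores below verify. |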
| |
| define i32 @va6_no_fixed_args(...) nounwind { |
| ; RV32-LABEL: name: va6_no_fixed_args |
| ; RV32: bb.1 (%ir-block.0): |
| ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV32-NEXT: {{ $}} |
| ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0 |
| ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $x10 |
| ; RV32-NEXT: G_STORE [[COPY]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.0, align 16) |
| ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32-NEXT: G_STORE [[COPY1]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.0 + 4) |
| ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.0 + 8, align 8) |
| ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.0 + 12) |
| ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.0 + 16, align 16) |
| ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.0 + 20) |
| ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32) |
| ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.0 + 24, align 8) |
| ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) |
| ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD6]](p0) :: (store (s32) into %fixed-stack.0 + 28) |
| ; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD6]], [[C]](s32) |
| ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va |
| ; RV32-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s32) into %ir.va) |
| ; RV32-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV32-NEXT: $x10 = COPY [[VAARG]](s32) |
| ; RV32-NEXT: PseudoRET implicit $x10 |
| ; |
| ; RV64-LABEL: name: va6_no_fixed_args |
| ; RV64: bb.1 (%ir-block.0): |
| ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV64-NEXT: {{ $}} |
| ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0 |
| ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; RV64-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10 |
| ; RV64-NEXT: G_STORE [[COPY]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.0, align 16) |
| ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) |
| ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 |
| ; RV64-NEXT: G_STORE [[COPY1]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.0 + 8) |
| ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64) |
| ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 |
| ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.0 + 16, align 16) |
| ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) |
| ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 |
| ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.0 + 24) |
| ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64) |
| ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14 |
| ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.0 + 32, align 16) |
| ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) |
| ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15 |
| ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.0 + 40) |
| ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64) |
| ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16 |
| ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.0 + 48, align 16) |
| ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) |
| ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17 |
| ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD6]](p0) :: (store (s64) into %fixed-stack.0 + 56) |
| ; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD6]], [[C]](s64) |
| ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.va |
| ; RV64-NEXT: G_VASTART [[FRAME_INDEX1]](p0) :: (store (s64) into %ir.va) |
| ; RV64-NEXT: [[VAARG:%[0-9]+]]:_(s32) = G_VAARG [[FRAME_INDEX1]](p0), 4 |
| ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[VAARG]](s32) |
| ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; RV64-NEXT: PseudoRET implicit $x10 |
| %va = alloca ptr |
| call void @llvm.va_start(ptr %va) |
| %1 = va_arg ptr %va, i32 |
| call void @llvm.va_end(ptr %va) |
| ret i32 %1 |
| } |
| |
| ; TODO: improve constant materialization of stack addresses |
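| ; va_large_stack additionally allocates a large object (%stack.0.large), so |
| ; the frame offsets likely exceed the 12-bit immediate range; see the TODO |
| ; above. |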
| |
| define i32 @va_large_stack(ptr %fmt, ...) { |
| ; RV32-LABEL: name: va_large_stack |
| ; RV32: bb.1 (%ir-block.0): |
| ; RV32-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV32-NEXT: {{ $}} |
| ; RV32-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 |
| ; RV32-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; RV32-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $x11 |
| ; RV32-NEXT: G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32) into %fixed-stack.1) |
| ; RV32-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s32) |
| ; RV32-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $x12 |
| ; RV32-NEXT: G_STORE [[COPY2]](s32), [[PTR_ADD]](p0) :: (store (s32) into %fixed-stack.1 + 4) |
| ; RV32-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s32) |
| ; RV32-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $x13 |
| ; RV32-NEXT: G_STORE [[COPY3]](s32), [[PTR_ADD1]](p0) :: (store (s32) into %fixed-stack.1 + 8) |
| ; RV32-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s32) |
| ; RV32-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $x14 |
| ; RV32-NEXT: G_STORE [[COPY4]](s32), [[PTR_ADD2]](p0) :: (store (s32) into %fixed-stack.1 + 12) |
| ; RV32-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s32) |
| ; RV32-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $x15 |
| ; RV32-NEXT: G_STORE [[COPY5]](s32), [[PTR_ADD3]](p0) :: (store (s32) into %fixed-stack.1 + 16) |
| ; RV32-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s32) |
| ; RV32-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $x16 |
| ; RV32-NEXT: G_STORE [[COPY6]](s32), [[PTR_ADD4]](p0) :: (store (s32) into %fixed-stack.1 + 20) |
| ; RV32-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s32) |
| ; RV32-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $x17 |
| ; RV32-NEXT: G_STORE [[COPY7]](s32), [[PTR_ADD5]](p0) :: (store (s32) into %fixed-stack.1 + 24) |
| ; RV32-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s32) |
| ; RV32-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.large |
| ; RV32-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.va |
| ; RV32-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s32) into %ir.va) |
| ; RV32-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va) |
| ; RV32-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 |
| ; RV32-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C1]](s32) |
| ; RV32-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va) |
| ; RV32-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur) |
| ; RV32-NEXT: $x10 = COPY [[LOAD1]](s32) |
| ; RV32-NEXT: PseudoRET implicit $x10 |
| ; |
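| ; The RV64 block below differs only mechanically from the RV32 one: the |
| ; register save area is filled at an 8-byte stride with s64 stores of |
| ; x11-x17, and the loaded i32 is any-extended to s64 for the return. |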
| ; RV64-LABEL: name: va_large_stack |
| ; RV64: bb.1 (%ir-block.0): |
| ; RV64-NEXT: liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17 |
| ; RV64-NEXT: {{ $}} |
| ; RV64-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10 |
| ; RV64-NEXT: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1 |
| ; RV64-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 |
| ; RV64-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11 |
| ; RV64-NEXT: G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64) into %fixed-stack.1) |
| ; RV64-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[FRAME_INDEX]], [[C]](s64) |
| ; RV64-NEXT: [[COPY2:%[0-9]+]]:_(s64) = COPY $x12 |
| ; RV64-NEXT: G_STORE [[COPY2]](s64), [[PTR_ADD]](p0) :: (store (s64) into %fixed-stack.1 + 8) |
| ; RV64-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C]](s64) |
| ; RV64-NEXT: [[COPY3:%[0-9]+]]:_(s64) = COPY $x13 |
| ; RV64-NEXT: G_STORE [[COPY3]](s64), [[PTR_ADD1]](p0) :: (store (s64) into %fixed-stack.1 + 16) |
| ; RV64-NEXT: [[PTR_ADD2:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD1]], [[C]](s64) |
| ; RV64-NEXT: [[COPY4:%[0-9]+]]:_(s64) = COPY $x14 |
| ; RV64-NEXT: G_STORE [[COPY4]](s64), [[PTR_ADD2]](p0) :: (store (s64) into %fixed-stack.1 + 24) |
| ; RV64-NEXT: [[PTR_ADD3:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD2]], [[C]](s64) |
| ; RV64-NEXT: [[COPY5:%[0-9]+]]:_(s64) = COPY $x15 |
| ; RV64-NEXT: G_STORE [[COPY5]](s64), [[PTR_ADD3]](p0) :: (store (s64) into %fixed-stack.1 + 32) |
| ; RV64-NEXT: [[PTR_ADD4:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD3]], [[C]](s64) |
| ; RV64-NEXT: [[COPY6:%[0-9]+]]:_(s64) = COPY $x16 |
| ; RV64-NEXT: G_STORE [[COPY6]](s64), [[PTR_ADD4]](p0) :: (store (s64) into %fixed-stack.1 + 40) |
| ; RV64-NEXT: [[PTR_ADD5:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD4]], [[C]](s64) |
| ; RV64-NEXT: [[COPY7:%[0-9]+]]:_(s64) = COPY $x17 |
| ; RV64-NEXT: G_STORE [[COPY7]](s64), [[PTR_ADD5]](p0) :: (store (s64) into %fixed-stack.1 + 48) |
| ; RV64-NEXT: [[PTR_ADD6:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD5]], [[C]](s64) |
| ; RV64-NEXT: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.0.large |
| ; RV64-NEXT: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %stack.1.va |
| ; RV64-NEXT: G_VASTART [[FRAME_INDEX2]](p0) :: (store (s64) into %ir.va) |
| ; RV64-NEXT: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (dereferenceable load (p0) from %ir.va, align 4) |
| ; RV64-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4 |
| ; RV64-NEXT: [[PTR_ADD7:%[0-9]+]]:_(p0) = nuw G_PTR_ADD [[LOAD]], [[C1]](s64) |
| ; RV64-NEXT: G_STORE [[PTR_ADD7]](p0), [[FRAME_INDEX2]](p0) :: (store (p0) into %ir.va, align 4) |
| ; RV64-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load (s32) from %ir.argp.cur) |
| ; RV64-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD1]](s32) |
| ; RV64-NEXT: $x10 = COPY [[ANYEXT]](s64) |
| ; RV64-NEXT: PseudoRET implicit $x10 |
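| ; The body below open-codes va_arg by hand: load the current argument |
| ; pointer from the va_list, bump it past one 4-byte slot, store it back, |
| ; and read the i32 argument. The explicit `align 4` on the ptr load and |
| ; store is what carries through to the `align 4` memory operands in the |
| ; RV64 checks above, where a p0 would otherwise be 8-byte aligned. |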
| %large = alloca [100000000 x i8] |
| %va = alloca ptr |
| call void @llvm.va_start(ptr %va) |
| %argp.cur = load ptr, ptr %va, align 4 |
| %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4 |
| store ptr %argp.next, ptr %va, align 4 |
| %1 = load i32, ptr %argp.cur, align 4 |
| call void @llvm.va_end(ptr %va) |
| ret i32 %1 |
| } |