; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs \
; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zfh -verify-machineinstrs \
; RUN:   -target-abi lp64f -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZFH
; RUN: llc -mtriple=riscv64 -mattr=+zhinx -verify-machineinstrs \
; RUN:   -target-abi lp64 -disable-strictnode-mutation < %s | \
; RUN:   FileCheck %s -check-prefixes=CHECK,RV64IZHINX

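; RISC-V has no instructions for converting between half and i128, so these
; constrained (strictfp) conversions are lowered to compiler-rt libcalls.

; Signed f16 -> i128: soft-float RV64I first extends the half argument to
; float with __extendhfsf2 and then calls __fixsfti; Zfh and Zhinx call
; __fixhfti directly.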
define i128 @fptosi_f16_to_i128(half %a) nounwind strictfp {
; RV64I-LABEL: fptosi_f16_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __extendhfsf2
; RV64I-NEXT:    call __fixsfti
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: fptosi_f16_to_i128:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __fixhfti
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: fptosi_f16_to_i128:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __fixhfti
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptosi.i128.f16(half %a, metadata !"fpexcept.strict")
  ret i128 %1
}

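; Unsigned f16 -> i128: same pattern as above, using the unsigned libcalls
; __fixunssfti and __fixunshfti.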
define i128 @fptoui_f16_to_i128(half %a) nounwind strictfp {
; RV64I-LABEL: fptoui_f16_to_i128:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __extendhfsf2
; RV64I-NEXT:    call __fixunssfti
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: fptoui_f16_to_i128:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __fixunshfti
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: fptoui_f16_to_i128:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __fixunshfti
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call i128 @llvm.experimental.constrained.fptoui.i128.f16(half %a, metadata !"fpexcept.strict")
  ret i128 %1
}

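; Signed i128 -> f16: soft-float RV64I calls __floattisf and then narrows the
; float result with __truncsfhf2; Zfh and Zhinx call __floattihf directly.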
define half @sitofp_i128_to_f16(i128 %a) nounwind strictfp {
; RV64I-LABEL: sitofp_i128_to_f16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floattisf
; RV64I-NEXT:    call __truncsfhf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: sitofp_i128_to_f16:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __floattihf
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: sitofp_i128_to_f16:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __floattihf
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call half @llvm.experimental.constrained.sitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}

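; Unsigned i128 -> f16: same pattern with __floatuntisf and __floatuntihf.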
define half @uitofp_i128_to_f16(i128 %a) nounwind strictfp {
; RV64I-LABEL: uitofp_i128_to_f16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatuntisf
; RV64I-NEXT:    call __truncsfhf2
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IZFH-LABEL: uitofp_i128_to_f16:
; RV64IZFH:       # %bb.0:
; RV64IZFH-NEXT:    addi sp, sp, -16
; RV64IZFH-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT:    call __floatuntihf
; RV64IZFH-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT:    addi sp, sp, 16
; RV64IZFH-NEXT:    ret
;
; RV64IZHINX-LABEL: uitofp_i128_to_f16:
; RV64IZHINX:       # %bb.0:
; RV64IZHINX-NEXT:    addi sp, sp, -16
; RV64IZHINX-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZHINX-NEXT:    call __floatuntihf
; RV64IZHINX-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZHINX-NEXT:    addi sp, sp, 16
; RV64IZHINX-NEXT:    ret
  %1 = call half @llvm.experimental.constrained.uitofp.f16.i128(i128 %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret half %1
}
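
; Declarations for the constrained intrinsics used above (harmless if the
; parser already declares intrinsics implicitly).
declare i128 @llvm.experimental.constrained.fptosi.i128.f16(half, metadata)
declare i128 @llvm.experimental.constrained.fptoui.i128.f16(half, metadata)
declare half @llvm.experimental.constrained.sitofp.f16.i128(i128, metadata, metadata)
declare half @llvm.experimental.constrained.uitofp.f16.i128(i128, metadata, metadata)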
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK: {{.*}}