| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 |
| ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -fp-contract=on < %s | FileCheck -check-prefixes=SI,SI-STRICT %s |
| ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=verde -fp-contract=on < %s | FileCheck -check-prefixes=SI,SI-STRICT %s |
| ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tahiti -fp-contract=fast < %s | FileCheck -check-prefixes=SI,SI-CONTRACT %s |
| ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=verde -fp-contract=fast < %s | FileCheck -check-prefixes=SI,SI-CONTRACT %s |
| ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=on < %s | FileCheck -check-prefixes=VI,VI-STRICT %s |
| ; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -fp-contract=fast < %s | FileCheck -check-prefixes=VI,VI-CONTRACT %s |
| |
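; llvm.fmuladd.f64 is expected to select to v_fma_f64 under both -fp-contract=on and -fp-contract=fast.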
| define amdgpu_kernel void @fmuladd_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { |
| ; SI-LABEL: fmuladd_f64: |
| ; SI: ; %bb.0: |
| ; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 |
| ; SI-NEXT: s_mov_b32 s11, 0xf000 |
| ; SI-NEXT: s_mov_b32 s10, -1 |
| ; SI-NEXT: s_mov_b32 s14, s10 |
| ; SI-NEXT: s_mov_b32 s15, s11 |
| ; SI-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-NEXT: s_mov_b32 s12, s2 |
| ; SI-NEXT: s_mov_b32 s13, s3 |
| ; SI-NEXT: s_mov_b32 s16, s4 |
| ; SI-NEXT: s_mov_b32 s17, s5 |
| ; SI-NEXT: s_mov_b32 s18, s10 |
| ; SI-NEXT: s_mov_b32 s19, s11 |
| ; SI-NEXT: s_mov_b32 s4, s6 |
| ; SI-NEXT: s_mov_b32 s5, s7 |
| ; SI-NEXT: s_mov_b32 s6, s10 |
| ; SI-NEXT: s_mov_b32 s7, s11 |
| ; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; SI-NEXT: s_mov_b32 s8, s0 |
| ; SI-NEXT: s_mov_b32 s9, s1 |
| ; SI-NEXT: s_waitcnt vmcnt(0) |
| ; SI-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5] |
| ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; SI-NEXT: s_endpgm |
| ; |
| ; VI-LABEL: fmuladd_f64: |
| ; VI: ; %bb.0: |
| ; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 |
| ; VI-NEXT: s_mov_b32 s11, 0xf000 |
| ; VI-NEXT: s_mov_b32 s10, -1 |
| ; VI-NEXT: s_mov_b32 s14, s10 |
| ; VI-NEXT: s_mov_b32 s15, s11 |
| ; VI-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-NEXT: s_mov_b32 s12, s2 |
| ; VI-NEXT: s_mov_b32 s13, s3 |
| ; VI-NEXT: s_mov_b32 s16, s4 |
| ; VI-NEXT: s_mov_b32 s17, s5 |
| ; VI-NEXT: s_mov_b32 s18, s10 |
| ; VI-NEXT: s_mov_b32 s19, s11 |
| ; VI-NEXT: s_mov_b32 s4, s6 |
| ; VI-NEXT: s_mov_b32 s5, s7 |
| ; VI-NEXT: s_mov_b32 s6, s10 |
| ; VI-NEXT: s_mov_b32 s7, s11 |
| ; VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; VI-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; VI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; VI-NEXT: s_mov_b32 s8, s0 |
| ; VI-NEXT: s_mov_b32 s9, s1 |
| ; VI-NEXT: s_waitcnt vmcnt(0) |
| ; VI-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5] |
| ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; VI-NEXT: s_endpgm |
| %r0 = load double, ptr addrspace(1) %in1 |
| %r1 = load double, ptr addrspace(1) %in2 |
| %r2 = load double, ptr addrspace(1) %in3 |
| %r3 = tail call double @llvm.fmuladd.f64(double %r0, double %r1, double %r2) |
| store double %r3, ptr addrspace(1) %out |
| ret void |
| } |
| |
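; A separate fmul/fadd pair with no fast-math flags stays as v_mul_f64 + v_add_f64 with -fp-contract=on and fuses to v_fma_f64 with -fp-contract=fast.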
| define amdgpu_kernel void @fmul_fadd_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { |
| ; SI-STRICT-LABEL: fmul_fadd_f64: |
| ; SI-STRICT: ; %bb.0: |
| ; SI-STRICT-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 |
| ; SI-STRICT-NEXT: s_mov_b32 s11, 0xf000 |
| ; SI-STRICT-NEXT: s_mov_b32 s10, -1 |
| ; SI-STRICT-NEXT: s_mov_b32 s14, s10 |
| ; SI-STRICT-NEXT: s_mov_b32 s15, s11 |
| ; SI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-STRICT-NEXT: s_mov_b32 s12, s2 |
| ; SI-STRICT-NEXT: s_mov_b32 s13, s3 |
| ; SI-STRICT-NEXT: s_mov_b32 s16, s4 |
| ; SI-STRICT-NEXT: s_mov_b32 s17, s5 |
| ; SI-STRICT-NEXT: s_mov_b32 s18, s10 |
| ; SI-STRICT-NEXT: s_mov_b32 s19, s11 |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; SI-STRICT-NEXT: s_mov_b32 s4, s6 |
| ; SI-STRICT-NEXT: s_mov_b32 s5, s7 |
| ; SI-STRICT-NEXT: s_mov_b32 s6, s10 |
| ; SI-STRICT-NEXT: s_mov_b32 s7, s11 |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; SI-STRICT-NEXT: s_mov_b32 s8, s0 |
| ; SI-STRICT-NEXT: s_mov_b32 s9, s1 |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(1) |
| ; SI-STRICT-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] |
| ; SI-STRICT-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; SI-STRICT-NEXT: s_endpgm |
| ; |
| ; SI-CONTRACT-LABEL: fmul_fadd_f64: |
| ; SI-CONTRACT: ; %bb.0: |
| ; SI-CONTRACT-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s11, 0xf000 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s10, -1 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s14, s10 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s15, s11 |
| ; SI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-CONTRACT-NEXT: s_mov_b32 s12, s2 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s13, s3 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s16, s4 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s17, s5 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s18, s10 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s19, s11 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s4, s6 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s5, s7 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s6, s10 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s7, s11 |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s8, s0 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s9, s1 |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5] |
| ; SI-CONTRACT-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; SI-CONTRACT-NEXT: s_endpgm |
| ; |
| ; VI-STRICT-LABEL: fmul_fadd_f64: |
| ; VI-STRICT: ; %bb.0: |
| ; VI-STRICT-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 |
| ; VI-STRICT-NEXT: s_mov_b32 s11, 0xf000 |
| ; VI-STRICT-NEXT: s_mov_b32 s10, -1 |
| ; VI-STRICT-NEXT: s_mov_b32 s14, s10 |
| ; VI-STRICT-NEXT: s_mov_b32 s15, s11 |
| ; VI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-STRICT-NEXT: s_mov_b32 s12, s2 |
| ; VI-STRICT-NEXT: s_mov_b32 s13, s3 |
| ; VI-STRICT-NEXT: s_mov_b32 s16, s4 |
| ; VI-STRICT-NEXT: s_mov_b32 s17, s5 |
| ; VI-STRICT-NEXT: s_mov_b32 s18, s10 |
| ; VI-STRICT-NEXT: s_mov_b32 s19, s11 |
| ; VI-STRICT-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; VI-STRICT-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; VI-STRICT-NEXT: s_mov_b32 s4, s6 |
| ; VI-STRICT-NEXT: s_mov_b32 s5, s7 |
| ; VI-STRICT-NEXT: s_mov_b32 s6, s10 |
| ; VI-STRICT-NEXT: s_mov_b32 s7, s11 |
| ; VI-STRICT-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; VI-STRICT-NEXT: s_mov_b32 s8, s0 |
| ; VI-STRICT-NEXT: s_mov_b32 s9, s1 |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(1) |
| ; VI-STRICT-NEXT: v_mul_f64 v[0:1], v[0:1], v[2:3] |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5] |
| ; VI-STRICT-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; VI-STRICT-NEXT: s_endpgm |
| ; |
| ; VI-CONTRACT-LABEL: fmul_fadd_f64: |
| ; VI-CONTRACT: ; %bb.0: |
| ; VI-CONTRACT-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s11, 0xf000 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s10, -1 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s14, s10 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s15, s11 |
| ; VI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-CONTRACT-NEXT: s_mov_b32 s12, s2 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s13, s3 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s16, s4 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s17, s5 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s18, s10 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s19, s11 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s4, s6 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s5, s7 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s6, s10 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s7, s11 |
| ; VI-CONTRACT-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; VI-CONTRACT-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; VI-CONTRACT-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s8, s0 |
| ; VI-CONTRACT-NEXT: s_mov_b32 s9, s1 |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5] |
| ; VI-CONTRACT-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; VI-CONTRACT-NEXT: s_endpgm |
| %r0 = load double, ptr addrspace(1) %in1 |
| %r1 = load double, ptr addrspace(1) %in2 |
| %r2 = load double, ptr addrspace(1) %in3 |
| %tmp = fmul double %r0, %r1 |
| %r3 = fadd double %tmp, %r2 |
| store double %r3, ptr addrspace(1) %out |
| ret void |
| } |
| |
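; An fmul/fadd pair carrying the 'contract' flag fuses to v_fma_f64 regardless of the -fp-contract setting.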
| define amdgpu_kernel void @fmul_fadd_contract_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(1) %in3) #0 { |
| ; SI-LABEL: fmul_fadd_contract_f64: |
| ; SI: ; %bb.0: |
| ; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 |
| ; SI-NEXT: s_mov_b32 s11, 0xf000 |
| ; SI-NEXT: s_mov_b32 s10, -1 |
| ; SI-NEXT: s_mov_b32 s14, s10 |
| ; SI-NEXT: s_mov_b32 s15, s11 |
| ; SI-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-NEXT: s_mov_b32 s12, s2 |
| ; SI-NEXT: s_mov_b32 s13, s3 |
| ; SI-NEXT: s_mov_b32 s16, s4 |
| ; SI-NEXT: s_mov_b32 s17, s5 |
| ; SI-NEXT: s_mov_b32 s18, s10 |
| ; SI-NEXT: s_mov_b32 s19, s11 |
| ; SI-NEXT: s_mov_b32 s4, s6 |
| ; SI-NEXT: s_mov_b32 s5, s7 |
| ; SI-NEXT: s_mov_b32 s6, s10 |
| ; SI-NEXT: s_mov_b32 s7, s11 |
| ; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; SI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; SI-NEXT: s_mov_b32 s8, s0 |
| ; SI-NEXT: s_mov_b32 s9, s1 |
| ; SI-NEXT: s_waitcnt vmcnt(0) |
| ; SI-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5] |
| ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; SI-NEXT: s_endpgm |
| ; |
| ; VI-LABEL: fmul_fadd_contract_f64: |
| ; VI: ; %bb.0: |
| ; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24 |
| ; VI-NEXT: s_mov_b32 s11, 0xf000 |
| ; VI-NEXT: s_mov_b32 s10, -1 |
| ; VI-NEXT: s_mov_b32 s14, s10 |
| ; VI-NEXT: s_mov_b32 s15, s11 |
| ; VI-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-NEXT: s_mov_b32 s12, s2 |
| ; VI-NEXT: s_mov_b32 s13, s3 |
| ; VI-NEXT: s_mov_b32 s16, s4 |
| ; VI-NEXT: s_mov_b32 s17, s5 |
| ; VI-NEXT: s_mov_b32 s18, s10 |
| ; VI-NEXT: s_mov_b32 s19, s11 |
| ; VI-NEXT: s_mov_b32 s4, s6 |
| ; VI-NEXT: s_mov_b32 s5, s7 |
| ; VI-NEXT: s_mov_b32 s6, s10 |
| ; VI-NEXT: s_mov_b32 s7, s11 |
| ; VI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0 |
| ; VI-NEXT: buffer_load_dwordx2 v[2:3], off, s[16:19], 0 |
| ; VI-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], 0 |
| ; VI-NEXT: s_mov_b32 s8, s0 |
| ; VI-NEXT: s_mov_b32 s9, s1 |
| ; VI-NEXT: s_waitcnt vmcnt(0) |
| ; VI-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], v[4:5] |
| ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 |
| ; VI-NEXT: s_endpgm |
| %r0 = load double, ptr addrspace(1) %in1 |
| %r1 = load double, ptr addrspace(1) %in2 |
| %r2 = load double, ptr addrspace(1) %in3 |
| %tmp = fmul contract double %r0, %r1 |
| %r3 = fadd contract double %tmp, %r2 |
| store double %r3, ptr addrspace(1) %out |
| ret void |
| } |
| |
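; (a + a) + b: when contraction is allowed this becomes fma(a, 2.0, b); otherwise two v_add_f64 instructions remain.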
| define amdgpu_kernel void @fadd_a_a_b_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 { |
| ; SI-STRICT-LABEL: fadd_a_a_b_f64: |
| ; SI-STRICT: ; %bb.0: |
| ; SI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-STRICT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-STRICT-NEXT: s_mov_b32 s2, 0 |
| ; SI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-STRICT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[4:5] |
| ; SI-STRICT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-STRICT-NEXT: s_endpgm |
| ; |
| ; SI-CONTRACT-LABEL: fadd_a_a_b_f64: |
| ; SI-CONTRACT: ; %bb.0: |
| ; SI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s2, 0 |
| ; SI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[2:3], 2.0, v[4:5] |
| ; SI-CONTRACT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-CONTRACT-NEXT: s_endpgm |
| ; |
| ; VI-STRICT-LABEL: fadd_a_a_b_f64: |
| ; VI-STRICT: ; %bb.0: |
| ; VI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-STRICT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-STRICT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[2:3], v[0:1] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_u32_e32 v4, vcc, 8, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[4:5], v[4:5] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[4:5] |
| ; VI-STRICT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-STRICT-NEXT: s_endpgm |
| ; |
| ; VI-CONTRACT-LABEL: fadd_a_a_b_f64: |
| ; VI-CONTRACT: ; %bb.0: |
| ; VI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-CONTRACT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v2, vcc, 8, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[4:5], v[0:1] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[4:5], 2.0, v[2:3] |
| ; VI-CONTRACT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-CONTRACT-NEXT: s_endpgm |
| %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone |
| %gep.0 = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| %gep.1 = getelementptr double, ptr addrspace(1) %gep.0, i32 1 |
| %gep.out = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| |
| %r0 = load volatile double, ptr addrspace(1) %gep.0 |
| %r1 = load volatile double, ptr addrspace(1) %gep.1 |
| |
| %add.0 = fadd double %r0, %r0 |
| %add.1 = fadd double %add.0, %r1 |
| store double %add.1, ptr addrspace(1) %gep.out |
| ret void |
| } |
| |
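; b + (a + a): commuted form of the case above, also fused to fma(a, 2.0, b) when contraction is allowed.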
| define amdgpu_kernel void @fadd_b_a_a_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 { |
| ; SI-STRICT-LABEL: fadd_b_a_a_f64: |
| ; SI-STRICT: ; %bb.0: |
| ; SI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-STRICT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-STRICT-NEXT: s_mov_b32 s2, 0 |
| ; SI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-STRICT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[4:5], v[2:3] |
| ; SI-STRICT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-STRICT-NEXT: s_endpgm |
| ; |
| ; SI-CONTRACT-LABEL: fadd_b_a_a_f64: |
| ; SI-CONTRACT: ; %bb.0: |
| ; SI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s2, 0 |
| ; SI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[2:3], 2.0, v[4:5] |
| ; SI-CONTRACT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-CONTRACT-NEXT: s_endpgm |
| ; |
| ; VI-STRICT-LABEL: fadd_b_a_a_f64: |
| ; VI-STRICT: ; %bb.0: |
| ; VI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-STRICT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-STRICT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[2:3], v[0:1] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_u32_e32 v4, vcc, 8, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[4:5], v[4:5] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[4:5], v[2:3] |
| ; VI-STRICT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-STRICT-NEXT: s_endpgm |
| ; |
| ; VI-CONTRACT-LABEL: fadd_b_a_a_f64: |
| ; VI-CONTRACT: ; %bb.0: |
| ; VI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-CONTRACT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v2, vcc, 8, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[4:5], v[0:1] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[4:5], 2.0, v[2:3] |
| ; VI-CONTRACT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-CONTRACT-NEXT: s_endpgm |
| %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone |
| %gep.0 = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| %gep.1 = getelementptr double, ptr addrspace(1) %gep.0, i32 1 |
| %gep.out = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| |
| %r0 = load volatile double, ptr addrspace(1) %gep.0 |
| %r1 = load volatile double, ptr addrspace(1) %gep.1 |
| |
| %add.0 = fadd double %r0, %r0 |
| %add.1 = fadd double %r1, %add.0 |
| store double %add.1, ptr addrspace(1) %gep.out |
| ret void |
| } |
| |
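; a * b - c: fuses to fma(a, b, -c) when contraction is allowed; otherwise a mul followed by an add of the negated operand.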
| define amdgpu_kernel void @mad_sub_f64(ptr addrspace(1) noalias nocapture %out, ptr addrspace(1) noalias nocapture readonly %ptr) #1 { |
| ; SI-STRICT-LABEL: mad_sub_f64: |
| ; SI-STRICT: ; %bb.0: |
| ; SI-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 |
| ; SI-STRICT-NEXT: s_mov_b32 s7, 0xf000 |
| ; SI-STRICT-NEXT: s_mov_b32 s6, 0 |
| ; SI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-STRICT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-STRICT-NEXT: s_mov_b64 s[4:5], s[2:3] |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64 offset:8 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:16 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: s_mov_b64 s[2:3], s[6:7] |
| ; SI-STRICT-NEXT: v_mul_f64 v[2:3], v[2:3], v[4:5] |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], -v[6:7] |
| ; SI-STRICT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-STRICT-NEXT: s_endpgm |
| ; |
| ; SI-CONTRACT-LABEL: mad_sub_f64: |
| ; SI-CONTRACT: ; %bb.0: |
| ; SI-CONTRACT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s7, 0xf000 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s6, 0 |
| ; SI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-CONTRACT-NEXT: s_mov_b64 s[4:5], s[2:3] |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[4:7], 0 addr64 offset:8 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:16 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: s_mov_b64 s[2:3], s[6:7] |
| ; SI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[2:3], v[4:5], -v[6:7] |
| ; SI-CONTRACT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-CONTRACT-NEXT: s_endpgm |
| ; |
| ; VI-STRICT-LABEL: mad_sub_f64: |
| ; VI-STRICT: ; %bb.0: |
| ; VI-STRICT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 |
| ; VI-STRICT-NEXT: v_lshlrev_b32_e32 v6, 3, v0 |
| ; VI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-STRICT-NEXT: v_mov_b32_e32 v1, s3 |
| ; VI-STRICT-NEXT: v_add_u32_e32 v0, vcc, s2, v6 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: v_add_u32_e32 v2, vcc, 8, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[4:5], v[0:1] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_u32_e32 v0, vcc, 16, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_mul_f64 v[2:3], v[4:5], v[2:3] |
| ; VI-STRICT-NEXT: v_add_f64 v[0:1], v[2:3], -v[0:1] |
| ; VI-STRICT-NEXT: v_mov_b32_e32 v3, s1 |
| ; VI-STRICT-NEXT: v_add_u32_e32 v2, vcc, s0, v6 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc |
| ; VI-STRICT-NEXT: flat_store_dwordx2 v[2:3], v[0:1] |
| ; VI-STRICT-NEXT: s_endpgm |
| ; |
| ; VI-CONTRACT-LABEL: mad_sub_f64: |
| ; VI-CONTRACT: ; %bb.0: |
| ; VI-CONTRACT-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 |
| ; VI-CONTRACT-NEXT: v_lshlrev_b32_e32 v6, 3, v0 |
| ; VI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-CONTRACT-NEXT: v_mov_b32_e32 v1, s3 |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v0, vcc, s2, v6 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v2, vcc, 8, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v4, vcc, 16, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[4:5], v[4:5] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], -v[4:5] |
| ; VI-CONTRACT-NEXT: v_mov_b32_e32 v3, s1 |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v2, vcc, s0, v6 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc |
| ; VI-CONTRACT-NEXT: flat_store_dwordx2 v[2:3], v[0:1] |
| ; VI-CONTRACT-NEXT: s_endpgm |
| %tid = tail call i32 @llvm.amdgcn.workitem.id.x() #0 |
| %tid.ext = sext i32 %tid to i64 |
| %gep0 = getelementptr double, ptr addrspace(1) %ptr, i64 %tid.ext |
| %add1 = add i64 %tid.ext, 1 |
| %gep1 = getelementptr double, ptr addrspace(1) %ptr, i64 %add1 |
| %add2 = add i64 %tid.ext, 2 |
| %gep2 = getelementptr double, ptr addrspace(1) %ptr, i64 %add2 |
| %outgep = getelementptr double, ptr addrspace(1) %out, i64 %tid.ext |
| %a = load volatile double, ptr addrspace(1) %gep0, align 8 |
| %b = load volatile double, ptr addrspace(1) %gep1, align 8 |
| %c = load volatile double, ptr addrspace(1) %gep2, align 8 |
| %mul = fmul double %a, %b |
| %sub = fsub double %mul, %c |
| store double %sub, ptr addrspace(1) %outgep, align 8 |
| ret void |
| } |
| |
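; Only the first fadd is 'fast'; the pair stays unfused with -fp-contract=on and fuses only with -fp-contract=fast.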
| define amdgpu_kernel void @fadd_a_a_b_f64_fast_add0(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 { |
| ; SI-STRICT-LABEL: fadd_a_a_b_f64_fast_add0: |
| ; SI-STRICT: ; %bb.0: |
| ; SI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-STRICT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-STRICT-NEXT: s_mov_b32 s2, 0 |
| ; SI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-STRICT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[4:5] |
| ; SI-STRICT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-STRICT-NEXT: s_endpgm |
| ; |
| ; SI-CONTRACT-LABEL: fadd_a_a_b_f64_fast_add0: |
| ; SI-CONTRACT: ; %bb.0: |
| ; SI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s2, 0 |
| ; SI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[2:3], 2.0, v[4:5] |
| ; SI-CONTRACT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-CONTRACT-NEXT: s_endpgm |
| ; |
| ; VI-STRICT-LABEL: fadd_a_a_b_f64_fast_add0: |
| ; VI-STRICT: ; %bb.0: |
| ; VI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-STRICT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-STRICT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[2:3], v[0:1] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_u32_e32 v4, vcc, 8, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[4:5], v[4:5] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[4:5] |
| ; VI-STRICT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-STRICT-NEXT: s_endpgm |
| ; |
| ; VI-CONTRACT-LABEL: fadd_a_a_b_f64_fast_add0: |
| ; VI-CONTRACT: ; %bb.0: |
| ; VI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-CONTRACT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v2, vcc, 8, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[4:5], v[0:1] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[4:5], 2.0, v[2:3] |
| ; VI-CONTRACT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-CONTRACT-NEXT: s_endpgm |
| %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone |
| %gep.0 = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| %gep.1 = getelementptr double, ptr addrspace(1) %gep.0, i32 1 |
| %gep.out = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| |
| %r0 = load volatile double, ptr addrspace(1) %gep.0 |
| %r1 = load volatile double, ptr addrspace(1) %gep.1 |
| |
| %add.0 = fadd fast double %r0, %r0 |
| %add.1 = fadd double %add.0, %r1 |
| store double %add.1, ptr addrspace(1) %gep.out |
| ret void |
| } |
| |
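; Only the second fadd is 'fast'; as above, fusion happens only with -fp-contract=fast.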
| define amdgpu_kernel void @fadd_a_a_b_f64_fast_add1(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 { |
| ; SI-STRICT-LABEL: fadd_a_a_b_f64_fast_add1: |
| ; SI-STRICT: ; %bb.0: |
| ; SI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-STRICT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-STRICT-NEXT: s_mov_b32 s2, 0 |
| ; SI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-STRICT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; SI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[4:5] |
| ; SI-STRICT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-STRICT-NEXT: s_endpgm |
| ; |
| ; SI-CONTRACT-LABEL: fadd_a_a_b_f64_fast_add1: |
| ; SI-CONTRACT: ; %bb.0: |
| ; SI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-CONTRACT-NEXT: s_mov_b32 s2, 0 |
| ; SI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-CONTRACT-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; SI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[2:3], 2.0, v[4:5] |
| ; SI-CONTRACT-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-CONTRACT-NEXT: s_endpgm |
| ; |
| ; VI-STRICT-LABEL: fadd_a_a_b_f64_fast_add1: |
| ; VI-STRICT: ; %bb.0: |
| ; VI-STRICT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-STRICT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-STRICT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-STRICT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-STRICT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[2:3], v[0:1] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_u32_e32 v4, vcc, 8, v0 |
| ; VI-STRICT-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc |
| ; VI-STRICT-NEXT: flat_load_dwordx2 v[4:5], v[4:5] glc |
| ; VI-STRICT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[2:3] |
| ; VI-STRICT-NEXT: v_add_f64 v[2:3], v[2:3], v[4:5] |
| ; VI-STRICT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-STRICT-NEXT: s_endpgm |
| ; |
| ; VI-CONTRACT-LABEL: fadd_a_a_b_f64_fast_add1: |
| ; VI-CONTRACT: ; %bb.0: |
| ; VI-CONTRACT-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-CONTRACT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-CONTRACT-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-CONTRACT-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: v_add_u32_e32 v2, vcc, 8, v0 |
| ; VI-CONTRACT-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[4:5], v[0:1] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc |
| ; VI-CONTRACT-NEXT: s_waitcnt vmcnt(0) |
| ; VI-CONTRACT-NEXT: v_fma_f64 v[2:3], v[4:5], 2.0, v[2:3] |
| ; VI-CONTRACT-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-CONTRACT-NEXT: s_endpgm |
| %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone |
| %gep.0 = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| %gep.1 = getelementptr double, ptr addrspace(1) %gep.0, i32 1 |
| %gep.out = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| |
| %r0 = load volatile double, ptr addrspace(1) %gep.0 |
| %r1 = load volatile double, ptr addrspace(1) %gep.1 |
| |
| %add.0 = fadd double %r0, %r0 |
| %add.1 = fadd fast double %add.0, %r1 |
| store double %add.1, ptr addrspace(1) %gep.out |
| ret void |
| } |
| |
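; Both fadds are 'fast', so the fma forms under both -fp-contract=on and -fp-contract=fast.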
| define amdgpu_kernel void @fadd_a_a_b_f64_fast(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 { |
| ; SI-LABEL: fadd_a_a_b_f64_fast: |
| ; SI: ; %bb.0: |
| ; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 |
| ; SI-NEXT: s_mov_b32 s3, 0xf000 |
| ; SI-NEXT: s_mov_b32 s2, 0 |
| ; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; SI-NEXT: v_mov_b32_e32 v1, 0 |
| ; SI-NEXT: s_waitcnt lgkmcnt(0) |
| ; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 glc |
| ; SI-NEXT: s_waitcnt vmcnt(0) |
| ; SI-NEXT: buffer_load_dwordx2 v[4:5], v[0:1], s[0:3], 0 addr64 offset:8 glc |
| ; SI-NEXT: s_waitcnt vmcnt(0) |
| ; SI-NEXT: v_fma_f64 v[2:3], v[2:3], 2.0, v[4:5] |
| ; SI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[0:3], 0 addr64 |
| ; SI-NEXT: s_endpgm |
| ; |
| ; VI-LABEL: fadd_a_a_b_f64_fast: |
| ; VI: ; %bb.0: |
| ; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 |
| ; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0 |
| ; VI-NEXT: s_waitcnt lgkmcnt(0) |
| ; VI-NEXT: v_mov_b32_e32 v1, s1 |
| ; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 |
| ; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc |
| ; VI-NEXT: v_add_u32_e32 v2, vcc, 8, v0 |
| ; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc |
| ; VI-NEXT: flat_load_dwordx2 v[4:5], v[0:1] glc |
| ; VI-NEXT: s_waitcnt vmcnt(0) |
| ; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc |
| ; VI-NEXT: s_waitcnt vmcnt(0) |
| ; VI-NEXT: v_fma_f64 v[2:3], v[4:5], 2.0, v[2:3] |
| ; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3] |
| ; VI-NEXT: s_endpgm |
| %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone |
| %gep.0 = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| %gep.1 = getelementptr double, ptr addrspace(1) %gep.0, i32 1 |
| %gep.out = getelementptr double, ptr addrspace(1) %out, i32 %tid |
| |
| %r0 = load volatile double, ptr addrspace(1) %gep.0 |
| %r1 = load volatile double, ptr addrspace(1) %gep.1 |
| |
| %add.0 = fadd fast double %r0, %r0 |
| %add.1 = fadd fast double %add.0, %r1 |
| store double %add.1, ptr addrspace(1) %gep.out |
| ret void |
| } |
| |
| declare i32 @llvm.amdgcn.workitem.id.x() #1 |
| declare double @llvm.fmuladd.f64(double, double, double) #1 |
| |
| attributes #0 = { nounwind } |
| attributes #1 = { nounwind readnone } |