|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
|  | ; RUN: llc -mtriple=amdgcn -mcpu=gfx90a < %s | FileCheck %s | 
|  |  | 
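; %b is already a flat pointer with no addrspacecast, so there is no address
; space information to infer and a flat atomic is emitted.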
|  | define protected amdgpu_kernel void @InferNothing(i32 %a, ptr %b, double %c) { | 
|  | ; CHECK-LABEL: InferNothing: | 
|  | ; CHECK:       ; %bb.0: ; %entry | 
|  | ; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x24 | 
|  | ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c | 
|  | ; CHECK-NEXT:    s_waitcnt lgkmcnt(0) | 
|  | ; CHECK-NEXT:    s_ashr_i32 s7, s6, 31 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v2, s2 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v3, s3 | 
|  | ; CHECK-NEXT:    s_lshl_b64 s[2:3], s[6:7], 3 | 
|  | ; CHECK-NEXT:    s_add_u32 s0, s2, s0 | 
|  | ; CHECK-NEXT:    s_addc_u32 s1, s3, s1 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v1, s1 | 
|  | ; CHECK-NEXT:    v_add_co_u32_e64 v0, vcc, -8, s0 | 
|  | ; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v1, vcc | 
|  | ; CHECK-NEXT:    flat_atomic_add_f64 v[0:1], v[2:3] | 
|  | ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) | 
|  | ; CHECK-NEXT:    buffer_wbinvl1_vol | 
|  | ; CHECK-NEXT:    s_endpgm | 
|  | entry: | 
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr %b, i64 %i.2
  %i.4 = atomicrmw fadd ptr %i.3, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  ret void
|  | } | 
|  |  | 
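; The atomicrmw operates on an addrspacecast of the global pointer %b; the
; !noalias.addrspace range !{i32 5, i32 6} marks the access as not private.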
|  | define protected amdgpu_kernel void @InferFadd(i32 %a, ptr addrspace(1) %b, double %c) { | 
|  | ; CHECK-LABEL: InferFadd: | 
|  | ; CHECK:       ; %bb.0: ; %entry | 
|  | ; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x24 | 
|  | ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c | 
|  | ; CHECK-NEXT:    s_waitcnt lgkmcnt(0) | 
|  | ; CHECK-NEXT:    s_ashr_i32 s7, s6, 31 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v2, s2 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v3, s3 | 
|  | ; CHECK-NEXT:    s_lshl_b64 s[2:3], s[6:7], 3 | 
|  | ; CHECK-NEXT:    s_add_u32 s0, s0, s2 | 
|  | ; CHECK-NEXT:    s_addc_u32 s1, s1, s3 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v1, s1 | 
|  | ; CHECK-NEXT:    v_add_co_u32_e64 v0, vcc, -8, s0 | 
|  | ; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v1, vcc | 
|  | ; CHECK-NEXT:    flat_atomic_add_f64 v[0:1], v[2:3] | 
|  | ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) | 
|  | ; CHECK-NEXT:    buffer_wbinvl1_vol | 
|  | ; CHECK-NEXT:    s_endpgm | 
|  | entry: | 
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
  %i.4 = addrspacecast ptr addrspace(1) %i.3 to ptr
  %0 = atomicrmw fadd ptr %i.4, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  ret void
|  | } | 
|  |  | 
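; Mixes an atomic on the incoming flat pointer %d with one on a pointer derived
; from the global pointer %b through ptrtoint/inttoptr and addrspacecast.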
|  | define protected amdgpu_kernel void @InferMixed(i32 %a, ptr addrspace(1) %b, double %c, ptr %d) { | 
|  | ; CHECK-LABEL: InferMixed: | 
|  | ; CHECK:       ; %bb.0: ; %entry | 
|  | ; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x24 | 
|  | ; CHECK-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0x3c | 
|  | ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c | 
|  | ; CHECK-NEXT:    s_waitcnt lgkmcnt(0) | 
|  | ; CHECK-NEXT:    s_ashr_i32 s7, s6, 31 | 
|  | ; CHECK-NEXT:    v_pk_mov_b32 v[0:1], s[8:9], s[8:9] op_sel:[0,1] | 
|  | ; CHECK-NEXT:    v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] | 
|  | ; CHECK-NEXT:    s_lshl_b64 s[2:3], s[6:7], 3 | 
|  | ; CHECK-NEXT:    s_add_u32 s0, s0, s2 | 
|  | ; CHECK-NEXT:    s_addc_u32 s1, s1, s3 | 
|  | ; CHECK-NEXT:    flat_atomic_add_f64 v[0:1], v[2:3] | 
|  | ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) | 
|  | ; CHECK-NEXT:    buffer_wbinvl1_vol | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v1, s1 | 
|  | ; CHECK-NEXT:    v_add_co_u32_e64 v0, vcc, -7, s0 | 
|  | ; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, -1, v1, vcc | 
|  | ; CHECK-NEXT:    flat_atomic_add_f64 v[0:1], v[2:3] | 
|  | ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0) | 
|  | ; CHECK-NEXT:    buffer_wbinvl1_vol | 
|  | ; CHECK-NEXT:    s_endpgm | 
|  | entry: | 
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
  br label %bb1
|  |  | 
|  | bb1:                                              ; preds = %entry | 
  %i.7 = ptrtoint ptr addrspace(1) %i.3 to i64
  %i.8 = add nsw i64 %i.7, 1
  %i.9 = inttoptr i64 %i.8 to ptr addrspace(1)
  %0 = atomicrmw fadd ptr %d, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  %i.11 = addrspacecast ptr addrspace(1) %i.9 to ptr
  %1 = atomicrmw fadd ptr %i.11, double %c syncscope("agent") seq_cst, align 8, !noalias.addrspace !1, !amdgpu.no.fine.grained.memory !0
  ret void
|  | } | 
|  |  | 
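; The pointer reaches the atomicrmw through a PHI in a loop, and the atomicrmw
; carries no !noalias.addrspace metadata, so it is expanded with runtime
; shared/private/global checks.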
|  | define protected amdgpu_kernel void @InferPHI(i32 %a, ptr addrspace(1) %b, double %c) { | 
|  | ; CHECK-LABEL: InferPHI: | 
|  | ; CHECK:       ; %bb.0: ; %entry | 
|  | ; CHECK-NEXT:    s_mov_b32 s12, SCRATCH_RSRC_DWORD0 | 
|  | ; CHECK-NEXT:    s_mov_b32 s13, SCRATCH_RSRC_DWORD1 | 
|  | ; CHECK-NEXT:    s_load_dword s6, s[4:5], 0x24 | 
|  | ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x2c | 
|  | ; CHECK-NEXT:    s_mov_b32 s14, -1 | 
|  | ; CHECK-NEXT:    s_mov_b32 s15, 0xe00000 | 
|  | ; CHECK-NEXT:    s_add_u32 s12, s12, s11 | 
|  | ; CHECK-NEXT:    s_addc_u32 s13, s13, 0 | 
|  | ; CHECK-NEXT:    s_waitcnt lgkmcnt(0) | 
|  | ; CHECK-NEXT:    s_ashr_i32 s7, s6, 31 | 
|  | ; CHECK-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3 | 
|  | ; CHECK-NEXT:    s_add_u32 s0, s0, s4 | 
|  | ; CHECK-NEXT:    s_addc_u32 s1, s1, s5 | 
|  | ; CHECK-NEXT:    s_add_u32 s4, s0, -8 | 
|  | ; CHECK-NEXT:    s_addc_u32 s5, s1, -1 | 
|  | ; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 9 | 
|  | ; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0 | 
|  | ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1] | 
|  | ; CHECK-NEXT:    v_cmp_ne_u32_e64 s[0:1], 1, v0 | 
|  | ; CHECK-NEXT:  .LBB3_1: ; %bb0 | 
|  | ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1 | 
|  | ; CHECK-NEXT:    s_and_b64 vcc, exec, s[0:1] | 
|  | ; CHECK-NEXT:    s_cbranch_vccnz .LBB3_1 | 
|  | ; CHECK-NEXT:  ; %bb.2: ; %bb1 | 
|  | ; CHECK-NEXT:    s_mov_b64 s[0:1], src_shared_base | 
|  | ; CHECK-NEXT:    s_cmp_eq_u32 s5, s1 | 
|  | ; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0 | 
|  | ; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[0:1] | 
|  | ; CHECK-NEXT:    s_mov_b64 s[0:1], -1 | 
|  | ; CHECK-NEXT:    s_cbranch_vccnz .LBB3_5 | 
|  | ; CHECK-NEXT:  ; %bb.3: ; %Flow6 | 
|  | ; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[0:1] | 
|  | ; CHECK-NEXT:    s_cbranch_vccz .LBB3_10 | 
|  | ; CHECK-NEXT:  .LBB3_4: ; %atomicrmw.phi | 
|  | ; CHECK-NEXT:    s_endpgm | 
|  | ; CHECK-NEXT:  .LBB3_5: ; %atomicrmw.check.private | 
|  | ; CHECK-NEXT:    s_mov_b64 s[0:1], src_private_base | 
|  | ; CHECK-NEXT:    s_cmp_eq_u32 s5, s1 | 
|  | ; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0 | 
|  | ; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[0:1] | 
|  | ; CHECK-NEXT:    s_mov_b64 s[0:1], -1 | 
|  | ; CHECK-NEXT:    s_cbranch_vccz .LBB3_7 | 
|  | ; CHECK-NEXT:  ; %bb.6: ; %atomicrmw.global | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v0, 0 | 
|  | ; CHECK-NEXT:    v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] | 
|  | ; CHECK-NEXT:    global_atomic_add_f64 v0, v[2:3], s[4:5] | 
|  | ; CHECK-NEXT:    s_waitcnt vmcnt(0) | 
|  | ; CHECK-NEXT:    buffer_wbinvl1_vol | 
|  | ; CHECK-NEXT:    s_mov_b64 s[0:1], 0 | 
|  | ; CHECK-NEXT:  .LBB3_7: ; %Flow | 
|  | ; CHECK-NEXT:    s_andn2_b64 vcc, exec, s[0:1] | 
|  | ; CHECK-NEXT:    s_cbranch_vccnz .LBB3_9 | 
|  | ; CHECK-NEXT:  ; %bb.8: ; %atomicrmw.private | 
|  | ; CHECK-NEXT:    s_cmp_lg_u64 s[4:5], 0 | 
|  | ; CHECK-NEXT:    s_cselect_b32 s0, s4, -1 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v2, s0 | 
|  | ; CHECK-NEXT:    buffer_load_dword v0, v2, s[12:15], 0 offen | 
|  | ; CHECK-NEXT:    buffer_load_dword v1, v2, s[12:15], 0 offen offset:4 | 
|  | ; CHECK-NEXT:    s_waitcnt vmcnt(0) | 
|  | ; CHECK-NEXT:    v_add_f64 v[0:1], v[0:1], s[2:3] | 
|  | ; CHECK-NEXT:    buffer_store_dword v0, v2, s[12:15], 0 offen | 
|  | ; CHECK-NEXT:    buffer_store_dword v1, v2, s[12:15], 0 offen offset:4 | 
|  | ; CHECK-NEXT:  .LBB3_9: ; %Flow5 | 
|  | ; CHECK-NEXT:    s_cbranch_execnz .LBB3_4 | 
|  | ; CHECK-NEXT:  .LBB3_10: ; %atomicrmw.shared | 
|  | ; CHECK-NEXT:    s_cmp_lg_u64 s[4:5], 0 | 
|  | ; CHECK-NEXT:    s_cselect_b32 s0, s4, -1 | 
|  | ; CHECK-NEXT:    v_mov_b32_e32 v0, s0 | 
|  | ; CHECK-NEXT:    v_pk_mov_b32 v[2:3], s[2:3], s[2:3] op_sel:[0,1] | 
|  | ; CHECK-NEXT:    ds_add_f64 v0, v[2:3] | 
|  | ; CHECK-NEXT:    s_waitcnt lgkmcnt(0) | 
|  | ; CHECK-NEXT:    s_endpgm | 
|  | entry: | 
  %i = add nsw i32 %a, -1
  %i.2 = sext i32 %i to i64
  %i.3 = getelementptr inbounds double, ptr addrspace(1) %b, i64 %i.2
  %i.4 = ptrtoint ptr addrspace(1) %i.3 to i64
  br label %bb0
|  |  | 
|  | bb0:                                              ; preds = %bb0, %entry | 
  %phi = phi ptr addrspace(1) [ %i.3, %entry ], [ %i.9, %bb0 ]
  %i.7 = ptrtoint ptr addrspace(1) %phi to i64
  %i.8 = sub nsw i64 %i.7, 1
  %cmp2 = icmp eq i64 %i.8, 0
  %i.9 = inttoptr i64 %i.7 to ptr addrspace(1)
  br i1 %cmp2, label %bb1, label %bb0
|  |  | 
|  | bb1:                                              ; preds = %bb0 | 
  %i.10 = addrspacecast ptr addrspace(1) %i.9 to ptr
  %0 = atomicrmw fadd ptr %i.10, double %c syncscope("agent") seq_cst, align 8, !amdgpu.no.fine.grained.memory !0
  ret void
|  | } | 
|  |  | 
|  | attributes #0 = { nocallback nofree nounwind willreturn memory(argmem: readwrite) } | 
|  | attributes #1 = { mustprogress nounwind willreturn memory(argmem: readwrite) "target-cpu"="gfx90a" } | 
|  |  | 
|  | !0 = !{} | 
|  | !1 = !{i32 5, i32 6} |