; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -passes=amdgpu-atomic-optimizer %s | FileCheck %s
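
; Check how the atomic optimizer handles sub-dword (i8 and i16) atomicrmw
; operations on a uniform pointer: 'or' and 'add' are expected to be rewritten
; so that only one lane performs the atomic, while 'xchg' and the half/bfloat
; 'fadd' cases are left untouched.

; i8 'or': lane 0 performs the atomic, the old value is broadcast with
; readfirstlane, and the other lanes reconstruct their result by or'ing the
; operand back in.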
define amdgpu_kernel void @uniform_or_i8(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i8 %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_or_i8(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], i8 [[VAL:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP1]], 32
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP2]], i32 0)
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TMP7]], label %[[BB8:.*]], label %[[BB10:.*]]
; CHECK: [[BB8]]:
; CHECK-NEXT: [[TMP9:%.*]] = atomicrmw or ptr addrspace(1) [[UNIFORM_PTR]], i8 [[VAL]] monotonic, align 1
; CHECK-NEXT: br label %[[BB10]]
; CHECK: [[BB10]]:
; CHECK-NEXT: [[TMP11:%.*]] = phi i8 [ poison, [[TMP0:%.*]] ], [ [[TMP9]], %[[BB8]] ]
; CHECK-NEXT: [[TMP16:%.*]] = zext i8 [[TMP11]] to i32
; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP16]])
; CHECK-NEXT: [[TMP12:%.*]] = trunc i32 [[TMP17]] to i8
; CHECK-NEXT: [[TMP13:%.*]] = trunc i32 [[TMP6]] to i8
; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP7]], i8 0, i8 [[VAL]]
; CHECK-NEXT: [[TMP15:%.*]] = or i8 [[TMP12]], [[TMP14]]
; CHECK-NEXT: store i8 [[TMP15]], ptr addrspace(1) [[RESULT]], align 1
; CHECK-NEXT: ret void
;
%rmw = atomicrmw or ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1
store i8 %rmw, ptr addrspace(1) %result
ret void
}
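; i8 'add': lane 0 adds %val scaled by the number of active lanes (ctpop of
; the ballot); each lane then recovers its own result as the broadcast old
; value plus %val times its position among the active lanes (mbcnt).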
define amdgpu_kernel void @uniform_add_i8(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i8 %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_add_i8(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], i8 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP1]], 32
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP2]], i32 0)
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]])
; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i8
; CHECK-NEXT: [[TMP9:%.*]] = mul i8 [[VAL]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TMP10]], label %[[BB11:.*]], label %[[BB13:.*]]
; CHECK: [[BB11]]:
; CHECK-NEXT: [[TMP12:%.*]] = atomicrmw add ptr addrspace(1) [[UNIFORM_PTR]], i8 [[TMP9]] monotonic, align 1
; CHECK-NEXT: br label %[[BB13]]
; CHECK: [[BB13]]:
; CHECK-NEXT: [[TMP14:%.*]] = phi i8 [ poison, [[TMP0:%.*]] ], [ [[TMP12]], %[[BB11]] ]
; CHECK-NEXT: [[TMP19:%.*]] = zext i8 [[TMP14]] to i32
; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]])
; CHECK-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP20]] to i8
; CHECK-NEXT: [[TMP16:%.*]] = trunc i32 [[TMP6]] to i8
; CHECK-NEXT: [[TMP17:%.*]] = mul i8 [[VAL]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = add i8 [[TMP15]], [[TMP17]]
; CHECK-NEXT: store i8 [[TMP18]], ptr addrspace(1) [[RESULT]], align 1
; CHECK-NEXT: ret void
;
%rmw = atomicrmw add ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1
store i8 %rmw, ptr addrspace(1) %result
ret void
}
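; 'xchg' is not handled by the optimizer and is left unchanged.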
define amdgpu_kernel void @uniform_xchg_i8(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i8 %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_xchg_i8(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], i8 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RMW:%.*]] = atomicrmw xchg ptr addrspace(1) [[UNIFORM_PTR]], i8 [[VAL]] monotonic, align 1
; CHECK-NEXT: store i8 [[RMW]], ptr addrspace(1) [[RESULT]], align 1
; CHECK-NEXT: ret void
;
%rmw = atomicrmw xchg ptr addrspace(1) %uniform.ptr, i8 %val monotonic, align 1
store i8 %rmw, ptr addrspace(1) %result
ret void
}
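; i16 variants of the 'or', 'add', and 'xchg' cases above.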
define amdgpu_kernel void @uniform_or_i16(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i16 %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_or_i16(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], i16 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP1]], 32
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP2]], i32 0)
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TMP7]], label %[[BB8:.*]], label %[[BB10:.*]]
; CHECK: [[BB8]]:
; CHECK-NEXT: [[TMP9:%.*]] = atomicrmw or ptr addrspace(1) [[UNIFORM_PTR]], i16 [[VAL]] monotonic, align 2
; CHECK-NEXT: br label %[[BB10]]
; CHECK: [[BB10]]:
; CHECK-NEXT: [[TMP11:%.*]] = phi i16 [ poison, [[TMP0:%.*]] ], [ [[TMP9]], %[[BB8]] ]
; CHECK-NEXT: [[TMP16:%.*]] = zext i16 [[TMP11]] to i32
; CHECK-NEXT: [[TMP17:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP16]])
; CHECK-NEXT: [[TMP12:%.*]] = trunc i32 [[TMP17]] to i16
; CHECK-NEXT: [[TMP13:%.*]] = trunc i32 [[TMP6]] to i16
; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP7]], i16 0, i16 [[VAL]]
; CHECK-NEXT: [[TMP15:%.*]] = or i16 [[TMP12]], [[TMP14]]
; CHECK-NEXT: store i16 [[TMP15]], ptr addrspace(1) [[RESULT]], align 2
; CHECK-NEXT: ret void
;
%rmw = atomicrmw or ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2
store i16 %rmw, ptr addrspace(1) %result
ret void
}
define amdgpu_kernel void @uniform_add_i16(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i16 %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_add_i16(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], i16 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32
; CHECK-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP1]], 32
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP2]], i32 0)
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP4]], i32 [[TMP5]])
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP1]])
; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP7]] to i16
; CHECK-NEXT: [[TMP9:%.*]] = mul i16 [[VAL]], [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TMP10]], label %[[BB11:.*]], label %[[BB13:.*]]
; CHECK: [[BB11]]:
; CHECK-NEXT: [[TMP12:%.*]] = atomicrmw add ptr addrspace(1) [[UNIFORM_PTR]], i16 [[TMP9]] monotonic, align 2
; CHECK-NEXT: br label %[[BB13]]
; CHECK: [[BB13]]:
; CHECK-NEXT: [[TMP14:%.*]] = phi i16 [ poison, [[TMP0:%.*]] ], [ [[TMP12]], %[[BB11]] ]
; CHECK-NEXT: [[TMP19:%.*]] = zext i16 [[TMP14]] to i32
; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.amdgcn.readfirstlane.i32(i32 [[TMP19]])
; CHECK-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP20]] to i16
; CHECK-NEXT: [[TMP16:%.*]] = trunc i32 [[TMP6]] to i16
; CHECK-NEXT: [[TMP17:%.*]] = mul i16 [[VAL]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = add i16 [[TMP15]], [[TMP17]]
; CHECK-NEXT: store i16 [[TMP18]], ptr addrspace(1) [[RESULT]], align 2
; CHECK-NEXT: ret void
;
%rmw = atomicrmw add ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2
store i16 %rmw, ptr addrspace(1) %result
ret void
}
define amdgpu_kernel void @uniform_xchg_i16(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, i16 %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_xchg_i16(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], i16 [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RMW:%.*]] = atomicrmw xchg ptr addrspace(1) [[UNIFORM_PTR]], i16 [[VAL]] monotonic, align 2
; CHECK-NEXT: store i16 [[RMW]], ptr addrspace(1) [[RESULT]], align 2
; CHECK-NEXT: ret void
;
%rmw = atomicrmw xchg ptr addrspace(1) %uniform.ptr, i16 %val monotonic, align 2
store i16 %rmw, ptr addrspace(1) %result
ret void
}
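; 'fadd' on half and bfloat is likewise left unchanged.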
define amdgpu_kernel void @uniform_fadd_f16(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, half %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_fadd_f16(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], half [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RMW:%.*]] = atomicrmw fadd ptr addrspace(1) [[UNIFORM_PTR]], half [[VAL]] monotonic, align 2
; CHECK-NEXT: store half [[RMW]], ptr addrspace(1) [[RESULT]], align 2
; CHECK-NEXT: ret void
;
%rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, half %val monotonic, align 2
store half %rmw, ptr addrspace(1) %result
ret void
}
define amdgpu_kernel void @uniform_fadd_bf16(ptr addrspace(1) %result, ptr addrspace(1) %uniform.ptr, bfloat %val) {
; CHECK-LABEL: define amdgpu_kernel void @uniform_fadd_bf16(
; CHECK-SAME: ptr addrspace(1) [[RESULT:%.*]], ptr addrspace(1) [[UNIFORM_PTR:%.*]], bfloat [[VAL:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[RMW:%.*]] = atomicrmw fadd ptr addrspace(1) [[UNIFORM_PTR]], bfloat [[VAL]] monotonic, align 2
; CHECK-NEXT: store bfloat [[RMW]], ptr addrspace(1) [[RESULT]], align 2
; CHECK-NEXT: ret void
;
%rmw = atomicrmw fadd ptr addrspace(1) %uniform.ptr, bfloat %val monotonic, align 2
store bfloat %rmw, ptr addrspace(1) %result
ret void
}