/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/* based on pieces from si_pipe.c and radeon_llvm_emit.c */
#include "ac_llvm_build.h"
#include "ac_nir.h"
#include "ac_llvm_util.h"
#include "ac_shader_util.h"
#include "c11/threads.h"
#include "shader_enums.h"
#include "sid.h"
#include "util/bitscan.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "util/u_math.h"
#include <llvm-c/Core.h>
#include <llvm/Config/llvm-config.h>
#include <assert.h>
#include <stdio.h>
#define AC_LLVM_INITIAL_CF_DEPTH 4
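/* Initial capacity of the if/else/loop control-flow stack allocated in
 * ac_llvm_context_init; as the name suggests, the stack grows on demand when
 * constructs nest deeper than this.
 */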
/* Data for if/else/endif and bgnloop/endloop control flow structures.
*/
struct ac_llvm_flow {
/* Loop exit or next part of if/else/endif. */
LLVMBasicBlockRef next_block;
LLVMBasicBlockRef loop_entry_block;
};
/* Initialize the parts of the context that don't depend on the shader being
 * compiled.
 *
 * Note that this also creates ctx::module and ctx::builder, so the caller
 * doesn't need to initialize them.
 */
void ac_llvm_context_init(struct ac_llvm_context *ctx, struct ac_llvm_compiler *compiler,
enum amd_gfx_level gfx_level, enum radeon_family family,
bool has_3d_cube_border_color_mipmap,
enum ac_float_mode float_mode, unsigned wave_size,
unsigned ballot_mask_bits)
{
ctx->context = LLVMContextCreate();
ctx->gfx_level = gfx_level;
ctx->family = family;
ctx->has_3d_cube_border_color_mipmap = has_3d_cube_border_color_mipmap;
ctx->wave_size = wave_size;
ctx->ballot_mask_bits = ballot_mask_bits;
ctx->float_mode = float_mode;
ctx->module = ac_create_module(compiler->tm, ctx->context);
ctx->builder = ac_create_builder(ctx->context, float_mode);
ctx->voidt = LLVMVoidTypeInContext(ctx->context);
ctx->i1 = LLVMInt1TypeInContext(ctx->context);
ctx->i8 = LLVMInt8TypeInContext(ctx->context);
ctx->i16 = LLVMIntTypeInContext(ctx->context, 16);
ctx->i32 = LLVMIntTypeInContext(ctx->context, 32);
ctx->i64 = LLVMIntTypeInContext(ctx->context, 64);
ctx->i128 = LLVMIntTypeInContext(ctx->context, 128);
ctx->intptr = ctx->i32;
ctx->f16 = LLVMHalfTypeInContext(ctx->context);
ctx->f32 = LLVMFloatTypeInContext(ctx->context);
ctx->f64 = LLVMDoubleTypeInContext(ctx->context);
ctx->v2i16 = LLVMVectorType(ctx->i16, 2);
ctx->v4i16 = LLVMVectorType(ctx->i16, 4);
ctx->v2f16 = LLVMVectorType(ctx->f16, 2);
ctx->v4f16 = LLVMVectorType(ctx->f16, 4);
ctx->v2i32 = LLVMVectorType(ctx->i32, 2);
ctx->v3i32 = LLVMVectorType(ctx->i32, 3);
ctx->v4i32 = LLVMVectorType(ctx->i32, 4);
ctx->v2f32 = LLVMVectorType(ctx->f32, 2);
ctx->v3f32 = LLVMVectorType(ctx->f32, 3);
ctx->v4f32 = LLVMVectorType(ctx->f32, 4);
ctx->v8i32 = LLVMVectorType(ctx->i32, 8);
ctx->iN_wavemask = LLVMIntTypeInContext(ctx->context, ctx->wave_size);
ctx->iN_ballotmask = LLVMIntTypeInContext(ctx->context, ballot_mask_bits);
ctx->i8_0 = LLVMConstInt(ctx->i8, 0, false);
ctx->i8_1 = LLVMConstInt(ctx->i8, 1, false);
ctx->i16_0 = LLVMConstInt(ctx->i16, 0, false);
ctx->i16_1 = LLVMConstInt(ctx->i16, 1, false);
ctx->i32_0 = LLVMConstInt(ctx->i32, 0, false);
ctx->i32_1 = LLVMConstInt(ctx->i32, 1, false);
ctx->i64_0 = LLVMConstInt(ctx->i64, 0, false);
ctx->i64_1 = LLVMConstInt(ctx->i64, 1, false);
ctx->i128_0 = LLVMConstInt(ctx->i128, 0, false);
ctx->i128_1 = LLVMConstInt(ctx->i128, 1, false);
ctx->f16_0 = LLVMConstReal(ctx->f16, 0.0);
ctx->f16_1 = LLVMConstReal(ctx->f16, 1.0);
ctx->f32_0 = LLVMConstReal(ctx->f32, 0.0);
ctx->f32_1 = LLVMConstReal(ctx->f32, 1.0);
ctx->f64_0 = LLVMConstReal(ctx->f64, 0.0);
ctx->f64_1 = LLVMConstReal(ctx->f64, 1.0);
ctx->i1false = LLVMConstInt(ctx->i1, 0, false);
ctx->i1true = LLVMConstInt(ctx->i1, 1, false);
ctx->range_md_kind = LLVMGetMDKindIDInContext(ctx->context, "range", 5);
ctx->invariant_load_md_kind = LLVMGetMDKindIDInContext(ctx->context, "invariant.load", 14);
ctx->uniform_md_kind = LLVMGetMDKindIDInContext(ctx->context, "amdgpu.uniform", 14);
ctx->empty_md = LLVMMDNodeInContext(ctx->context, NULL, 0);
ctx->flow = calloc(1, sizeof(*ctx->flow));
}
void ac_llvm_context_dispose(struct ac_llvm_context *ctx)
{
free(ctx->flow->stack);
free(ctx->flow);
ctx->flow = NULL;
}
int ac_get_llvm_num_components(LLVMValueRef value)
{
LLVMTypeRef type = LLVMTypeOf(value);
unsigned num_components =
LLVMGetTypeKind(type) == LLVMVectorTypeKind ? LLVMGetVectorSize(type) : 1;
return num_components;
}
LLVMValueRef ac_llvm_extract_elem(struct ac_llvm_context *ac, LLVMValueRef value, int index)
{
if (LLVMGetTypeKind(LLVMTypeOf(value)) != LLVMVectorTypeKind) {
assert(index == 0);
return value;
}
return LLVMBuildExtractElement(ac->builder, value, LLVMConstInt(ac->i32, index, false), "");
}
int ac_get_elem_bits(struct ac_llvm_context *ctx, LLVMTypeRef type)
{
if (LLVMGetTypeKind(type) == LLVMVectorTypeKind)
type = LLVMGetElementType(type);
if (LLVMGetTypeKind(type) == LLVMIntegerTypeKind)
return LLVMGetIntTypeWidth(type);
if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
if (LLVMGetPointerAddressSpace(type) == AC_ADDR_SPACE_LDS)
return 32;
}
if (type == ctx->f16)
return 16;
if (type == ctx->f32)
return 32;
if (type == ctx->f64)
return 64;
unreachable("Unhandled type kind in get_elem_bits");
}
unsigned ac_get_type_size(LLVMTypeRef type)
{
LLVMTypeKind kind = LLVMGetTypeKind(type);
switch (kind) {
case LLVMIntegerTypeKind:
return LLVMGetIntTypeWidth(type) / 8;
case LLVMHalfTypeKind:
return 2;
case LLVMFloatTypeKind:
return 4;
case LLVMDoubleTypeKind:
return 8;
case LLVMPointerTypeKind:
if (LLVMGetPointerAddressSpace(type) == AC_ADDR_SPACE_CONST_32BIT)
return 4;
return 8;
case LLVMVectorTypeKind:
return LLVMGetVectorSize(type) * ac_get_type_size(LLVMGetElementType(type));
case LLVMArrayTypeKind:
return LLVMGetArrayLength(type) * ac_get_type_size(LLVMGetElementType(type));
default:
assert(0);
return 0;
}
}
static LLVMTypeRef to_integer_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
if (t == ctx->i1)
return ctx->i1;
else if (t == ctx->i8)
return ctx->i8;
else if (t == ctx->f16 || t == ctx->i16)
return ctx->i16;
else if (t == ctx->f32 || t == ctx->i32)
return ctx->i32;
else if (t == ctx->f64 || t == ctx->i64)
return ctx->i64;
else
unreachable("Unhandled integer size");
}
LLVMTypeRef ac_to_integer_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
LLVMTypeRef elem_type = LLVMGetElementType(t);
return LLVMVectorType(to_integer_type_scalar(ctx, elem_type), LLVMGetVectorSize(t));
}
if (LLVMGetTypeKind(t) == LLVMPointerTypeKind) {
switch (LLVMGetPointerAddressSpace(t)) {
case AC_ADDR_SPACE_GLOBAL:
return ctx->i64;
case AC_ADDR_SPACE_CONST_32BIT:
case AC_ADDR_SPACE_LDS:
return ctx->i32;
default:
unreachable("unhandled address space");
}
}
return to_integer_type_scalar(ctx, t);
}
LLVMValueRef ac_to_integer(struct ac_llvm_context *ctx, LLVMValueRef v)
{
LLVMTypeRef type = LLVMTypeOf(v);
if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
return LLVMBuildPtrToInt(ctx->builder, v, ac_to_integer_type(ctx, type), "");
}
return LLVMBuildBitCast(ctx->builder, v, ac_to_integer_type(ctx, type), "");
}
LLVMValueRef ac_to_integer_or_pointer(struct ac_llvm_context *ctx, LLVMValueRef v)
{
LLVMTypeRef type = LLVMTypeOf(v);
if (LLVMGetTypeKind(type) == LLVMPointerTypeKind)
return v;
return ac_to_integer(ctx, v);
}
static LLVMTypeRef to_float_type_scalar(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
if (t == ctx->i8)
return ctx->i8;
else if (t == ctx->i16 || t == ctx->f16)
return ctx->f16;
else if (t == ctx->i32 || t == ctx->f32)
return ctx->f32;
else if (t == ctx->i64 || t == ctx->f64)
return ctx->f64;
else
unreachable("Unhandled float size");
}
LLVMTypeRef ac_to_float_type(struct ac_llvm_context *ctx, LLVMTypeRef t)
{
if (LLVMGetTypeKind(t) == LLVMVectorTypeKind) {
LLVMTypeRef elem_type = LLVMGetElementType(t);
return LLVMVectorType(to_float_type_scalar(ctx, elem_type), LLVMGetVectorSize(t));
}
return to_float_type_scalar(ctx, t);
}
LLVMValueRef ac_to_float(struct ac_llvm_context *ctx, LLVMValueRef v)
{
LLVMTypeRef type = LLVMTypeOf(v);
return LLVMBuildBitCast(ctx->builder, v, ac_to_float_type(ctx, type), "");
}
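/* Build a call to an intrinsic, declaring it in the module on first use.
 * Attributes are applied to the call site unless AC_FUNC_ATTR_LEGACY is set,
 * in which case they are applied to the function declaration instead.
 */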
LLVMValueRef ac_build_intrinsic(struct ac_llvm_context *ctx, const char *name,
LLVMTypeRef return_type, LLVMValueRef *params, unsigned param_count,
unsigned attrib_mask)
{
LLVMValueRef call;
bool set_callsite_attrs = !(attrib_mask & AC_FUNC_ATTR_LEGACY);
LLVMTypeRef param_types[32];
assert(param_count <= 32);
for (unsigned i = 0; i < param_count; ++i) {
assert(params[i]);
param_types[i] = LLVMTypeOf(params[i]);
}
LLVMTypeRef function_type = LLVMFunctionType(return_type, param_types, param_count, 0);
LLVMValueRef function = LLVMGetNamedFunction(ctx->module, name);
if (!function) {
function = LLVMAddFunction(ctx->module, name, function_type);
LLVMSetFunctionCallConv(function, LLVMCCallConv);
LLVMSetLinkage(function, LLVMExternalLinkage);
if (!set_callsite_attrs)
ac_add_func_attributes(ctx->context, function, attrib_mask);
}
call = LLVMBuildCall2(ctx->builder, function_type, function, params, param_count, "");
if (set_callsite_attrs)
ac_add_func_attributes(ctx->context, call, attrib_mask);
return call;
}
/**
* Given a scalar, vector, or struct \p type, generate the textual name used
* to mangle overloaded intrinsic names.
*/
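/* Examples: i32 -> "i32", v4f32 -> "v4f32", f16 -> "f16", and a struct of
 * {v4f32, i32} -> "sl_v4f32i32s" (the "sl_"/"s" pair brackets struct fields).
 */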
void ac_build_type_name_for_intr(LLVMTypeRef type, char *buf, unsigned bufsize)
{
LLVMTypeRef elem_type = type;
if (LLVMGetTypeKind(type) == LLVMStructTypeKind) {
unsigned count = LLVMCountStructElementTypes(type);
int ret = snprintf(buf, bufsize, "sl_");
buf += ret;
bufsize -= ret;
LLVMTypeRef *elems = alloca(count * sizeof(LLVMTypeRef));
LLVMGetStructElementTypes(type, elems);
for (unsigned i = 0; i < count; i++) {
ac_build_type_name_for_intr(elems[i], buf, bufsize);
ret = strlen(buf);
buf += ret;
bufsize -= ret;
}
snprintf(buf, bufsize, "s");
return;
}
assert(bufsize >= 8);
if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
int ret = snprintf(buf, bufsize, "v%u", LLVMGetVectorSize(type));
if (ret < 0) {
char *type_name = LLVMPrintTypeToString(type);
fprintf(stderr, "Error building type name for: %s\n", type_name);
LLVMDisposeMessage(type_name);
return;
}
elem_type = LLVMGetElementType(type);
buf += ret;
bufsize -= ret;
}
switch (LLVMGetTypeKind(elem_type)) {
default:
break;
case LLVMIntegerTypeKind:
snprintf(buf, bufsize, "i%d", LLVMGetIntTypeWidth(elem_type));
break;
case LLVMHalfTypeKind:
snprintf(buf, bufsize, "f16");
break;
case LLVMFloatTypeKind:
snprintf(buf, bufsize, "f32");
break;
case LLVMDoubleTypeKind:
snprintf(buf, bufsize, "f64");
break;
}
}
/**
* Helper function that builds an LLVM IR PHI node and immediately adds
* incoming edges.
*/
LLVMValueRef ac_build_phi(struct ac_llvm_context *ctx, LLVMTypeRef type, unsigned count_incoming,
LLVMValueRef *values, LLVMBasicBlockRef *blocks)
{
LLVMValueRef phi = LLVMBuildPhi(ctx->builder, type, "");
LLVMAddIncoming(phi, values, blocks, count_incoming);
return phi;
}
void ac_build_s_barrier(struct ac_llvm_context *ctx, gl_shader_stage stage)
{
/* GFX6 only: s_barrier isn't needed in TCS because an entire patch always fits into
* a single wave due to a bug workaround disallowing multi-wave HS workgroups.
*/
if (ctx->gfx_level == GFX6 && stage == MESA_SHADER_TESS_CTRL)
return;
ac_build_intrinsic(ctx, "llvm.amdgcn.s.barrier", ctx->voidt, NULL, 0, AC_FUNC_ATTR_CONVERGENT);
}
/* Prevent optimizations (at least of memory accesses) across the current
* point in the program by emitting empty inline assembly that is marked as
* having side effects.
*
* Optionally, a value can be passed through the inline assembly to prevent
* LLVM from hoisting calls to ReadNone functions.
*/
void ac_build_optimization_barrier(struct ac_llvm_context *ctx, LLVMValueRef *pgpr, bool sgpr)
{
static int counter = 0;
LLVMBuilderRef builder = ctx->builder;
char code[16];
const char *constraint = sgpr ? "=s,0" : "=v,0";
snprintf(code, sizeof(code), "; %d", (int)p_atomic_inc_return(&counter));
if (!pgpr) {
LLVMTypeRef ftype = LLVMFunctionType(ctx->voidt, NULL, 0, false);
LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, "", true, false);
LLVMBuildCall2(builder, ftype, inlineasm, NULL, 0, "");
} else if (LLVMTypeOf(*pgpr) == ctx->i32) {
/* Simple version for i32 that allows the caller to set LLVM metadata on the call
* instruction. */
LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
*pgpr = LLVMBuildCall2(builder, ftype, inlineasm, pgpr, 1, "");
} else if (LLVMTypeOf(*pgpr) == ctx->i16) {
/* Simple version for i16 that allows the caller to set LLVM metadata on the call
* instruction. */
LLVMTypeRef ftype = LLVMFunctionType(ctx->i16, &ctx->i16, 1, false);
LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
*pgpr = LLVMBuildCall2(builder, ftype, inlineasm, pgpr, 1, "");
} else if (LLVMGetTypeKind(LLVMTypeOf(*pgpr)) == LLVMPointerTypeKind) {
LLVMTypeRef type = LLVMTypeOf(*pgpr);
LLVMTypeRef ftype = LLVMFunctionType(type, &type, 1, false);
LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
*pgpr = LLVMBuildCall2(builder, ftype, inlineasm, pgpr, 1, "");
} else {
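/* General case: zero-extend sub-dword types, bitcast the value to a vector
 * of dwords, pass the first dword through the inline asm, and reassemble
 * the original type so that the result is data-dependent on the asm call.
 */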
LLVMTypeRef ftype = LLVMFunctionType(ctx->i32, &ctx->i32, 1, false);
LLVMValueRef inlineasm = LLVMConstInlineAsm(ftype, code, constraint, true, false);
LLVMTypeRef type = LLVMTypeOf(*pgpr);
unsigned bitsize = ac_get_elem_bits(ctx, type);
LLVMValueRef vgpr = *pgpr;
LLVMTypeRef vgpr_type;
unsigned vgpr_size;
LLVMValueRef vgpr0;
if (bitsize < 32)
vgpr = LLVMBuildZExt(ctx->builder, vgpr, ctx->i32, "");
vgpr_type = LLVMTypeOf(vgpr);
vgpr_size = ac_get_type_size(vgpr_type);
assert(vgpr_size % 4 == 0);
vgpr = LLVMBuildBitCast(builder, vgpr, LLVMVectorType(ctx->i32, vgpr_size / 4), "");
vgpr0 = LLVMBuildExtractElement(builder, vgpr, ctx->i32_0, "");
vgpr0 = LLVMBuildCall2(builder, ftype, inlineasm, &vgpr0, 1, "");
vgpr = LLVMBuildInsertElement(builder, vgpr, vgpr0, ctx->i32_0, "");
vgpr = LLVMBuildBitCast(builder, vgpr, vgpr_type, "");
if (bitsize < 32)
vgpr = LLVMBuildTrunc(builder, vgpr, type, "");
*pgpr = vgpr;
}
}
LLVMValueRef ac_build_shader_clock(struct ac_llvm_context *ctx, nir_scope scope)
{
if (ctx->gfx_level >= GFX11 && scope == NIR_SCOPE_DEVICE) {
const char *name = "llvm.amdgcn.s.sendmsg.rtn.i64";
LLVMValueRef arg = LLVMConstInt(ctx->i32, 0x83 /* realtime */, 0);
LLVMValueRef tmp = ac_build_intrinsic(ctx, name, ctx->i64, &arg, 1, 0);
return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
}
const char *subgroup = "llvm.readcyclecounter";
const char *name = scope == NIR_SCOPE_DEVICE ? "llvm.amdgcn.s.memrealtime" : subgroup;
LLVMValueRef tmp = ac_build_intrinsic(ctx, name, ctx->i64, NULL, 0, 0);
return LLVMBuildBitCast(ctx->builder, tmp, ctx->v2i32, "");
}
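/* Returns a bitmask with one bit per active lane in which the value is
 * nonzero. llvm.amdgcn.icmp with NE against 0 acts as a ballot: it packs the
 * per-lane comparison results into a scalar wave mask.
 */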
LLVMValueRef ac_build_ballot(struct ac_llvm_context *ctx, LLVMValueRef value)
{
const char *name;
if (LLVMTypeOf(value) == ctx->i1)
value = LLVMBuildZExt(ctx->builder, value, ctx->i32, "");
if (ctx->wave_size == 64)
name = "llvm.amdgcn.icmp.i64.i32";
else
name = "llvm.amdgcn.icmp.i32.i32";
LLVMValueRef args[3] = {value, ctx->i32_0, LLVMConstInt(ctx->i32, LLVMIntNE, 0)};
/* We currently have no other way to prevent LLVM from lifting the icmp
* calls to a dominating basic block.
*/
ac_build_optimization_barrier(ctx, &args[0], false);
args[0] = ac_to_integer(ctx, args[0]);
return ac_build_intrinsic(
ctx, name, ctx->iN_wavemask, args, 3,
AC_FUNC_ATTR_NOUNWIND | AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
}
LLVMValueRef ac_get_i1_sgpr_mask(struct ac_llvm_context *ctx, LLVMValueRef value)
{
const char *name;
if (ctx->wave_size == 64)
name = "llvm.amdgcn.icmp.i64.i1";
else
name = "llvm.amdgcn.icmp.i32.i1";
LLVMValueRef args[3] = {
value,
ctx->i1false,
LLVMConstInt(ctx->i32, LLVMIntNE, 0),
};
return ac_build_intrinsic(
ctx, name, ctx->iN_wavemask, args, 3,
AC_FUNC_ATTR_NOUNWIND | AC_FUNC_ATTR_READNONE | AC_FUNC_ATTR_CONVERGENT);
}
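/* The vote helpers below compare a ballot of the condition against a ballot
 * of "true": all active lanes voting yes makes the two masks equal, no lane
 * voting yes makes the condition ballot zero, and vote_eq is the OR of those
 * two cases.
 */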
LLVMValueRef ac_build_vote_all(struct ac_llvm_context *ctx, LLVMValueRef value)
{
LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
LLVMValueRef vote_set = ac_build_ballot(ctx, value);
return LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
}
LLVMValueRef ac_build_vote_any(struct ac_llvm_context *ctx, LLVMValueRef value)
{
LLVMValueRef vote_set = ac_build_ballot(ctx, value);
return LLVMBuildICmp(ctx->builder, LLVMIntNE, vote_set, LLVMConstInt(ctx->iN_wavemask, 0, 0),
"");
}
LLVMValueRef ac_build_vote_eq(struct ac_llvm_context *ctx, LLVMValueRef value)
{
LLVMValueRef active_set = ac_build_ballot(ctx, ctx->i32_1);
LLVMValueRef vote_set = ac_build_ballot(ctx, value);
LLVMValueRef all = LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, active_set, "");
LLVMValueRef none =
LLVMBuildICmp(ctx->builder, LLVMIntEQ, vote_set, LLVMConstInt(ctx->iN_wavemask, 0, 0), "");
return LLVMBuildOr(ctx->builder, all, none, "");
}
LLVMValueRef ac_build_varying_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
unsigned value_count, unsigned component)
{
LLVMValueRef vec = NULL;
if (value_count == 1) {
return values[component];
} else if (!value_count)
unreachable("value_count is 0");
for (unsigned i = component; i < value_count + component; i++) {
LLVMValueRef value = values[i];
if (i == component)
vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
LLVMValueRef index = LLVMConstInt(ctx->i32, i - component, false);
vec = LLVMBuildInsertElement(ctx->builder, vec, value, index, "");
}
return vec;
}
LLVMValueRef ac_build_gather_values_extended(struct ac_llvm_context *ctx, LLVMValueRef *values,
unsigned value_count, unsigned value_stride,
bool always_vector)
{
LLVMBuilderRef builder = ctx->builder;
LLVMValueRef vec = NULL;
unsigned i;
if (value_count == 1 && !always_vector) {
return values[0];
} else if (!value_count)
unreachable("value_count is 0");
for (i = 0; i < value_count; i++) {
LLVMValueRef value = values[i * value_stride];
if (!i)
vec = LLVMGetUndef(LLVMVectorType(LLVMTypeOf(value), value_count));
LLVMValueRef index = LLVMConstInt(ctx->i32, i, false);
vec = LLVMBuildInsertElement(builder, vec, value, index, "");
}
return vec;
}
LLVMValueRef ac_build_gather_values(struct ac_llvm_context *ctx, LLVMValueRef *values,
unsigned value_count)
{
return ac_build_gather_values_extended(ctx, values, value_count, 1, false);
}
LLVMValueRef ac_build_concat(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
{
unsigned a_size = ac_get_llvm_num_components(a);
unsigned b_size = ac_get_llvm_num_components(b);
LLVMValueRef *elems = alloca((a_size + b_size) * sizeof(LLVMValueRef));
for (unsigned i = 0; i < a_size; i++)
elems[i] = ac_llvm_extract_elem(ctx, a, i);
for (unsigned i = 0; i < b_size; i++)
elems[a_size + i] = ac_llvm_extract_elem(ctx, b, i);
return ac_build_gather_values(ctx, elems, a_size + b_size);
}
/* Expand a scalar or vector to <dst_channels x type> by filling the remaining
* channels with undef. Extract at most src_channels components from the input.
*/
LLVMValueRef ac_build_expand(struct ac_llvm_context *ctx, LLVMValueRef value,
unsigned src_channels, unsigned dst_channels)
{
LLVMTypeRef elemtype;
LLVMValueRef *const chan = alloca(dst_channels * sizeof(LLVMValueRef));
if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));
if (src_channels == dst_channels && vec_size == dst_channels)
return value;
src_channels = MIN2(src_channels, vec_size);
for (unsigned i = 0; i < src_channels; i++)
chan[i] = ac_llvm_extract_elem(ctx, value, i);
elemtype = LLVMGetElementType(LLVMTypeOf(value));
} else {
if (src_channels) {
assert(src_channels == 1);
chan[0] = value;
}
elemtype = LLVMTypeOf(value);
}
for (unsigned i = src_channels; i < dst_channels; i++)
chan[i] = LLVMGetUndef(elemtype);
return ac_build_gather_values(ctx, chan, dst_channels);
}
/* Extract components [start, start + channels) from a vector.
*/
LLVMValueRef ac_extract_components(struct ac_llvm_context *ctx, LLVMValueRef value, unsigned start,
unsigned channels)
{
LLVMValueRef *const chan = alloca(channels * sizeof(LLVMValueRef));
for (unsigned i = 0; i < channels; i++)
chan[i] = ac_llvm_extract_elem(ctx, value, i + start);
return ac_build_gather_values(ctx, chan, channels);
}
/* Expand a scalar or vector to <4 x type> by filling the remaining channels
* with undef. Extract at most num_channels components from the input.
*/
LLVMValueRef ac_build_expand_to_vec4(struct ac_llvm_context *ctx, LLVMValueRef value,
unsigned num_channels)
{
return ac_build_expand(ctx, value, num_channels, 4);
}
LLVMValueRef ac_build_round(struct ac_llvm_context *ctx, LLVMValueRef value)
{
unsigned type_size = ac_get_type_size(LLVMTypeOf(value));
const char *name;
if (type_size == 2)
name = "llvm.rint.f16";
else if (type_size == 4)
name = "llvm.rint.f32";
else
name = "llvm.rint.f64";
return ac_build_intrinsic(ctx, name, LLVMTypeOf(value), &value, 1, AC_FUNC_ATTR_READNONE);
}
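/* Fast division: multiply by the hardware reciprocal (v_rcp) instead of
 * emitting a full-precision fdiv. This trades a small amount of accuracy for
 * speed; the precise LLVM fdiv is kept for f64 in the OpenGL float mode (see
 * the GLCTS note below).
 */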
LLVMValueRef ac_build_fdiv(struct ac_llvm_context *ctx, LLVMValueRef num, LLVMValueRef den)
{
unsigned type_size = ac_get_type_size(LLVMTypeOf(den));
const char *name;
/* For doubles, we need precise division to pass GLCTS. */
if (ctx->float_mode == AC_FLOAT_MODE_DEFAULT_OPENGL && type_size == 8)
return LLVMBuildFDiv(ctx->builder, num, den, "");
if (type_size == 2)
name = "llvm.amdgcn.rcp.f16";
else if (type_size == 4)
name = "llvm.amdgcn.rcp.f32";
else
name = "llvm.amdgcn.rcp.f64";
LLVMValueRef rcp =
ac_build_intrinsic(ctx, name, LLVMTypeOf(den), &den, 1, AC_FUNC_ATTR_READNONE);
return LLVMBuildFMul(ctx->builder, num, rcp, "");
}
/* See fast_idiv_by_const.h. */
/* Set: increment = util_fast_udiv_info::increment ? multiplier : 0; */
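/* Computes ((((num >> pre_shift) * multiplier) + increment) >> 32) >> post_shift
 * in 64-bit arithmetic, which equals num / divisor for constants produced by
 * util_fast_udiv_info. Illustrative values (not from this file): dividing by 3
 * can use pre_shift = 0, multiplier = 0xAAAAAAAB, increment = 0, post_shift = 1,
 * since (n * 0xAAAAAAAB) >> 33 == n / 3 for all 32-bit n.
 */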
LLVMValueRef ac_build_fast_udiv(struct ac_llvm_context *ctx, LLVMValueRef num,
LLVMValueRef multiplier, LLVMValueRef pre_shift,
LLVMValueRef post_shift, LLVMValueRef increment)
{
LLVMBuilderRef builder = ctx->builder;
num = LLVMBuildLShr(builder, num, pre_shift, "");
num = LLVMBuildMul(builder, LLVMBuildZExt(builder, num, ctx->i64, ""),
LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
num = LLVMBuildAdd(builder, num, LLVMBuildZExt(builder, increment, ctx->i64, ""), "");
num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
num = LLVMBuildTrunc(builder, num, ctx->i32, "");
return LLVMBuildLShr(builder, num, post_shift, "");
}
/* See fast_idiv_by_const.h. */
/* If num != UINT_MAX, this more efficient version can be used. */
/* Set: increment = util_fast_udiv_info::increment; */
LLVMValueRef ac_build_fast_udiv_nuw(struct ac_llvm_context *ctx, LLVMValueRef num,
LLVMValueRef multiplier, LLVMValueRef pre_shift,
LLVMValueRef post_shift, LLVMValueRef increment)
{
LLVMBuilderRef builder = ctx->builder;
num = LLVMBuildLShr(builder, num, pre_shift, "");
num = LLVMBuildNUWAdd(builder, num, increment, "");
num = LLVMBuildMul(builder, LLVMBuildZExt(builder, num, ctx->i64, ""),
LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
num = LLVMBuildTrunc(builder, num, ctx->i32, "");
return LLVMBuildLShr(builder, num, post_shift, "");
}
/* See fast_idiv_by_const.h. */
/* Both operands must fit in 31 bits and the divisor must not be 1. */
LLVMValueRef ac_build_fast_udiv_u31_d_not_one(struct ac_llvm_context *ctx, LLVMValueRef num,
LLVMValueRef multiplier, LLVMValueRef post_shift)
{
LLVMBuilderRef builder = ctx->builder;
num = LLVMBuildMul(builder, LLVMBuildZExt(builder, num, ctx->i64, ""),
LLVMBuildZExt(builder, multiplier, ctx->i64, ""), "");
num = LLVMBuildLShr(builder, num, LLVMConstInt(ctx->i64, 32, 0), "");
num = LLVMBuildTrunc(builder, num, ctx->i32, "");
return LLVMBuildLShr(builder, num, post_shift, "");
}
/* Coordinates for cube map selection. sc, tc, and ma are as in Table 8.27
* of the OpenGL 4.5 (Compatibility Profile) specification, except ma is
* already multiplied by two. id is the cube face number.
*/
struct cube_selection_coords {
LLVMValueRef stc[2];
LLVMValueRef ma;
LLVMValueRef id;
};
static void build_cube_intrinsic(struct ac_llvm_context *ctx, LLVMValueRef in[3],
struct cube_selection_coords *out)
{
LLVMTypeRef f32 = ctx->f32;
out->stc[1] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubetc", f32, in, 3, AC_FUNC_ATTR_READNONE);
out->stc[0] = ac_build_intrinsic(ctx, "llvm.amdgcn.cubesc", f32, in, 3, AC_FUNC_ATTR_READNONE);
out->ma = ac_build_intrinsic(ctx, "llvm.amdgcn.cubema", f32, in, 3, AC_FUNC_ATTR_READNONE);
out->id = ac_build_intrinsic(ctx, "llvm.amdgcn.cubeid", f32, in, 3, AC_FUNC_ATTR_READNONE);
}
/**
* Build a manual selection sequence for cube face sc/tc coordinates and
* major axis vector (multiplied by 2 for consistency) for the given
* vec3 \p coords, for the face implied by \p selcoords.
*
* For the major axis, we always adjust the sign to be in the direction of
* selcoords.ma; i.e., a positive out_ma means that coords is pointed towards
* the selcoords major axis.
*/
static void build_cube_select(struct ac_llvm_context *ctx,
const struct cube_selection_coords *selcoords,
const LLVMValueRef *coords, LLVMValueRef *out_st,
LLVMValueRef *out_ma)
{
LLVMBuilderRef builder = ctx->builder;
LLVMTypeRef f32 = LLVMTypeOf(coords[0]);
LLVMValueRef is_ma_positive;
LLVMValueRef sgn_ma;
LLVMValueRef is_ma_z, is_not_ma_z;
LLVMValueRef is_ma_y;
LLVMValueRef is_ma_x;
LLVMValueRef sgn;
LLVMValueRef tmp;
is_ma_positive = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->ma, LLVMConstReal(f32, 0.0), "");
sgn_ma = LLVMBuildSelect(builder, is_ma_positive, LLVMConstReal(f32, 1.0),
LLVMConstReal(f32, -1.0), "");
is_ma_z = LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 4.0), "");
is_not_ma_z = LLVMBuildNot(builder, is_ma_z, "");
is_ma_y = LLVMBuildAnd(
builder, is_not_ma_z,
LLVMBuildFCmp(builder, LLVMRealUGE, selcoords->id, LLVMConstReal(f32, 2.0), ""), "");
is_ma_x = LLVMBuildAnd(builder, is_not_ma_z, LLVMBuildNot(builder, is_ma_y, ""), "");
/* Select sc */
tmp = LLVMBuildSelect(builder, is_ma_x, coords[2], coords[0], "");
sgn = LLVMBuildSelect(
builder, is_ma_y, LLVMConstReal(f32, 1.0),
LLVMBuildSelect(builder, is_ma_z, sgn_ma, LLVMBuildFNeg(builder, sgn_ma, ""), ""), "");
out_st[0] = LLVMBuildFMul(builder, tmp, sgn, "");
/* Select tc */
tmp = LLVMBuildSelect(builder, is_ma_y, coords[2], coords[1], "");
sgn = LLVMBuildSelect(builder, is_ma_y, sgn_ma, LLVMConstReal(f32, -1.0), "");
out_st[1] = LLVMBuildFMul(builder, tmp, sgn, "");
/* Select ma */
tmp = LLVMBuildSelect(builder, is_ma_z, coords[2],
LLVMBuildSelect(builder, is_ma_y, coords[1], coords[0], ""), "");
tmp = ac_build_intrinsic(ctx, "llvm.fabs.f32", ctx->f32, &tmp, 1, AC_FUNC_ATTR_READNONE);
*out_ma = LLVMBuildFMul(builder, tmp, LLVMConstReal(f32, 2.0), "");
}
void ac_prepare_cube_coords(struct ac_llvm_context *ctx, bool is_deriv, bool is_array, bool is_lod,
LLVMValueRef *coords_arg, LLVMValueRef *derivs_arg)
{
LLVMBuilderRef builder = ctx->builder;
struct cube_selection_coords selcoords;
LLVMValueRef coords[3];
LLVMValueRef invma;
if (is_array && !is_lod) {
LLVMValueRef tmp = ac_build_round(ctx, coords_arg[3]);
/* Section 8.9 (Texture Functions) of the GLSL 4.50 spec says:
*
* "For Array forms, the array layer used will be
*
* max(0, min(d−1, floor(layer+0.5)))
*
* where d is the depth of the texture array and layer
* comes from the component indicated in the tables below."
*
* The rounding above also works around an issue where the layer is
* taken from a helper invocation which happens to fall on a different
* layer due to extrapolation.
*
* GFX8 and earlier attempt to implement this in hardware by
* clamping the value of coords[2] = (8 * layer) + face.
* Unfortunately, this means that we end up with the wrong
* face when clamping occurs.
*
* Clamp the layer earlier to work around the issue.
*/
if (ctx->gfx_level <= GFX8) {
LLVMValueRef ge0;
ge0 = LLVMBuildFCmp(builder, LLVMRealOGE, tmp, ctx->f32_0, "");
tmp = LLVMBuildSelect(builder, ge0, tmp, ctx->f32_0, "");
}
coords_arg[3] = tmp;
}
build_cube_intrinsic(ctx, coords_arg, &selcoords);
invma =
ac_build_intrinsic(ctx, "llvm.fabs.f32", ctx->f32, &selcoords.ma, 1, AC_FUNC_ATTR_READNONE);
invma = ac_build_fdiv(ctx, LLVMConstReal(ctx->f32, 1.0), invma);
for (int i = 0; i < 2; ++i)
coords[i] = LLVMBuildFMul(builder, selcoords.stc[i], invma, "");
coords[2] = selcoords.id;
if (is_deriv && derivs_arg) {
LLVMValueRef derivs[4];
int axis;
/* Convert cube derivatives to 2D derivatives. */
for (axis = 0; axis < 2; axis++) {
LLVMValueRef deriv_st[2];
LLVMValueRef deriv_ma;
/* Transform the derivative alongside the texture
* coordinate. Mathematically, the correct formula is
* as follows. Assume we're projecting onto the +Z face
* and denote by dx/dh the derivative of the (original)
* X texture coordinate with respect to horizontal
* window coordinates. The projection onto the +Z face
* plane is:
*
* f(x,z) = x/z
*
* Then df/dh = df/dx * dx/dh + df/dz * dz/dh
* = 1/z * dx/dh - x/z * 1/z * dz/dh.
*
* This motivates the implementation below.
*
* Whether this actually gives the expected results for
* apps that might feed in derivatives obtained via
* finite differences is anyone's guess. The OpenGL spec
* seems awfully quiet about how textureGrad for cube
* maps should be handled.
*/
build_cube_select(ctx, &selcoords, &derivs_arg[axis * 3], deriv_st, &deriv_ma);
deriv_ma = LLVMBuildFMul(builder, deriv_ma, invma, "");
for (int i = 0; i < 2; ++i)
derivs[axis * 2 + i] =
LLVMBuildFSub(builder, LLVMBuildFMul(builder, deriv_st[i], invma, ""),
LLVMBuildFMul(builder, deriv_ma, coords[i], ""), "");
}
memcpy(derivs_arg, derivs, sizeof(derivs));
}
/* Shift the texture coordinate. This must be applied after the
* derivative calculation.
*/
for (int i = 0; i < 2; ++i)
coords[i] = LLVMBuildFAdd(builder, coords[i], LLVMConstReal(ctx->f32, 1.5), "");
if (is_array) {
/* For cube arrays, coord.z = coord.w (the array index) * 8 + face. */
coords[2] = ac_build_fmad(ctx, coords_arg[3], LLVMConstReal(ctx->f32, 8.0), coords[2]);
}
memcpy(coords_arg, coords, sizeof(coords));
}
LLVMValueRef ac_build_fs_interp(struct ac_llvm_context *ctx, LLVMValueRef llvm_chan,
LLVMValueRef attr_number, LLVMValueRef params, LLVMValueRef i,
LLVMValueRef j)
{
LLVMValueRef args[5];
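/* GFX11 removed the dedicated attribute-interpolation instructions;
 * attributes are read from LDS with lds.param.load and interpolated with the
 * FMA-based interp.inreg.p10/p2 pair. Older chips use interp.p1/p2, which
 * read the attribute from LDS themselves.
 */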
if (ctx->gfx_level >= GFX11) {
LLVMValueRef p;
LLVMValueRef p10;
args[0] = llvm_chan;
args[1] = attr_number;
args[2] = params;
p = ac_build_intrinsic(ctx, "llvm.amdgcn.lds.param.load",
ctx->f32, args, 3, AC_FUNC_ATTR_READNONE);
args[0] = p;
args[1] = i;
args[2] = p;
p10 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.inreg.p10",
ctx->f32, args, 3, AC_FUNC_ATTR_READNONE);
args[0] = p;
args[1] = j;
args[2] = p10;
return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.inreg.p2",
ctx->f32, args, 3, AC_FUNC_ATTR_READNONE);
} else {
LLVMValueRef p1;
args[0] = i;
args[1] = llvm_chan;
args[2] = attr_number;
args[3] = params;
p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1",
ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
args[0] = p1;
args[1] = j;
args[2] = llvm_chan;
args[3] = attr_number;
args[4] = params;
return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2",
ctx->f32, args, 5, AC_FUNC_ATTR_READNONE);
}
}
LLVMValueRef ac_build_fs_interp_f16(struct ac_llvm_context *ctx, LLVMValueRef llvm_chan,
LLVMValueRef attr_number, LLVMValueRef params, LLVMValueRef i,
LLVMValueRef j, bool high_16bits)
{
LLVMValueRef args[6];
if (ctx->gfx_level >= GFX11) {
LLVMValueRef p;
LLVMValueRef p10;
args[0] = llvm_chan;
args[1] = attr_number;
args[2] = params;
p = ac_build_intrinsic(ctx, "llvm.amdgcn.lds.param.load",
ctx->f32, args, 3, AC_FUNC_ATTR_READNONE);
args[0] = p;
args[1] = i;
args[2] = p;
args[3] = high_16bits ? ctx->i1true : ctx->i1false;
p10 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.inreg.p10.f16",
ctx->f32, args, 4, AC_FUNC_ATTR_READNONE);
args[0] = p;
args[1] = j;
args[2] = p10;
args[3] = high_16bits ? ctx->i1true : ctx->i1false;
return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.inreg.p2.f16",
ctx->f16, args, 4, AC_FUNC_ATTR_READNONE);
} else {
LLVMValueRef p1;
args[0] = i;
args[1] = llvm_chan;
args[2] = attr_number;
args[3] = high_16bits ? ctx->i1true : ctx->i1false;
args[4] = params;
p1 = ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p1.f16", ctx->f32, args, 5,
AC_FUNC_ATTR_READNONE);
args[0] = p1;
args[1] = j;
args[2] = llvm_chan;
args[3] = attr_number;
args[4] = high_16bits ? ctx->i1true : ctx->i1false;
args[5] = params;
return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.p2.f16", ctx->f16, args, 6,
AC_FUNC_ATTR_READNONE);
}
}
LLVMValueRef ac_build_fs_interp_mov(struct ac_llvm_context *ctx, LLVMValueRef parameter,
LLVMValueRef llvm_chan, LLVMValueRef attr_number,
LLVMValueRef params)
{
LLVMValueRef args[4];
if (ctx->gfx_level >= GFX11) {
LLVMValueRef p;
args[0] = llvm_chan;
args[1] = attr_number;
args[2] = params;
p = ac_build_intrinsic(ctx, "llvm.amdgcn.lds.param.load",
ctx->f32, args, 3, AC_FUNC_ATTR_READNONE);
p = ac_build_quad_swizzle(ctx, p, 0, 0, 0 ,0);
return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.f32", ctx->f32, &p, 1, AC_FUNC_ATTR_READNONE);
} else {
args[0] = parameter;
args[1] = llvm_chan;
args[2] = attr_number;
args[3] = params;
return ac_build_intrinsic(ctx, "llvm.amdgcn.interp.mov", ctx->f32, args, 4,
AC_FUNC_ATTR_READNONE);
}
}
LLVMValueRef ac_build_gep_ptr(struct ac_llvm_context *ctx, LLVMTypeRef type, LLVMValueRef base_ptr,
LLVMValueRef index)
{
return LLVMBuildGEP2(ctx->builder, type, base_ptr, &index, 1, "");
}
LLVMTypeRef ac_build_gep0_type(LLVMTypeRef pointee_type, LLVMValueRef index)
{
switch (LLVMGetTypeKind(pointee_type)) {
case LLVMPointerTypeKind:
return pointee_type;
case LLVMArrayTypeKind:
/* If the input is a pointer to an array, GEP2 will return a pointer to
* the array's element type.
*/
return LLVMGetElementType(pointee_type);
case LLVMStructTypeKind:
/* If input is a pointer to a struct, GEP2 will return a pointer to
* the index-nth field, so get its type.
*/
return LLVMStructGetTypeAtIndex(pointee_type, LLVMConstIntGetZExtValue(index));
default:
/* gep0 shouldn't receive any other types. */
assert(false);
}
return NULL;
}
LLVMValueRef ac_build_gep0(struct ac_llvm_context *ctx, struct ac_llvm_pointer ptr, LLVMValueRef index)
{
LLVMValueRef indices[2] = {
ctx->i32_0,
index,
};
return LLVMBuildGEP2(ctx->builder, ptr.t, ptr.v, indices, 2, "");
}
LLVMValueRef ac_build_pointer_add(struct ac_llvm_context *ctx, LLVMTypeRef type, LLVMValueRef ptr, LLVMValueRef index)
{
return LLVMBuildGEP2(ctx->builder, type, ptr, &index, 1, "");
}
void ac_build_indexed_store(struct ac_llvm_context *ctx, struct ac_llvm_pointer ptr, LLVMValueRef index,
LLVMValueRef value)
{
LLVMBuildStore(ctx->builder, value, ac_build_gep0(ctx, ptr, index));
}
/**
* Build an LLVM IR indexed load using LLVMBuildGEP + LLVMBuildLoad.
* It's equivalent to doing a load from &base_ptr[index].
*
* \param base_ptr Where the array starts.
* \param index The element index into the array.
* \param uniform Whether the base_ptr and index can be assumed to be
* dynamically uniform (i.e. load to an SGPR)
* \param invariant Whether the load is invariant (no other opcodes affect it)
* \param no_unsigned_wraparound
* For all possible re-associations and re-distributions of an expression
* "base_ptr + index * elemsize" into "addr + offset" (excluding GEPs
* without inbounds in base_ptr), this parameter is true if "addr + offset"
* does not result in an unsigned integer wraparound. This is used for
* optimal code generation of 32-bit pointer arithmetic.
*
* For example, a 32-bit immediate offset that causes a 32-bit unsigned
* integer wraparound can't be an imm offset in s_load_dword, because
* the instruction performs "addr + offset" in 64 bits.
*
* Expected usage for bindless textures by chaining GEPs:
* // possible unsigned wraparound, don't use InBounds:
* ptr1 = LLVMBuildGEP(base_ptr, index);
* image = load(ptr1); // becomes "s_load ptr1, 0"
*
* ptr2 = LLVMBuildInBoundsGEP(ptr1, 32 / elemsize);
* sampler = load(ptr2); // becomes "s_load ptr1, 32" thanks to InBounds
*/
static LLVMValueRef ac_build_load_custom(struct ac_llvm_context *ctx, LLVMTypeRef type,
LLVMValueRef base_ptr, LLVMValueRef index,
bool uniform, bool invariant, bool no_unsigned_wraparound)
{
LLVMValueRef pointer, result;
if (no_unsigned_wraparound &&
LLVMGetPointerAddressSpace(LLVMTypeOf(base_ptr)) == AC_ADDR_SPACE_CONST_32BIT)
pointer = LLVMBuildInBoundsGEP2(ctx->builder, type, base_ptr, &index, 1, "");
else
pointer = LLVMBuildGEP2(ctx->builder, type, base_ptr, &index, 1, "");
if (uniform)
LLVMSetMetadata(pointer, ctx->uniform_md_kind, ctx->empty_md);
result = LLVMBuildLoad2(ctx->builder, type, pointer, "");
if (invariant)
LLVMSetMetadata(result, ctx->invariant_load_md_kind, ctx->empty_md);
LLVMSetAlignment(result, 4);
return result;
}
LLVMValueRef ac_build_load(struct ac_llvm_context *ctx, struct ac_llvm_pointer ptr, LLVMValueRef index)
{
return ac_build_load_custom(ctx, ptr.t, ptr.v, index, false, false, false);
}
LLVMValueRef ac_build_load_invariant(struct ac_llvm_context *ctx, struct ac_llvm_pointer ptr,
LLVMValueRef index)
{
return ac_build_load_custom(ctx, ptr.t, ptr.v, index, false, true, false);
}
/* This assumes that there is no unsigned integer wraparound during the address
* computation, excluding all GEPs within base_ptr. */
LLVMValueRef ac_build_load_to_sgpr(struct ac_llvm_context *ctx, struct ac_llvm_pointer ptr,
LLVMValueRef index)
{
return ac_build_load_custom(ctx, ptr.t, ptr.v, index, true, true, true);
}
/* See ac_build_load_custom() documentation. */
LLVMValueRef ac_build_load_to_sgpr_uint_wraparound(struct ac_llvm_context *ctx, struct ac_llvm_pointer ptr, LLVMValueRef index)
{
return ac_build_load_custom(ctx, ptr.t, ptr.v, index, true, true, false);
}
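/* GFX10-10.3 split the vector cache into L0 and L1: GLC bypasses only L0, so
 * coherent loads also need DLC to bypass L1. As the condition below shows,
 * GFX11 no longer requires this.
 */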
static unsigned get_load_cache_policy(struct ac_llvm_context *ctx, unsigned cache_policy)
{
return cache_policy |
(ctx->gfx_level >= GFX10 && ctx->gfx_level < GFX11 && cache_policy & ac_glc ? ac_dlc : 0);
}
static unsigned get_store_cache_policy(struct ac_llvm_context *ctx, unsigned cache_policy)
{
if (ctx->gfx_level >= GFX11)
cache_policy &= ~ac_glc; /* GLC has no effect on stores */
return cache_policy;
}
static void ac_build_buffer_store_common(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef data, LLVMValueRef vindex,
LLVMValueRef voffset, LLVMValueRef soffset,
unsigned cache_policy, bool use_format)
{
LLVMValueRef args[6];
int idx = 0;
args[idx++] = data;
args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
if (vindex)
args[idx++] = vindex;
args[idx++] = voffset ? voffset : ctx->i32_0;
args[idx++] = soffset ? soffset : ctx->i32_0;
args[idx++] = LLVMConstInt(ctx->i32, get_store_cache_policy(ctx, cache_policy), 0);
const char *indexing_kind = vindex ? "struct" : "raw";
char name[256], type_name[8];
ac_build_type_name_for_intr(LLVMTypeOf(data), type_name, sizeof(type_name));
if (use_format) {
snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.store.format.%s", indexing_kind,
type_name);
} else {
snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.store.%s", indexing_kind, type_name);
}
ac_build_intrinsic(ctx, name, ctx->voidt, args, idx, AC_FUNC_ATTR_INACCESSIBLE_MEM_ONLY);
}
void ac_build_buffer_store_format(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef data,
LLVMValueRef vindex, LLVMValueRef voffset, unsigned cache_policy)
{
ac_build_buffer_store_common(ctx, rsrc, data, vindex, voffset, NULL, cache_policy, true);
}
/* buffer_store_dword{,x2,x3,x4} <- the suffix is selected by the type of vdata. */
void ac_build_buffer_store_dword(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef vdata,
LLVMValueRef vindex, LLVMValueRef voffset, LLVMValueRef soffset,
unsigned cache_policy)
{
unsigned num_channels = ac_get_llvm_num_components(vdata);
/* Split 3 channel stores if unsupported. */
if (num_channels == 3 && !ac_has_vec3_support(ctx->gfx_level, false)) {
LLVMValueRef v[3], v01, voffset2;
for (int i = 0; i < 3; i++) {
v[i] = LLVMBuildExtractElement(ctx->builder, vdata, LLVMConstInt(ctx->i32, i, 0), "");
}
v01 = ac_build_gather_values(ctx, v, 2);
voffset2 = LLVMBuildAdd(ctx->builder, voffset ? voffset : ctx->i32_0,
LLVMConstInt(ctx->i32, 8, 0), "");
ac_build_buffer_store_dword(ctx, rsrc, v01, vindex, voffset, soffset, cache_policy);
ac_build_buffer_store_dword(ctx, rsrc, v[2], vindex, voffset2, soffset, cache_policy);
return;
}
ac_build_buffer_store_common(ctx, rsrc, ac_to_float(ctx, vdata), vindex, voffset, soffset,
cache_policy, false);
}
static LLVMValueRef ac_build_buffer_load_common(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef vindex, LLVMValueRef voffset,
LLVMValueRef soffset, unsigned num_channels,
LLVMTypeRef channel_type, unsigned cache_policy,
bool can_speculate, bool use_format,
bool structurized)
{
LLVMValueRef args[5];
int idx = 0;
args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
if (structurized)
args[idx++] = vindex ? vindex : ctx->i32_0;
args[idx++] = voffset ? voffset : ctx->i32_0;
args[idx++] = soffset ? soffset : ctx->i32_0;
args[idx++] = LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0);
unsigned func =
!ac_has_vec3_support(ctx->gfx_level, use_format) && num_channels == 3 ? 4 : num_channels;
const char *indexing_kind = structurized ? "struct" : "raw";
char name[256], type_name[8];
/* D16 is only supported on gfx8+ */
assert(!use_format || (channel_type != ctx->f16 && channel_type != ctx->i16) ||
ctx->gfx_level >= GFX8);
LLVMTypeRef type = func > 1 ? LLVMVectorType(channel_type, func) : channel_type;
ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
if (use_format) {
snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.format.%s", indexing_kind,
type_name);
} else {
snprintf(name, sizeof(name), "llvm.amdgcn.%s.buffer.load.%s", indexing_kind, type_name);
}
return ac_build_intrinsic(ctx, name, type, args, idx, ac_get_load_intr_attribs(can_speculate));
}
LLVMValueRef ac_build_buffer_load(struct ac_llvm_context *ctx, LLVMValueRef rsrc, int num_channels,
LLVMValueRef vindex, LLVMValueRef voffset, LLVMValueRef soffset,
LLVMTypeRef channel_type, unsigned cache_policy,
bool can_speculate, bool allow_smem)
{
if (allow_smem && !(cache_policy & ac_slc) &&
(!(cache_policy & ac_glc) || ctx->gfx_level >= GFX8)) {
assert(vindex == NULL);
LLVMValueRef result[32];
LLVMValueRef offset = voffset ? voffset : ctx->i32_0;
if (soffset)
offset = LLVMBuildAdd(ctx->builder, offset, soffset, "");
for (int i = 0; i < num_channels; i++) {
if (i) {
offset = LLVMBuildAdd(ctx->builder, offset, LLVMConstInt(ctx->i32, 4, 0), "");
}
LLVMValueRef args[3] = {
rsrc,
offset,
LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0),
};
result[i] = ac_build_intrinsic(ctx, "llvm.amdgcn.s.buffer.load.f32", ctx->f32, args, 3,
AC_FUNC_ATTR_READNONE);
}
if (num_channels == 1)
return result[0];
if (num_channels == 3 && !ac_has_vec3_support(ctx->gfx_level, false))
result[num_channels++] = LLVMGetUndef(ctx->f32);
return ac_build_gather_values(ctx, result, num_channels);
}
return ac_build_buffer_load_common(ctx, rsrc, vindex, voffset, soffset, num_channels,
channel_type, cache_policy, can_speculate, false, false);
}
LLVMValueRef ac_build_buffer_load_format(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef vindex, LLVMValueRef voffset,
unsigned num_channels, unsigned cache_policy,
bool can_speculate, bool d16, bool tfe)
{
if (tfe) {
assert(!d16);
cache_policy = get_load_cache_policy(ctx, cache_policy);
char code[256];
/* The definition in the assembly and the one in the constraint string
* differ because of an assembler bug.
*/
snprintf(code, sizeof(code),
"v_mov_b32 v0, 0\n"
"v_mov_b32 v1, 0\n"
"v_mov_b32 v2, 0\n"
"v_mov_b32 v3, 0\n"
"v_mov_b32 v4, 0\n"
"buffer_load_format_xyzw v[0:3], $1, $2, 0, idxen offen %s %s tfe %s\n"
"s_waitcnt vmcnt(0)",
cache_policy & ac_glc ? "glc" : "",
cache_policy & ac_slc ? "slc" : "",
cache_policy & ac_dlc ? "dlc" : "");
LLVMTypeRef param_types[] = {ctx->v2i32, ctx->v4i32};
LLVMTypeRef calltype = LLVMFunctionType(LLVMVectorType(ctx->f32, 5), param_types, 2, false);
LLVMValueRef inlineasm = LLVMConstInlineAsm(calltype, code, "=&{v[0:4]},v,s", false, false);
LLVMValueRef addr_comp[2] = {vindex ? vindex : ctx->i32_0,
voffset ? voffset : ctx->i32_0};
LLVMValueRef args[] = {ac_build_gather_values(ctx, addr_comp, 2),
LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "")};
LLVMValueRef res = LLVMBuildCall2(ctx->builder, calltype, inlineasm, args, 2, "");
return ac_build_concat(ctx, ac_trim_vector(ctx, res, num_channels),
ac_llvm_extract_elem(ctx, res, 4));
}
return ac_build_buffer_load_common(ctx, rsrc, vindex, voffset, ctx->i32_0, num_channels,
d16 ? ctx->f16 : ctx->f32, cache_policy, can_speculate, true,
true);
}
static LLVMValueRef ac_build_tbuffer_load(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef vindex, LLVMValueRef voffset,
LLVMValueRef soffset, unsigned num_channels,
unsigned dfmt, unsigned nfmt, unsigned cache_policy,
bool can_speculate, bool structurized)
{
LLVMValueRef args[6];
int idx = 0;
args[idx++] = LLVMBuildBitCast(ctx->builder, rsrc, ctx->v4i32, "");
if (structurized)
args[idx++] = vindex ? vindex : ctx->i32_0;
args[idx++] = voffset ? voffset : ctx->i32_0;
args[idx++] = soffset ? soffset : ctx->i32_0;
args[idx++] = LLVMConstInt(ctx->i32, ac_get_tbuffer_format(ctx->gfx_level, dfmt, nfmt), 0);
args[idx++] = LLVMConstInt(ctx->i32, get_load_cache_policy(ctx, cache_policy), 0);
unsigned func =
!ac_has_vec3_support(ctx->gfx_level, true) && num_channels == 3 ? 4 : num_channels;
const char *indexing_kind = structurized ? "struct" : "raw";
char name[256], type_name[8];
LLVMTypeRef type = func > 1 ? LLVMVectorType(ctx->i32, func) : ctx->i32;
ac_build_type_name_for_intr(type, type_name, sizeof(type_name));
snprintf(name, sizeof(name), "llvm.amdgcn.%s.tbuffer.load.%s", indexing_kind, type_name);
return ac_build_intrinsic(ctx, name, type, args, idx, ac_get_load_intr_attribs(can_speculate));
}
LLVMValueRef ac_build_struct_tbuffer_load(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef vindex, LLVMValueRef voffset,
LLVMValueRef soffset, unsigned num_channels,
unsigned dfmt, unsigned nfmt, unsigned cache_policy,
bool can_speculate)
{
return ac_build_tbuffer_load(ctx, rsrc, vindex, voffset, soffset, num_channels, dfmt,
nfmt, cache_policy, can_speculate, true);
}
LLVMValueRef ac_build_buffer_load_short(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef voffset, LLVMValueRef soffset,
unsigned cache_policy)
{
return ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i16,
cache_policy, false, false, false);
}
LLVMValueRef ac_build_buffer_load_byte(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef voffset, LLVMValueRef soffset,
unsigned cache_policy)
{
return ac_build_buffer_load_common(ctx, rsrc, NULL, voffset, soffset, 1, ctx->i8, cache_policy,
false, false, false);
}
/**
* Convert an 11- or 10-bit unsigned floating point number to an f32.
*
* The input exponent is expected to be biased analogous to IEEE-754, i.e. by
* 2^(exp_bits-1) - 1 (as defined in OpenGL and other graphics APIs).
*/
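/* A worked example for the 11-bit case (5 exponent bits, 6 mantissa bits):
 * the input bias is (1 << 4) - 1 = 15 and f32's bias is 127, so bias_shift
 * below is 112 and a normal number converts as (src << 17) + (112 << 23).
 */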
static LLVMValueRef ac_ufN_to_float(struct ac_llvm_context *ctx, LLVMValueRef src,
unsigned exp_bits, unsigned mant_bits)
{
assert(LLVMTypeOf(src) == ctx->i32);
LLVMValueRef tmp;
LLVMValueRef mantissa;
mantissa =
LLVMBuildAnd(ctx->builder, src, LLVMConstInt(ctx->i32, (1 << mant_bits) - 1, false), "");
/* Converting normal numbers is just a shift + correcting the exponent bias */
unsigned normal_shift = 23 - mant_bits;
unsigned bias_shift = 127 - ((1 << (exp_bits - 1)) - 1);
LLVMValueRef shifted, normal;
shifted = LLVMBuildShl(ctx->builder, src, LLVMConstInt(ctx->i32, normal_shift, false), "");
normal =
LLVMBuildAdd(ctx->builder, shifted, LLVMConstInt(ctx->i32, bias_shift << 23, false), "");
/* Converting nan/inf numbers is the same, but with a different exponent update */
LLVMValueRef naninf;
naninf = LLVMBuildOr(ctx->builder, normal, LLVMConstInt(ctx->i32, 0xff << 23, false), "");
/* Converting denormals is the complex case: determine the leading zeros of the
* mantissa to obtain the correct shift for the mantissa and exponent correction.
*/
LLVMValueRef denormal;
LLVMValueRef params[2] = {
mantissa, ctx->i1true, /* result can be undef when arg is 0 */
};
LLVMValueRef ctlz =
ac_build_intrinsic(ctx, "llvm.ctlz.i32", ctx->i32, params, 2, AC_FUNC_ATTR_READNONE);
/* Shift such that the leading 1 ends up as the LSB of the exponent field. */
tmp = LLVMBuildSub(ctx->builder, ctlz, LLVMConstInt(ctx->i32, 8, false), "");
denormal = LLVMBuildShl(ctx->builder, mantissa, tmp, "");
unsigned denormal_exp = bias_shift + (32 - mant_bits) - 1;
tmp = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, denormal_exp, false), ctlz, "");
tmp = LLVMBuildShl(ctx->builder, tmp, LLVMConstInt(ctx->i32, 23, false), "");
denormal = LLVMBuildAdd(ctx->builder, denormal, tmp, "");
/* Select the final result. */
LLVMValueRef result;
tmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, src,
LLVMConstInt(ctx->i32, ((1ULL << exp_bits) - 1) << mant_bits, false), "");
result = LLVMBuildSelect(ctx->builder, tmp, naninf, normal, "");
tmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, src,
LLVMConstInt(ctx->i32, 1ULL << mant_bits, false), "");
result = LLVMBuildSelect(ctx->builder, tmp, result, denormal, "");
tmp = LLVMBuildICmp(ctx->builder, LLVMIntNE, src, ctx->i32_0, "");
result = LLVMBuildSelect(ctx->builder, tmp, result, ctx->i32_0, "");
return ac_to_float(ctx, result);
}
/**
* Generate a fully general open coded buffer format fetch with all required
* fixups suitable for vertex fetch, using non-format buffer loads.
*
* Some combinations of argument values have special interpretations:
* - size = 8 bytes, format = fixed indicates PIPE_FORMAT_R11G11B10_FLOAT
* - size = 8 bytes, format != {float,fixed} indicates a 2_10_10_10 data format
*
* \param log_size log(size of channel in bytes)
* \param num_channels number of channels (1 to 4)
* \param format AC_FETCH_FORMAT_xxx value
* \param reverse whether XYZ channels are reversed
* \param known_aligned whether the source is known to be aligned to hardware's
* effective element size for loading the given format
* (note: this means dword alignment for 8_8_8_8, 16_16, etc.)
* \param rsrc buffer resource descriptor
* \return the resulting vector of floats or integers bitcast to <4 x i32>
*/
LLVMValueRef ac_build_opencoded_load_format(struct ac_llvm_context *ctx, unsigned log_size,
unsigned num_channels, unsigned format, bool reverse,
bool known_aligned, LLVMValueRef rsrc,
LLVMValueRef vindex, LLVMValueRef voffset,
LLVMValueRef soffset, unsigned cache_policy,
bool can_speculate)
{
LLVMValueRef tmp;
unsigned load_log_size = log_size;
unsigned load_num_channels = num_channels;
if (log_size == 3) {
load_log_size = 2;
if (format == AC_FETCH_FORMAT_FLOAT) {
load_num_channels = 2 * num_channels;
} else {
load_num_channels = 1; /* 10_11_11 or 2_10_10_10 */
}
}
int log_recombine = 0;
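/* log_recombine > 0 means the loads below fetch smaller units than the
 * channel size and must be OR-combined afterwards; log_recombine < 0 means
 * a single wide load is fetched and must be split into channels.
 */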
if ((ctx->gfx_level == GFX6 || ctx->gfx_level >= GFX10) && !known_aligned) {
/* Avoid alignment restrictions by loading one byte at a time. */
load_num_channels <<= load_log_size;
log_recombine = load_log_size;
load_log_size = 0;
} else if (load_num_channels == 2 || load_num_channels == 4) {
log_recombine = -util_logbase2(load_num_channels);
load_num_channels = 1;
load_log_size += -log_recombine;
}
LLVMValueRef loads[32]; /* up to 32 bytes */
for (unsigned i = 0; i < load_num_channels; ++i) {
tmp =
LLVMBuildAdd(ctx->builder, soffset, LLVMConstInt(ctx->i32, i << load_log_size, false), "");
LLVMTypeRef channel_type =
load_log_size == 0 ? ctx->i8 : load_log_size == 1 ? ctx->i16 : ctx->i32;
unsigned num_channels = 1 << (MAX2(load_log_size, 2) - 2);
loads[i] =
ac_build_buffer_load_common(ctx, rsrc, vindex, voffset, tmp, num_channels, channel_type,
cache_policy, can_speculate, false, true);
if (load_log_size >= 2)
loads[i] = ac_to_integer(ctx, loads[i]);
}
if (log_recombine > 0) {
/* Recombine bytes if necessary (GFX6 and unaligned GFX10+ loads) */
LLVMTypeRef dst_type = log_recombine == 2 ? ctx->i32 : ctx->i16;
for (unsigned src = 0, dst = 0; src < load_num_channels; ++dst) {
LLVMValueRef accum = NULL;
for (unsigned i = 0; i < (1 << log_recombine); ++i, ++src) {
tmp = LLVMBuildZExt(ctx->builder, loads[src], dst_type, "");
if (i == 0) {
accum = tmp;
} else {
tmp = LLVMBuildShl(ctx->builder, tmp, LLVMConstInt(dst_type, 8 * i, false), "");
accum = LLVMBuildOr(ctx->builder, accum, tmp, "");
}
}
loads[dst] = accum;
}
} else if (log_recombine < 0) {
/* Split vectors of dwords */
if (load_log_size > 2) {
assert(load_num_channels == 1);
LLVMValueRef loaded = loads[0];
unsigned log_split = load_log_size - 2;
log_recombine += log_split;
load_num_channels = 1 << log_split;
load_log_size = 2;
for (unsigned i = 0; i < load_num_channels; ++i) {
tmp = LLVMConstInt(ctx->i32, i, false);
loads[i] = LLVMBuildExtractElement(ctx->builder, loaded, tmp, "");
}
}
/* Further split dwords and shorts if required */
if (log_recombine < 0) {
for (unsigned src = load_num_channels, dst = load_num_channels << -log_recombine; src > 0;
--src) {
unsigned dst_bits = 1 << (3 + load_log_size + log_recombine);
LLVMTypeRef dst_type = LLVMIntTypeInContext(ctx->context, dst_bits);
LLVMValueRef loaded = loads[src - 1];
LLVMTypeRef loaded_type = LLVMTypeOf(loaded);
for (unsigned i = 1 << -log_recombine; i > 0; --i, --dst) {
tmp = LLVMConstInt(loaded_type, dst_bits * (i - 1), false);
tmp = LLVMBuildLShr(ctx->builder, loaded, tmp, "");
loads[dst - 1] = LLVMBuildTrunc(ctx->builder, tmp, dst_type, "");
}
}
}
}
if (log_size == 3) {
if (format == AC_FETCH_FORMAT_FLOAT) {
for (unsigned i = 0; i < num_channels; ++i) {
tmp = ac_build_gather_values(ctx, &loads[2 * i], 2);
loads[i] = LLVMBuildBitCast(ctx->builder, tmp, ctx->f64, "");
}
} else if (format == AC_FETCH_FORMAT_FIXED) {
/* 10_11_11_FLOAT */
LLVMValueRef data = loads[0];
LLVMValueRef i32_2047 = LLVMConstInt(ctx->i32, 2047, false);
LLVMValueRef r = LLVMBuildAnd(ctx->builder, data, i32_2047, "");
tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 11, false), "");
LLVMValueRef g = LLVMBuildAnd(ctx->builder, tmp, i32_2047, "");
LLVMValueRef b = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 22, false), "");
loads[0] = ac_to_integer(ctx, ac_ufN_to_float(ctx, r, 5, 6));
loads[1] = ac_to_integer(ctx, ac_ufN_to_float(ctx, g, 5, 6));
loads[2] = ac_to_integer(ctx, ac_ufN_to_float(ctx, b, 5, 5));
num_channels = 3;
log_size = 2;
format = AC_FETCH_FORMAT_FLOAT;
} else {
/* 2_10_10_10 data formats */
LLVMValueRef data = loads[0];
LLVMTypeRef i10 = LLVMIntTypeInContext(ctx->context, 10);
LLVMTypeRef i2 = LLVMIntTypeInContext(ctx->context, 2);
loads[0] = LLVMBuildTrunc(ctx->builder, data, i10, "");
tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 10, false), "");
loads[1] = LLVMBuildTrunc(ctx->builder, tmp, i10, "");
tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 20, false), "");
loads[2] = LLVMBuildTrunc(ctx->builder, tmp, i10, "");
tmp = LLVMBuildLShr(ctx->builder, data, LLVMConstInt(ctx->i32, 30, false), "");
loads[3] = LLVMBuildTrunc(ctx->builder, tmp, i2, "");
num_channels = 4;
}
}
if (format == AC_FETCH_FORMAT_FLOAT) {
if (log_size != 2) {
for (unsigned chan = 0; chan < num_channels; ++chan) {
tmp = ac_to_float(ctx, loads[chan]);
if (log_size == 3)
tmp = LLVMBuildFPTrunc(ctx->builder, tmp, ctx->f32, "");
else if (log_size == 1)
tmp = LLVMBuildFPExt(ctx->builder, tmp, ctx->f32, "");
loads[chan] = ac_to_integer(ctx, tmp);
}
}
} else if (format == AC_FETCH_FORMAT_UINT) {
if (log_size != 2) {
for (unsigned chan = 0; chan < num_channels; ++chan)
loads[chan] = LLVMBuildZExt(ctx->builder, loads[chan], ctx->i32, "");
}
} else if (format == AC_FETCH_FORMAT_SINT) {
if (log_size != 2) {
for (unsigned chan = 0; chan < num_channels; ++chan)
loads[chan] = LLVMBuildSExt(ctx->builder, loads[chan], ctx->i32, "");
}
} else {
bool unsign = format == AC_FETCH_FORMAT_UNORM || format == AC_FETCH_FORMAT_USCALED ||
format == AC_FETCH_FORMAT_UINT;
for (unsigned chan = 0; chan < num_channels; ++chan) {
if (unsign) {
tmp = LLVMBuildUIToFP(ctx->builder, loads[chan], ctx->f32, "");
} else {
tmp = LLVMBuildSIToFP(ctx->builder, loads[chan], ctx->f32, "");
}
LLVMValueRef scale = NULL;
if (format == AC_FETCH_FORMAT_FIXED) {
assert(log_size == 2);
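/* 16.16 fixed point: 0x10000 = 2^16. */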
scale = LLVMConstReal(ctx->f32, 1.0 / 0x10000);
} else if (format == AC_FETCH_FORMAT_UNORM) {
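/* UNORM: scale so that the maximum code, 2^bits - 1, maps to 1.0. */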
unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(loads[chan]));
scale = LLVMConstReal(ctx->f32, 1.0 / (((uint64_t)1 << bits) - 1));
} else if (format == AC_FETCH_FORMAT_SNORM) {
unsigned bits = LLVMGetIntTypeWidth(LLVMTypeOf(loads[chan]));
scale = LLVMConstReal(ctx->f32, 1.0 / (((uint64_t)1 << (bits - 1)) - 1));
}
if (scale)
tmp = LLVMBuildFMul(ctx->builder, tmp, scale, "");
if (format == AC_FETCH_FORMAT_SNORM) {
/* Clamp to [-1, 1]: only the most negative code, -2^(bits-1), scales below -1. */
LLVMValueRef neg_one = LLVMConstReal(ctx->f32, -1.0);
LLVMValueRef clamp = LLVMBuildFCmp(ctx->builder, LLVMRealULT, tmp, neg_one, "");
tmp = LLVMBuildSelect(ctx->builder, clamp, neg_one, tmp, "");
}
loads[chan] = ac_to_integer(ctx, tmp);
}
}
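/* Fill the missing channels with the default (0, 0, 0, 1). */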
while (num_channels < 4) {
if (format == AC_FETCH_FORMAT_UINT || format == AC_FETCH_FORMAT_SINT) {
loads[num_channels] = num_channels == 3 ? ctx->i32_1 : ctx->i32_0;
} else {
loads[num_channels] = ac_to_integer(ctx, num_channels == 3 ? ctx->f32_1 : ctx->f32_0);
}
num_channels++;
}
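/* Formats with reversed channel order (e.g. BGRA): swap R and B back. */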
if (reverse) {
tmp = loads[0];
loads[0] = loads[2];
loads[2] = tmp;
}
return ac_build_gather_values(ctx, loads, 4);
}
void ac_build_buffer_store_short(struct ac_llvm_context *ctx, LLVMValueRef rsrc,
LLVMValueRef vdata, LLVMValueRef voffset, LLVMValueRef soffset,
unsigned cache_policy)
{
vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i16, "");
ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false);
}
void ac_build_buffer_store_byte(struct ac_llvm_context *ctx, LLVMValueRef rsrc, LLVMValueRef vdata,
LLVMValueRef voffset, LLVMValueRef soffset, unsigned cache_policy)
{
vdata = LLVMBuildBitCast(ctx->builder, vdata, ctx->i8, "");
ac_build_buffer_store_common(ctx, rsrc, vdata, NULL, voffset, soffset, cache_policy, false);
}
/**
* Set range metadata on an instruction. This can only be used on load and
* call instructions. If you know an instruction can only produce the values
* 0, 1 or 2, you would call ac_set_range_metadata(ctx, value, 0, 3);
* \p lo is the minimum value inclusive.
* \p hi is the maximum value exclusive.
*/
void ac_set_range_metadata(struct ac_llvm_context *ctx, LLVMValueRef value, unsigned lo,
unsigned hi)
{
LLVMValueRef range_md, md_args[2];
LLVMTypeRef type = LLVMTypeOf(value);
LLVMContextRef context = LLVMGetTypeContext(type);
md_args[0] = LLVMConstInt(type, lo, false);
md_args[1] = LLVMConstInt(type, hi, false);
range_md = LLVMMDNodeInContext(context, md_args, 2);
LLVMSetMetadata(value, ctx->range_md_kind, range_md);
}
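/* Usage sketch (hypothetical caller, assuming an i32 load "v" from some
* pointer "ptr" that can only produce the values 0..2):
*
*    LLVMValueRef v = LLVMBuildLoad2(ctx->builder, ctx->i32, ptr, "");
*    ac_set_range_metadata(ctx, v, 0, 3);
*/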
LLVMValueRef ac_get_thread_id(struct ac_llvm_context *ctx)
{
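/* mbcnt with an all-ones mask counts the lanes below the current one,
* i.e. it yields the lane ID within the wave. */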
return ac_build_mbcnt(ctx, LLVMConstInt(ctx->iN_wavemask, ~0ull, 0));
}
/*
* AMD GCN implements derivatives using the local data store (LDS).
* All writes to the LDS happen in all executing threads at
* the same time. TID is the Thread ID for the current
* thread and is a value between 0 and 63, representing
* the thread's position in the wavefront.
*
* For the pixel shader, threads are grouped into quads of four pixels.
* The TIDs of the pixels of a quad are:
*
* +------+------+
* |4n + 0|4n + 1|
* +------+------+
* |4n + 2|4n + 3|
* +------+------+
*
* So, masking the TID with 0xfffffffc yields the TID of the top left pixel
* of the quad, masking with 0xfffffffd yields the TID of the top pixel of
* the current pixel's column, and masking with 0xfffffffe yields the TID
* of the left pixel of the current pixel's row.
*
* Adding 1 yields the TID of the pixel to the right of the left pixel, and
* adding 2 yields the TID of the pixel below the top pixel.
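*
* For example (a sketch derived from the masks above):
* ac_build_ddxy(ctx, 0xfffffffc, 1, val) computes a coarse ddx: tl is the
* top-left pixel, trbl the top-right pixel, and the result is trbl - tl.
* Passing idx = 2 instead yields a coarse ddy (bottom-left minus top-left).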
*/
LLVMValueRef ac_build_ddxy(struct ac_llvm_context *ctx, uint32_t mask, int idx, LLVMValueRef val)
{
unsigned tl_lanes[4], trbl_lanes[4];
char name[32], type[8];
LLVMValueRef tl, trbl;
LLVMTypeRef result_type;
LLVMValueRef result;
result_type = ac_to_float_type(ctx, LLVMTypeOf(val));
if (result_type == ctx->f16)
val = LLVMBuildZExt(ctx->builder, val, ctx->i32, "");
else if (result_type == ctx->v2f16)
val = LLVMBuildBitCast(ctx->builder, val, ctx->i32, "");
for (unsigned i = 0; i < 4; ++i) {
tl_lanes[i] = i & mask;
trbl_lanes[i] = (i & mask) + idx;
}
tl = ac_build_quad_swizzle(ctx, val, tl_lanes[0], tl_lanes[1], tl_lanes[2], tl_lanes[3]);
trbl =
ac_build_quad_swizzle(ctx, val, trbl_lanes[0], trbl_lanes[1], trbl_lanes[2], trbl_lanes[3]);
if (result_type == ctx->f16) {
tl = LLVMBuildTrunc(ctx->builder, tl, ctx->i16, "");
trbl = LLVMBuildTrunc(ctx->builder, trbl, ctx->i16, "");
}
tl = LLVMBuildBitCast(ctx->builder, tl, result_type, "");
trbl = LLVMBuildBitCast(ctx->builder, trbl, result_type, "");
result = LLVMBuildFSub(ctx->builder, trbl, tl, "");
ac_build_type_name_for_intr(result_type, type, sizeof(type));
snprintf(name, sizeof(name), "llvm.amdgcn.wqm.%s", type);
return ac_build_intrinsic(ctx, name, result_type, &result, 1, 0);
}
void ac_build_sendmsg(struct ac_llvm_context *ctx, uint32_t msg, LLVMValueRef wave_id)
{
LLVMValueRef args[2];
args[0] = LLVMConstInt(ctx->i32, msg, false);
args[1] = wave_id;
ac_build_intrinsic(ctx, "llvm.amdgcn.s.sendmsg", ctx->voidt, args, 2, 0);
}
LLVMValueRef ac_build_imsb(struct ac_llvm_context *ctx, LLVMValueRef arg, LLVMTypeRef dst_type)
{
LLVMValueRef msb =
ac_build_intrinsic(ctx, "llvm.amdgcn.sffbh.i32", dst_type, &arg, 1, AC_FUNC_ATTR_READNONE);
/* The HW returns the bit index counting from the MSB, but NIR/TGSI wants
* the index counting from the LSB. Invert it by computing "31 - msb". */
msb = LLVMBuildSub(ctx->builder, LLVMConstInt(ctx->i32, 31, false), msb, "");
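/* 0 and -1 have no bit that differs from the sign bit, so there is no
* meaningful MSB; return -1 for both. */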
LLVMValueRef all_ones = LLVMConstInt(ctx->i32, -1, true);
LLVMValueRef cond =
LLVMBuildOr(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, ctx->i32_0, ""),
LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, all_ones, ""), "");
return LLVMBuildSelect(ctx->builder, cond, all_ones, msb, "");
}
LLVMValueRef ac_build_umsb(struct ac_llvm_context *ctx, LLVMValueRef arg, LLVMTypeRef dst_type,
bool rev)
{
const char *intrin_name;
LLVMTypeRef type;
LLVMValueRef highest_bit;
LLVMValueRef zero;
unsigned bitsize;
bitsize = ac_get_elem_bits(ctx, LLVMTypeOf(arg));
switch (bitsize) {
case 64:
intrin_name = "llvm.ctlz.i64";
type = ctx->i64;
highest_bit = LLVMConstInt(ctx->i64, 63, false);
zero = ctx->i64_0;
break;
case 32:
intrin_name = "llvm.ctlz.i32";
type = ctx->i32;
highest_bit = LLVMConstInt(ctx->i32, 31, false);
zero = ctx->i32_0;
break;
case 16:
intrin_name = "llvm.ctlz.i16";
type = ctx->i16;
highest_bit = LLVMConstInt(ctx->i16, 15, false);
zero = ctx->i16_0;
break;
case 8:
intrin_name = "llvm.ctlz.i8";
type = ctx->i8;
highest_bit = LLVMConstInt(ctx->i8, 7, false);
zero = ctx->i8_0;
break;
default:
unreachable("invalid bitsize");
break;
}
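/* The i1 operand of llvm.ctlz tells LLVM that the result for a zero input
* may be poison; pass true and handle arg == 0 explicitly below. */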
LLVMValueRef params[2] = {
arg,
ctx->i1true,
};
LLVMValueRef msb = ac_build_intrinsic(ctx, intrin_name, type, params, 2, AC_FUNC_ATTR_READNONE);
if (!rev) {
/* The HW returns the bit index counting from the MSB, but TGSI/NIR wants
* the index counting from the LSB. Invert it by computing "(bitsize - 1) - msb". */
msb = LLVMBuildSub(ctx->builder, highest_bit, msb, "");
}
if (bitsize == 64) {
msb = LLVMBuildTrunc(ctx->builder, msb, ctx->i32, "");
} else if (bitsize < 32) {
msb = LLVMBuildSExt(ctx->builder, msb, ctx->i32, "");
}
/* check for zero */
return LLVMBuildSelect(ctx->builder, LLVMBuildICmp(ctx->builder, LLVMIntEQ, arg, zero, ""),
LLVMConstInt(ctx->i32, -1, true), msb, "");
}
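/* llvm.minnum/llvm.maxnum follow IEEE-754 minNum/maxNum semantics: if exactly
* one operand is NaN, the other operand is returned. */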
LLVMValueRef ac_build_fmin(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
{
char name[64], type[64];
ac_build_type_name_for_intr(LLVMTypeOf(a), type, sizeof(type));
snprintf(name, sizeof(name), "llvm.minnum.%s", type);
LLVMValueRef args[2] = {a, b};
return ac_build_intrinsic(ctx, name, LLVMTypeOf(a), args, 2, AC_FUNC_ATTR_READNONE);
}
LLVMValueRef ac_build_fmax(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
{
char name[64], type[64];
ac_build_type_name_for_intr(LLVMTypeOf(a), type, sizeof(type));
snprintf(name, sizeof(name), "llvm.maxnum.%s", type);
LLVMValueRef args[2] = {a, b};
return ac_build_intrinsic(ctx, name, LLVMTypeOf(a), args, 2, AC_FUNC_ATTR_READNONE);
}
LLVMValueRef ac_build_imin(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
{
LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSLE, a, b, "");
return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
}
LLVMValueRef ac_build_imax(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
{
LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntSGT, a, b, "");
return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
}
LLVMValueRef ac_build_umin(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
{
LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntULE, a, b, "");
return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
}
LLVMValueRef ac_build_umax(struct ac_llvm_context *ctx, LLVMValueRef a, LLVMValueRef b)
{
LLVMValueRef cmp = LLVMBuildICmp(ctx->builder, LLVMIntUGE, a, b, "");
return LLVMBuildSelect(ctx->builder, cmp, a, b, "");
}
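/* Note: given the minnum/maxnum semantics above, a NaN input clamps to 0. */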
LLVMValueRef ac_build_clamp(struct ac_llvm_context *ctx, LLVMValueRef value)
{
LLVMTypeRef t = LLVMTypeOf(value);
return ac_build_fmin(ctx, ac_build_fmax(ctx, value, LLVMConstReal(t, 0.0)),
LLVMConstReal(t, 1.0));
}
void ac_build_export(struct ac_llvm_context *ctx, struct ac_export_args *a)
{
LLVMValueRef args[9];
args[0] = LLVMConstInt(ctx->i32, a->target, 0);
args[1] = LLVMConstInt(ctx->i32, a->enabled_channels, 0);
if (a->compr) {
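/* Compressed export: four 16-bit channels packed into two v2i16 values. */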
assert(ctx->gfx_level < GFX11);
args[2] = LLVMBuildBitCast(ctx->builder, a->out[0], ctx->v2i16, "");
args[3] = LLVMBuildBitCast(ctx->builder, a->out[1], ctx->v2i16, "");
args[4] = LLVMConstInt(ctx->i1, a->done, 0);
args[5] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
ac_build_intrinsic(ctx, "llvm.amdgcn.exp.compr.v2i16", ctx->voidt, args, 6, 0);
} else {
args[2] = LLVMBuildBitCast(ctx->builder, a->out[0], ctx->f32, "");
args[3] = LLVMBuildBitCast(ctx->builder, a->out[1], ctx->f32, "");
args[4] = LLVMBuildBitCast(ctx->builder, a->out[2], ctx->f32, "");
args[5] = LLVMBuildBitCast(ctx->builder, a->out[3], ctx->f32, "");
args[6] = LLVMConstInt(ctx->i1, a->done, 0);
args[7] = LLVMConstInt(ctx->i1, a->valid_mask, 0);
ac_build_intrinsic(ctx, "llvm.amdgcn.exp.f32", ctx->voidt, args, 8, 0);
}
}
void ac_build_export_null(struct ac_llvm_context *ctx, bool uses_discard)
{
struct ac_export_args args;
/* Gfx10+ doesn't need to export anything unless the shader discards,
* in which case the EXEC mask must still be exported.
*/
if (ctx->gfx_level >= GFX10 && !uses_discard)
return;
args.enabled_channels = 0x0; /* enabled channels */
args.valid_mask = 1; /* whether the EXEC mask is valid */
args.done = 1; /* DONE bit */
/* Gfx11 doesn't support null exports, and mrt0 should be exported instead. */
args.target = ctx->gfx_level >= GFX11 ? V_008DFC_SQ_EXP_MRT : V_008DFC_SQ_EXP_NULL;
args.compr = 0; /* COMPR flag (0 = 32-bit export) */
args.out[0] = LLVMGetUndef(ctx->f32); /* R */
args.out[1] = LLVMGetUndef(ctx->f32); /* G */
args.out[2] = LLVMGetUndef(ctx->f32); /* B */
args.out[3] = LLVMGetUndef(ctx->f32); /* A */
ac_build_export(ctx, &args);
}
static unsigned ac_num_coords(enum ac_image_dim dim)
{
switch (dim) {
case ac_image_1d:
return 1;
case ac_image_2d:
case ac_image_1darray:
return 2;
case ac_image_3d:
case ac_image_cube:
case ac_image_2darray:
case ac_image_2dmsaa:
return 3;
case ac_image_2darraymsaa:
return 4;
default:
unreachable("ac_num_coords: bad dim");
}
}
static unsigned ac_num_derivs(enum ac_image_dim dim)
{
switch (dim) {
case ac_image_1d:
case ac_image_1darray:
return 2;
case ac_image_2d:
case ac_image_2darray:
case ac_image_cube:
return 4;
case ac_image_3d:
return 6;
case ac_image_2dmsaa:
case ac_image_2darraymsaa:
default:
unreachable("derivatives not supported");
}
}
static const char *get_atomic_name(enum ac_atomic_op op)
{
switch (op) {
case ac_atomic_swap:
return "swap";
case ac_atomic_add:
return "add";
case ac_atomic_sub:
return "sub";
case ac_atomic_smin:
return "smin";
case ac_atomic_umin:
return "umin";
case ac_atomic_smax:
return "smax";
case ac_atomic_umax:
return "umax";
case ac_atomic_and:
return "and";
case ac_atomic_or:
return "or";
case ac_atomic_xor:
return "xor";
case ac_atomic_inc_wrap:
return "inc";
case ac_atomic_dec_wrap:
return "dec";
case ac_atomic_fmin:
return "fmin";
case ac_atomic_fmax:
return "fmax";
}
unreachable("bad atomic op");
}
LLVMValueRef ac_build_image_opcode(struct ac_llvm_context *ctx, struct ac_image_args *a)
{
const char *overload[3] = {"", "", ""};
unsigned num_overloads = 0;
LLVMValueRef args[18];
unsigned num_args = 0;
enum ac_image_dim dim = a->dim;
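/* Sanity-check the argument combination and bit sizes for the chosen opcode
* before the intrinsic is assembled. */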
assert(!a->lod || a->lod == ctx->i32_0 || a->lod == ctx->f32_0 || !a->level_zero);
assert((a->opcode != ac_image_get_resinfo && a->opcode != ac_image_load_mip &&
a->opcode != ac_image_store_mip) ||
a->lod);
assert(a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
(!a->compare && !a->offset));
assert((a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
a->opcode == ac_image_get_lod) ||
!a->bias);
assert((a->bias ? 1 : 0) + (a->lod ? 1 : 0) + (a->level_zero ? 1 : 0) + (a->derivs[0] ? 1 : 0) <=
1);
assert((a->min_lod ? 1 : 0) + (a->lod ? 1 : 0) + (a->level_zero ? 1 : 0) <= 1);
assert(!a->d16 || (ctx->gfx_level >= GFX8 && a->opcode != ac_image_atomic &&
a->opcode != ac_image_atomic_cmpswap && a->opcode != ac_image_get_lod &&
a->opcode != ac_image_get_resinfo));
assert(!a->a16 || ctx->gfx_level >= GFX9);
assert(!a->derivs[0] || a->g16 == a->a16 || ctx->gfx_level >= GFX10);
assert(!a->offset ||
ac_get_elem_bits(ctx, LLVMTypeOf(a->offset)) == 32);
assert(!a->bias ||
ac_get_elem_bits(ctx, LLVMTypeOf(a->bias)) == 32);
assert(!a->compare ||
ac_get_elem_bits(ctx, LLVMTypeOf(a->compare)) == 32);
assert(!a->derivs[0] ||
((!a->g16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->derivs[0])) == 16) &&
(a->g16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->derivs[0])) == 32)));
assert(!a->coords[0] ||
((!a->a16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])) == 16) &&
(a->a16 || ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])) == 32)));
assert(!a->lod ||
((a->opcode != ac_image_get_resinfo || ac_get_elem_bits(ctx, LLVMTypeOf(a->lod)) == 32) &&
(a->opcode == ac_image_get_resinfo ||
ac_get_elem_bits(ctx, LLVMTypeOf(a->lod)) ==
ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])))));
assert(!a->min_lod ||
ac_get_elem_bits(ctx, LLVMTypeOf(a->min_lod)) ==
ac_get_elem_bits(ctx, LLVMTypeOf(a->coords[0])));
if (a->opcode == ac_image_get_lod) {
switch (dim) {
case ac_image_1darray:
dim = ac_image_1d;
break;
case ac_image_2darray:
case ac_image_cube:
dim = ac_image_2d;
break;
default:
break;
}
}
bool sample = a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
a->opcode == ac_image_get_lod;
bool atomic = a->opcode == ac_image_atomic || a->opcode == ac_image_atomic_cmpswap;
bool load = a->opcode == ac_image_sample || a->opcode == ac_image_gather4 ||
a->opcode == ac_image_load || a->opcode == ac_image_load_mip;
LLVMTypeRef coord_type = sample ? (a->a16 ? ctx->f16 : ctx->f32) : (a->a16 ? ctx->i16 : ctx->i32);
uint8_t dmask = a->dmask;
LLVMTypeRef data_type;
char data_type_str[32];
if (atomic) {
data_type = LLVMTypeOf(a->data[0]);
} else if (a->opcode == ac_image_store || a->opcode == ac_image_store_mip) {
/* Image stores might have been shrunk using the format. */
data_type = LLVMTypeOf(a->data[0]);
dmask = (1 << ac_get_llvm_num_components(a->data[0])) - 1;
} else {
data_type = a->d16 ? ctx->v4f16 : ctx->v4f32;
}
if (a->tfe) {
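/* With TFE the intrinsic returns a { data, i32 } pair; the i32 holds the
* texture-fail status code. */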
data_type = LLVMStructTypeInContext(
ctx->context, (LLVMTypeRef[]){data_type, ctx->i32}, 2, false);
}
if (atomic || a->opcode == ac_image_store || a->opcode == ac_image_store_mip) {
args[num_args++] = a->data[0];
if (a->opcode == ac_image_atomic_cmpswap)
args[num_args++] = a->data[1];
}
if (!atomic)
args[num_args++] = LLVMConstInt(ctx->i32, dmask, false);
if (a->offset)
args[num_args++] = ac_to_integer(ctx, a->offset);
if (a->bias) {
args[num_args++] = ac_to_float(ctx, a->bias);
overload[num_overloads++] = ".f32";
}
if (a->compare)
args[num_args++] = ac_to_float(ctx, a->compare);
if (a->derivs[0]) {
unsigned count = ac_num_derivs(dim);
for (unsigned i = 0; i < count; ++i)
args[num_args++] = ac_to_float(ctx, a->derivs[i]);
overload[num_overloads++] = a->g16 ? ".f16" : ".f32";
}
unsigned num_coords = a->opcode != ac_image_get_resinfo ? ac_num_coords(dim) : 0;
for (unsigned i = 0; i < num_coords; ++i)
args[num_args++] = LLVMBuildBitCast(ctx->builder, a->coords[i], coord_type, "");
if (a->lod)
args[num_args++] = LLVMBuildBitCast(ctx->builder, a->lod, coord_type, "");
if (a->min_lod)
args[num_args++] = LLVMBuildBitCast(ctx->builder, a->min_lod, coord_type, "");
overload[num_overloads++] = sample ? (a->a16 ? ".f16" : ".f32") : (a->a16 ? ".i16" : ".i32");
args[num_args++] = a->resource;
if (sample) {
args[num_args++] = a->sampler;
args[num_args++] = LLVMConstInt(ctx->i1, a->unorm, false);
}
args[num_args++] = a->tfe ? ctx->i32_1 : ctx->i32_0; /* texfailctrl */
args[num_args++] = LLVMConstInt(
ctx->i32, load ? get_load_cache_policy(ctx, a->cache_policy) : a->cache_policy, false);
const char *name;
const char *atomic_subop = "";
switch (a->opcode) {
case ac_image_sample:
name = "sample";
break;
case ac_image_gather4:
name = "gather4";
break;
case ac_image_load:
name = "load";
break;
case ac_image_load_mip:
name = "load.mip";
break;
case ac_image_store:
name = "store";
break;
case ac_image_store_mip:
name = "store.mip";
break;
case ac_image_atomic:
name = "atomic.";
atomic_subop = get_atomic_name(a->atomic);
break;
case ac_image_atomic_cmpswap:
name = "atomic.";
atomic_subop = "cmpswap";
break;
case ac_image_get_lod:
name = "getlod";
break;
case ac_image_get_resinfo:
name = "getresinfo";
break;
default:
unreachable("invalid image opcode");
}
const char *dimname;
switch (dim) {
case ac_image_1d:
dimname = "1d";
break;
case ac_image_2d:
dimname = "2d";
break;
case ac_image_3d:
dimname = "3d";
break;
case ac_image_cube:
dimname = "cube";
break;
case ac_image_1darray:
dimname = "1darray";
break;
case ac_image_2darray:
dimname = "2darray";
break;
case ac_image_2dmsaa:
dimname = "2dmsaa";
break;
case ac_image_2darraymsaa:
dimname = "2darraymsaa";
break;
default:
unreachable("invalid dim");
}
ac_build_type_name_for_intr(data_type, data_type_str, sizeof(data_type_str));
bool lod_suffix = a->lod && (a->opcode == ac_image_sample || a->opcode == ac_image_gather4);
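/* Example: a biased, offset, depth-compare 2D sample with 32-bit coords and a
* v4f32 result yields "llvm.amdgcn.image.sample.c.b.o.2d.v4f32.f32.f32". */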
char intr_name[96];
snprintf(intr_name, sizeof(intr_name),
"llvm.amdgcn.image.%s%s" /* base name */
"%s%s%s%s" /* sample/gather modifiers */
".%s.%s%s%s%s", /* dimension and type overloads */
name, atomic_subop, a->compare ? ".c" : "",
a->bias ? ".b" : lod_suffix ? ".l" : a->derivs[0] ? ".d" : a->level_zero ? ".lz" : "",
a->min_lod ? ".cl" : "", a->offset ? ".o" : "", dimname,
data_type_str, overload[0], overload[1], overload[2]);
LLVMTypeRef retty;
if (a->opcode == ac_image_store || a->opcode == ac_image_store_mip)
retty = ctx->voidt;
else
retty = data_type;
LLVMValueRef result = ac_build_intrinsic(ctx, intr_name, retty, args, num_args, a->attributes);
if (a->tfe) {
LLVMValueRef texel = LLVMBuildExtractValue(ctx->builder, result, 0, "");
LLVMValueRef code = LLVMBuildExtractValue(ctx->builder, result, 1, "");
result = ac_build_concat(ctx, texel, ac_to_float(ctx, code));
}
if (!sample && !atomic && retty != ctx->voidt)
result = ac_to_integer(ctx, result);
return result;
}
LLVMValueRef ac_build_image_get_sample_count(struct ac_llvm_context *ctx, LLVMValueRef rsrc)
{
LLVMValueRef samples;
/* Read the sample count from the descriptor directly;
* the hardware has no instruction for this.
*/
samples = LLVMBuildExtractElement(ctx->builder, rsrc, LLVMConstInt(ctx->i32, 3, 0), "");
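/* log2(samples) is stored in bits [19:16] of dword 3 of the descriptor. */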
samples = LLVMBuildLShr(ctx->builder, samples, LLVMConstInt(ctx->i32, 16, 0), "");
samples = LLVMBuildAnd(ctx->builder, samples, LLVMConstInt(ctx->i32, 0xf, 0), "");
samples = LLVMBuildShl(ctx->builder, ctx->i32_1, samples, "");
return samples;
}
LLVMValueRef ac_build_cvt_pkrtz_f16(struct ac_llvm_context *ctx, LLVMValueRef args[2])
{
return ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pkrtz", ctx->v2f16, args, 2,
AC_FUNC_ATTR_READNONE);
}
LLVMValueRef ac_build_cvt_pknorm_i16(struct ac_llvm_context *ctx, LLVMValueRef args[2])
{
LLVMValueRef res = ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.i16", ctx->v2i16, args, 2,
AC_FUNC_ATTR_READNONE);
return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
}
LLVMValueRef ac_build_cvt_pknorm_u16(struct ac_llvm_context *ctx, LLVMValueRef args[2])
{
LLVMValueRef res = ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pknorm.u16", ctx->v2i16, args, 2,
AC_FUNC_ATTR_READNONE);
return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
}
LLVMValueRef ac_build_cvt_pknorm_i16_f16(struct ac_llvm_context *ctx,
LLVMValueRef args[2])
{
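/* Presumably no LLVM intrinsic exposes this instruction, so emit it via
* inline assembly; GFX11 renamed the opcode to v_cvt_pk_norm_i16_f16. */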
LLVMTypeRef param_types[] = {ctx->f16, ctx->f16};
LLVMTypeRef calltype = LLVMFunctionType(ctx->i32, param_types, 2, false);
LLVMValueRef code = LLVMConstInlineAsm(calltype,
ctx->gfx_level >= GFX11 ?
"v_cvt_pk_norm_i16_f16 $0, $1, $2" :
"v_cvt_pknorm_i16_f16 $0, $1, $2",
"=v,v,v", false, false);
return LLVMBuildCall2(ctx->builder, calltype, code, args, 2, "");
}
LLVMValueRef ac_build_cvt_pknorm_u16_f16(struct ac_llvm_context *ctx,
LLVMValueRef args[2])
{
LLVMTypeRef param_types[] = {ctx->f16, ctx->f16};
LLVMTypeRef calltype = LLVMFunctionType(ctx->i32, param_types, 2, false);
LLVMValueRef code = LLVMConstInlineAsm(calltype,
ctx->gfx_level >= GFX11 ?
"v_cvt_pk_norm_u16_f16 $0, $1, $2" :
"v_cvt_pknorm_u16_f16 $0, $1, $2",
"=v,v,v", false, false);
return LLVMBuildCall2(ctx->builder, calltype, code, args, 2, "");
}
/* The 8-bit and 10-bit clamping is for HW workarounds. */
LLVMValueRef ac_build_cvt_pk_i16(struct ac_llvm_context *ctx, LLVMValueRef args[2], unsigned bits,
bool hi)
{
assert(bits == 8 || bits == 10 || bits == 16);
LLVMValueRef max_rgb = LLVMConstInt(ctx->i32, bits == 8 ? 127 : bits == 10 ? 511 : 32767, 0);
LLVMValueRef min_rgb = LLVMConstInt(ctx->i32, bits == 8 ? -128 : bits == 10 ? -512 : -32768, 0);
LLVMValueRef max_alpha = bits != 10 ? max_rgb : ctx->i32_1;
LLVMValueRef min_alpha = bits != 10 ? min_rgb : LLVMConstInt(ctx->i32, -2, 0);
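/* With 10-bit RGB channels the paired alpha channel is only 2 bits wide,
* hence its [-2, 1] range. */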
/* Clamp. */
if (bits != 16) {
for (int i = 0; i < 2; i++) {
bool alpha = hi && i == 1;
args[i] = ac_build_imin(ctx, args[i], alpha ? max_alpha : max_rgb);
args[i] = ac_build_imax(ctx, args[i], alpha ? min_alpha : min_rgb);
}
}
LLVMValueRef res =
ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.i16", ctx->v2i16, args, 2, AC_FUNC_ATTR_READNONE);
return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
}
/* The 8-bit and 10-bit clamping is for HW workarounds. */
LLVMValueRef ac_build_cvt_pk_u16(struct ac_llvm_context *ctx, LLVMValueRef args[2], unsigned bits,
bool hi)
{
assert(bits == 8 || bits == 10 || bits == 16);
LLVMValueRef max_rgb = LLVMConstInt(ctx->i32, bits == 8 ? 255 : bits == 10 ? 1023 : 65535, 0);
LLVMValueRef max_alpha = bits != 10 ? max_rgb : LLVMConstInt(ctx->i32, 3, 0);
/* Clamp. */
if (bits != 16) {
for (int i = 0; i < 2; i++) {
bool alpha = hi && i == 1;
args[i] = ac_build_umin(ctx, args[i], alpha ? max_alpha : max_rgb);
}
}
LLVMValueRef res =
ac_build_intrinsic(ctx, "llvm.amdgcn.cvt.pk.u16", ctx->v2i16, args, 2, AC_FUNC_ATTR_READNONE);
return LLVMBuildBitCast(ctx->builder, res, ctx->i32, "");
}
LLVMValueRef ac_build_wqm_vote(struct ac_llvm_context *ctx, LLVMValueRef i1)
{
return ac_build_intrinsic(ctx, "llvm.amdgcn.wqm.vote", ctx->i1, &i1, 1, AC_FUNC_ATTR_READNONE);
}
void ac_build_kill_if_false(struct ac_llvm_context *ctx, LLVMValueRef i1)
{
ac_build_intrinsic(ctx, "llvm.amdgcn.kill", ctx->voidt, &i1, 1, 0);
}
LLVMValueRef ac_build_bfe(struct ac_llvm_context *ctx, LLVMValueRef input, LLVMValueRef offset,
LLVMValueRef width, bool is_signed)
{
LLVMValueRef args[] = {
input,
offset,
width,
};
return ac_build_intrinsic(ctx, is_signed ? "llvm.amdgcn.sbfe.i32" : "llvm.amdgcn.ubfe.i32",
ctx->i32, args, 3, AC_FUNC_ATTR_READNONE);
}
LLVMValueRef ac_build_imad(struct ac_llvm_context *ctx, LLVMValueRef s0, LLVMValueRef s1,
LLVMValueRef s2)
{
return LLVMBuildAdd(ctx->builder, LLVMBuildMul(ctx->builder, s0, s1, ""), s2, "");
}
LLVMValueRef ac_build_fmad(struct ac_llvm_context *ctx, LLVMValueRef s0, LLVMValueRef s1,
LLVMValueRef s2)
{
/* FMA is better on GFX10+, because those chips have FMA units instead of MUL-ADD units. */
if (ctx->gfx_level >= GFX10) {
return ac_build_intrinsic(ctx, "llvm.fma.f32", ctx->f32, (LLVMValueRef[]){s0, s1, s2}, 3,
AC_FUNC_ATTR_READNONE);
}
return LLVMBuildFAdd(ctx->builder, LLVMBuildFMul(ctx->builder, s0, s1, ""), s2, "");
}
void ac_build_waitcnt(struct ac_llvm_context *ctx, unsigned wait_flags)
{
if (!wait_flags)
return;
unsigned expcnt = 7;
unsigned lgkmcnt = 63;
unsigned vmcnt = ctx->gfx_level >= GFX9 ? 63 : 15;
unsigned vscnt = 63;
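/* The field maxima mean "don't wait" for that counter; zero the ones that
* must be waited on. */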
if (wait_flags & AC_WAIT_EXP)
expcnt = 0;