/*
* Copyright © 2018 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include "aco_builder.h"
#include "aco_ir.h"
#include "util/half_float.h"
#include "util/memstream.h"
#include <algorithm>
#include <array>
#include <vector>
namespace aco {
#ifndef NDEBUG
void
perfwarn(Program* program, bool cond, const char* msg, Instruction* instr)
{
if (cond) {
char* out;
size_t outsize;
struct u_memstream mem;
u_memstream_open(&mem, &out, &outsize);
FILE* const memf = u_memstream_get(&mem);
fprintf(memf, "%s: ", msg);
aco_print_instr(program->gfx_level, instr, memf);
u_memstream_close(&mem);
aco_perfwarn(program, out);
free(out);
if (debug_flags & DEBUG_PERFWARN)
exit(1);
}
}
#endif
/**
* The optimizer works in 4 phases:
* (1) The first pass collects information for each ssa-def,
* propagates reg->reg operands of the same type, inline constants
* and neg/abs input modifiers.
* (2) The second pass combines instructions like mad, omod and clamp, and
* propagates SGPRs on VALU instructions.
* This pass depends on information collected in the first pass.
* (3) The third pass goes backwards and selects instructions,
* i.e. decides whether a mad instruction is profitable and eliminates dead code.
* (4) The fourth pass cleans up the sequence: literals get applied and dead
* instructions are removed from the sequence.
*/
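/* Bookkeeping for a mul+add pair that was tentatively fused into mad/fma:
* the original add instruction is kept so that the backwards pass can undo
* the fusion if it turns out not to be profitable, and literal_mask records
* which operands hold literals that still need to be applied. */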
struct mad_info {
aco_ptr<Instruction> add_instr;
uint32_t mul_temp_id;
uint16_t literal_mask;
mad_info(aco_ptr<Instruction> instr, uint32_t id)
: add_instr(std::move(instr)), mul_temp_id(id), literal_mask(0)
{}
};
enum Label {
label_vec = 1 << 0,
label_constant_32bit = 1 << 1,
/* label_{abs,neg,mul,omod2,omod4,omod5,clamp} are used for both 16 and
* 32-bit operations but this shouldn't cause any issues because we don't
* look through any conversions */
label_abs = 1 << 2,
label_neg = 1 << 3,
label_mul = 1 << 4,
label_temp = 1 << 5,
label_literal = 1 << 6,
label_mad = 1 << 7,
label_omod2 = 1 << 8,
label_omod4 = 1 << 9,
label_omod5 = 1 << 10,
label_clamp = 1 << 12,
label_undefined = 1 << 14,
label_vcc = 1 << 15,
label_b2f = 1 << 16,
label_add_sub = 1 << 17,
label_bitwise = 1 << 18,
label_minmax = 1 << 19,
label_vopc = 1 << 20,
label_uniform_bool = 1 << 21,
label_constant_64bit = 1 << 22,
label_uniform_bitwise = 1 << 23,
label_scc_invert = 1 << 24,
label_scc_needed = 1 << 26,
label_b2i = 1 << 27,
label_fcanonicalize = 1 << 28,
label_constant_16bit = 1 << 29,
label_usedef = 1 << 30, /* generic label */
label_vop3p = 1ull << 31, /* 1ull to prevent sign extension */
label_canonicalized = 1ull << 32,
label_extract = 1ull << 33,
label_insert = 1ull << 34,
label_dpp16 = 1ull << 35,
label_dpp8 = 1ull << 36,
label_f2f32 = 1ull << 37,
label_f2f16 = 1ull << 38,
label_split = 1ull << 39,
};
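/* The label groups below correspond to which member of ssa_info's union a
* label uses: instr_labels store an Instruction pointer, temp_labels a Temp
* and val_labels a constant value. The static_asserts further down keep the
* groups disjoint so a label can't be interpreted through the wrong union
* member. */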
static constexpr uint64_t instr_usedef_labels =
label_vec | label_mul | label_mad | label_add_sub | label_vop3p | label_bitwise |
label_uniform_bitwise | label_minmax | label_vopc | label_usedef | label_extract | label_dpp16 |
label_dpp8 | label_f2f32;
static constexpr uint64_t instr_mod_labels =
label_omod2 | label_omod4 | label_omod5 | label_clamp | label_insert | label_f2f16;
static constexpr uint64_t instr_labels = instr_usedef_labels | instr_mod_labels | label_split;
static constexpr uint64_t temp_labels = label_abs | label_neg | label_temp | label_vcc | label_b2f |
label_uniform_bool | label_scc_invert | label_b2i |
label_fcanonicalize;
static constexpr uint32_t val_labels =
label_constant_32bit | label_constant_64bit | label_constant_16bit | label_literal;
static_assert((instr_labels & temp_labels) == 0, "labels cannot intersect");
static_assert((instr_labels & val_labels) == 0, "labels cannot intersect");
static_assert((temp_labels & val_labels) == 0, "labels cannot intersect");
struct ssa_info {
uint64_t label;
union {
uint32_t val;
Temp temp;
Instruction* instr;
};
ssa_info() : label(0) {}
void add_label(Label new_label)
{
/* Since all the instr_usedef_labels use instr for the same thing
* (indicating the defining instruction), there is usually no need to
* clear any other instr labels. */
if (new_label & instr_usedef_labels)
label &= ~(instr_mod_labels | temp_labels | val_labels); /* instr, temp and val alias */
if (new_label & instr_mod_labels) {
label &= ~instr_labels;
label &= ~(temp_labels | val_labels); /* instr, temp and val alias */
}
if (new_label & temp_labels) {
label &= ~temp_labels;
label &= ~(instr_labels | val_labels); /* instr, temp and val alias */
}
uint32_t const_labels =
label_literal | label_constant_32bit | label_constant_64bit | label_constant_16bit;
if (new_label & const_labels) {
label &= ~val_labels | const_labels;
label &= ~(instr_labels | temp_labels); /* instr, temp and val alias */
} else if (new_label & val_labels) {
label &= ~val_labels;
label &= ~(instr_labels | temp_labels); /* instr, temp and val alias */
}
label |= new_label;
}
void set_vec(Instruction* vec)
{
add_label(label_vec);
instr = vec;
}
bool is_vec() { return label & label_vec; }
void set_constant(amd_gfx_level gfx_level, uint64_t constant)
{
Operand op16 = Operand::c16(constant);
Operand op32 = Operand::get_const(gfx_level, constant, 4);
add_label(label_literal);
val = constant;
/* check that no upper bits are lost in case of packed 16bit constants */
if (gfx_level >= GFX8 && !op16.isLiteral() &&
op16.constantValue16(true) == ((constant >> 16) & 0xffff))
add_label(label_constant_16bit);
if (!op32.isLiteral())
add_label(label_constant_32bit);
if (Operand::is_constant_representable(constant, 8))
add_label(label_constant_64bit);
if (label & label_constant_64bit) {
val = Operand::c64(constant).constantValue();
if (val != constant)
label &= ~(label_literal | label_constant_16bit | label_constant_32bit);
}
}
bool is_constant(unsigned bits)
{
switch (bits) {
case 8: return label & label_literal;
case 16: return label & label_constant_16bit;
case 32: return label & label_constant_32bit;
case 64: return label & label_constant_64bit;
}
return false;
}
bool is_literal(unsigned bits)
{
bool is_lit = label & label_literal;
switch (bits) {
case 8: return false;
case 16: return is_lit && !(label & label_constant_16bit);
case 32: return is_lit && !(label & label_constant_32bit);
case 64: return false;
}
return false;
}
bool is_constant_or_literal(unsigned bits)
{
if (bits == 64)
return label & label_constant_64bit;
else
return label & label_literal;
}
void set_abs(Temp abs_temp)
{
add_label(label_abs);
temp = abs_temp;
}
bool is_abs() { return label & label_abs; }
void set_neg(Temp neg_temp)
{
add_label(label_neg);
temp = neg_temp;
}
bool is_neg() { return label & label_neg; }
void set_neg_abs(Temp neg_abs_temp)
{
add_label((Label)((uint32_t)label_abs | (uint32_t)label_neg));
temp = neg_abs_temp;
}
void set_mul(Instruction* mul)
{
add_label(label_mul);
instr = mul;
}
bool is_mul() { return label & label_mul; }
void set_temp(Temp tmp)
{
add_label(label_temp);
temp = tmp;
}
bool is_temp() { return label & label_temp; }
void set_mad(Instruction* mad, uint32_t mad_info_idx)
{
add_label(label_mad);
mad->pass_flags = mad_info_idx;
instr = mad;
}
bool is_mad() { return label & label_mad; }
void set_omod2(Instruction* mul)
{
add_label(label_omod2);
instr = mul;
}
bool is_omod2() { return label & label_omod2; }
void set_omod4(Instruction* mul)
{
add_label(label_omod4);
instr = mul;
}
bool is_omod4() { return label & label_omod4; }
void set_omod5(Instruction* mul)
{
add_label(label_omod5);
instr = mul;
}
bool is_omod5() { return label & label_omod5; }
void set_clamp(Instruction* med3)
{
add_label(label_clamp);
instr = med3;
}
bool is_clamp() { return label & label_clamp; }
void set_f2f16(Instruction* conv)
{
add_label(label_f2f16);
instr = conv;
}
bool is_f2f16() { return label & label_f2f16; }
void set_undefined() { add_label(label_undefined); }
bool is_undefined() { return label & label_undefined; }
void set_vcc(Temp vcc_val)
{
add_label(label_vcc);
temp = vcc_val;
}
bool is_vcc() { return label & label_vcc; }
void set_b2f(Temp b2f_val)
{
add_label(label_b2f);
temp = b2f_val;
}
bool is_b2f() { return label & label_b2f; }
void set_add_sub(Instruction* add_sub_instr)
{
add_label(label_add_sub);
instr = add_sub_instr;
}
bool is_add_sub() { return label & label_add_sub; }
void set_bitwise(Instruction* bitwise_instr)
{
add_label(label_bitwise);
instr = bitwise_instr;
}
bool is_bitwise() { return label & label_bitwise; }
void set_uniform_bitwise() { add_label(label_uniform_bitwise); }
bool is_uniform_bitwise() { return label & label_uniform_bitwise; }
void set_minmax(Instruction* minmax_instr)
{
add_label(label_minmax);
instr = minmax_instr;
}
bool is_minmax() { return label & label_minmax; }
void set_vopc(Instruction* vopc_instr)
{
add_label(label_vopc);
instr = vopc_instr;
}
bool is_vopc() { return label & label_vopc; }
void set_scc_needed() { add_label(label_scc_needed); }
bool is_scc_needed() { return label & label_scc_needed; }
void set_scc_invert(Temp scc_inv)
{
add_label(label_scc_invert);
temp = scc_inv;
}
bool is_scc_invert() { return label & label_scc_invert; }
void set_uniform_bool(Temp uniform_bool)
{
add_label(label_uniform_bool);
temp = uniform_bool;
}
bool is_uniform_bool() { return label & label_uniform_bool; }
void set_b2i(Temp b2i_val)
{
add_label(label_b2i);
temp = b2i_val;
}
bool is_b2i() { return label & label_b2i; }
void set_usedef(Instruction* label_instr)
{
add_label(label_usedef);
instr = label_instr;
}
bool is_usedef() { return label & label_usedef; }
void set_vop3p(Instruction* vop3p_instr)
{
add_label(label_vop3p);
instr = vop3p_instr;
}
bool is_vop3p() { return label & label_vop3p; }
void set_fcanonicalize(Temp tmp)
{
add_label(label_fcanonicalize);
temp = tmp;
}
bool is_fcanonicalize() { return label & label_fcanonicalize; }
void set_canonicalized() { add_label(label_canonicalized); }
bool is_canonicalized() { return label & label_canonicalized; }
void set_f2f32(Instruction* cvt)
{
add_label(label_f2f32);
instr = cvt;
}
bool is_f2f32() { return label & label_f2f32; }
void set_extract(Instruction* extract)
{
add_label(label_extract);
instr = extract;
}
bool is_extract() { return label & label_extract; }
void set_insert(Instruction* insert)
{
add_label(label_insert);
instr = insert;
}
bool is_insert() { return label & label_insert; }
void set_dpp16(Instruction* mov)
{
add_label(label_dpp16);
instr = mov;
}
void set_dpp8(Instruction* mov)
{
add_label(label_dpp8);
instr = mov;
}
bool is_dpp() { return label & (label_dpp16 | label_dpp8); }
bool is_dpp16() { return label & label_dpp16; }
bool is_dpp8() { return label & label_dpp8; }
void set_split(Instruction* split)
{
add_label(label_split);
instr = split;
}
bool is_split() { return label & label_split; }
};
struct opt_ctx {
Program* program;
float_mode fp_mode;
std::vector<aco_ptr<Instruction>> instructions;
ssa_info* info;
std::pair<uint32_t, Temp> last_literal;
std::vector<mad_info> mad_infos;
std::vector<uint16_t> uses;
};
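/* Whether the instruction can be promoted to the VOP3 encoding: VOP3 accepts
* no literal operands before GFX10, is not combined with DPP/SDWA here, and
* the madmk/madak/fmamk/fmaak variants as well as the lane-access opcodes
* are excluded. */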
bool
can_use_VOP3(opt_ctx& ctx, const aco_ptr<Instruction>& instr)
{
if (instr->isVOP3())
return true;
if (instr->isVOP3P())
return false;
if (instr->operands.size() && instr->operands[0].isLiteral() && ctx.program->gfx_level < GFX10)
return false;
if (instr->isDPP() || instr->isSDWA())
return false;
return instr->opcode != aco_opcode::v_madmk_f32 && instr->opcode != aco_opcode::v_madak_f32 &&
instr->opcode != aco_opcode::v_madmk_f16 && instr->opcode != aco_opcode::v_madak_f16 &&
instr->opcode != aco_opcode::v_fmamk_f32 && instr->opcode != aco_opcode::v_fmaak_f32 &&
instr->opcode != aco_opcode::v_fmamk_f16 && instr->opcode != aco_opcode::v_fmaak_f16 &&
instr->opcode != aco_opcode::v_readlane_b32 &&
instr->opcode != aco_opcode::v_writelane_b32 &&
instr->opcode != aco_opcode::v_readfirstlane_b32;
}
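/* Try to replace operand `index` of a pseudo instruction with `temp`,
* respecting type and size constraints (e.g. VGPRs must not be propagated
* into SGPR definitions). Returns false if the propagation isn't possible. */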
bool
pseudo_propagate_temp(opt_ctx& ctx, aco_ptr<Instruction>& instr, Temp temp, unsigned index)
{
if (instr->definitions.empty())
return false;
const bool vgpr =
instr->opcode == aco_opcode::p_as_uniform ||
std::all_of(instr->definitions.begin(), instr->definitions.end(),
[](const Definition& def) { return def.regClass().type() == RegType::vgpr; });
/* don't propagate VGPRs into SGPR instructions */
if (temp.type() == RegType::vgpr && !vgpr)
return false;
bool can_accept_sgpr =
ctx.program->gfx_level >= GFX9 ||
std::none_of(instr->definitions.begin(), instr->definitions.end(),
[](const Definition& def) { return def.regClass().is_subdword(); });
switch (instr->opcode) {
case aco_opcode::p_phi:
case aco_opcode::p_linear_phi:
case aco_opcode::p_parallelcopy:
case aco_opcode::p_create_vector:
if (temp.bytes() != instr->operands[index].bytes())
return false;
break;
case aco_opcode::p_extract_vector:
case aco_opcode::p_extract:
if (temp.type() == RegType::sgpr && !can_accept_sgpr)
return false;
break;
case aco_opcode::p_split_vector: {
if (temp.type() == RegType::sgpr && !can_accept_sgpr)
return false;
/* don't increase the vector size */
if (temp.bytes() > instr->operands[index].bytes())
return false;
/* We can decrease the vector size as smaller temporaries are only
* propagated by p_as_uniform instructions.
* If this propagation leads to invalid IR or hits the assertion below,
* it means that some undefined bytes within a dword are being accessed
* and a bug in instruction_selection is likely. */
int decrease = instr->operands[index].bytes() - temp.bytes();
while (decrease > 0) {
decrease -= instr->definitions.back().bytes();
instr->definitions.pop_back();
}
assert(decrease == 0);
break;
}
case aco_opcode::p_as_uniform:
if (temp.regClass() == instr->definitions[0].regClass())
instr->opcode = aco_opcode::p_parallelcopy;
break;
default: return false;
}
instr->operands[index].setTemp(temp);
return true;
}
/* This expects the DPP modifier to be removed. */
bool
can_apply_sgprs(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
if (instr->isSDWA() && ctx.program->gfx_level < GFX9)
return false;
return instr->opcode != aco_opcode::v_readfirstlane_b32 &&
instr->opcode != aco_opcode::v_readlane_b32 &&
instr->opcode != aco_opcode::v_readlane_b32_e64 &&
instr->opcode != aco_opcode::v_writelane_b32 &&
instr->opcode != aco_opcode::v_writelane_b32_e64 &&
instr->opcode != aco_opcode::v_permlane16_b32 &&
instr->opcode != aco_opcode::v_permlanex16_b32;
}
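/* Convert the instruction to its VOP3 form in-place and update any ssa_info
* entries that still point at the old Instruction object, so later passes
* don't follow a dangling pointer. */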
void
to_VOP3(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
if (instr->isVOP3())
return;
aco_ptr<Instruction> tmp = std::move(instr);
Format format = asVOP3(tmp->format);
instr.reset(create_instruction<VOP3_instruction>(tmp->opcode, format, tmp->operands.size(),
tmp->definitions.size()));
std::copy(tmp->operands.cbegin(), tmp->operands.cend(), instr->operands.begin());
for (unsigned i = 0; i < instr->definitions.size(); i++) {
instr->definitions[i] = tmp->definitions[i];
if (instr->definitions[i].isTemp()) {
ssa_info& info = ctx.info[instr->definitions[i].tempId()];
if (info.label & instr_usedef_labels && info.instr == tmp.get())
info.instr = instr.get();
}
}
/* we don't need to update any instr_mod_labels because they either haven't
* been applied yet or this instruction isn't dead and so they've been ignored */
instr->pass_flags = tmp->pass_flags;
}
bool
is_operand_vgpr(Operand op)
{
return op.isTemp() && op.getTemp().type() == RegType::vgpr;
}
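/* Like to_VOP3, but converts the instruction to SDWA and fixes up the
* ssa_info pointers of its definitions. */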
void
to_SDWA(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
aco_ptr<Instruction> tmp = convert_to_SDWA(ctx.program->gfx_level, instr);
if (!tmp)
return;
for (unsigned i = 0; i < instr->definitions.size(); i++) {
ssa_info& info = ctx.info[instr->definitions[i].tempId()];
if (info.label & instr_labels && info.instr == tmp.get())
info.instr = instr.get();
}
}
/* only covers special cases */
bool
alu_can_accept_constant(aco_opcode opcode, unsigned operand)
{
switch (opcode) {
case aco_opcode::v_interp_p2_f32:
case aco_opcode::v_mac_f32:
case aco_opcode::v_writelane_b32:
case aco_opcode::v_writelane_b32_e64:
case aco_opcode::v_cndmask_b32: return operand != 2;
case aco_opcode::s_addk_i32:
case aco_opcode::s_mulk_i32:
case aco_opcode::p_wqm:
case aco_opcode::p_extract_vector:
case aco_opcode::p_split_vector:
case aco_opcode::v_readlane_b32:
case aco_opcode::v_readlane_b32_e64:
case aco_opcode::v_readfirstlane_b32:
case aco_opcode::p_extract:
case aco_opcode::p_insert: return operand != 0;
case aco_opcode::p_bpermute:
case aco_opcode::p_interp_gfx11:
case aco_opcode::p_dual_src_export_gfx11: return false;
default: return true;
}
}
bool
valu_can_accept_vgpr(aco_ptr<Instruction>& instr, unsigned operand)
{
if (instr->opcode == aco_opcode::v_readlane_b32 ||
instr->opcode == aco_opcode::v_readlane_b32_e64 ||
instr->opcode == aco_opcode::v_writelane_b32 ||
instr->opcode == aco_opcode::v_writelane_b32_e64)
return operand != 1;
if (instr->opcode == aco_opcode::v_permlane16_b32 ||
instr->opcode == aco_opcode::v_permlanex16_b32)
return operand == 0;
return true;
}
/* check constant bus and literal limitations */
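/* Before GFX10, a VOP3 instruction can read at most one SGPR and no literals.
* From GFX10 onwards, the constant bus allows two reads per instruction, and
* repeated reads of the same SGPR or of identical literals only count once. */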
bool
check_vop3_operands(opt_ctx& ctx, unsigned num_operands, Operand* operands)
{
int limit = ctx.program->gfx_level >= GFX10 ? 2 : 1;
Operand literal32(s1);
Operand literal64(s2);
unsigned num_sgprs = 0;
unsigned sgpr[] = {0, 0};
for (unsigned i = 0; i < num_operands; i++) {
Operand op = operands[i];
if (op.hasRegClass() && op.regClass().type() == RegType::sgpr) {
/* two reads of the same SGPR count as 1 to the limit */
if (op.tempId() != sgpr[0] && op.tempId() != sgpr[1]) {
if (num_sgprs < 2)
sgpr[num_sgprs++] = op.tempId();
limit--;
if (limit < 0)
return false;
}
} else if (op.isLiteral()) {
if (ctx.program->gfx_level < GFX10)
return false;
if (!literal32.isUndefined() && literal32.constantValue() != op.constantValue())
return false;
if (!literal64.isUndefined() && literal64.constantValue() != op.constantValue())
return false;
/* Any number of 32-bit literals counts as only 1 to the limit. Same
* (but separately) for 64-bit literals. */
if (op.size() == 1 && literal32.isUndefined()) {
limit--;
literal32 = op;
} else if (op.size() == 2 && literal64.isUndefined()) {
limit--;
literal64 = op;
}
if (limit < 0)
return false;
}
}
return true;
}
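/* If operand `op_index` is the result of a (possibly nested) addition or
* subtraction by a constant, split it into a base temporary and a constant
* offset, e.g. `t = v_add_u32 a, 16` yields base=a, offset=16. With
* prevent_overflow, only additions marked NUW (no unsigned wrap) are
* followed. */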
bool
parse_base_offset(opt_ctx& ctx, Instruction* instr, unsigned op_index, Temp* base, uint32_t* offset,
bool prevent_overflow)
{
Operand op = instr->operands[op_index];
if (!op.isTemp())
return false;
Temp tmp = op.getTemp();
if (!ctx.info[tmp.id()].is_add_sub())
return false;
Instruction* add_instr = ctx.info[tmp.id()].instr;
unsigned mask = 0x3;
bool is_sub = false;
switch (add_instr->opcode) {
case aco_opcode::v_add_u32:
case aco_opcode::v_add_co_u32:
case aco_opcode::v_add_co_u32_e64:
case aco_opcode::s_add_i32:
case aco_opcode::s_add_u32: break;
case aco_opcode::v_sub_u32:
case aco_opcode::v_sub_i32:
case aco_opcode::v_sub_co_u32:
case aco_opcode::v_sub_co_u32_e64:
case aco_opcode::s_sub_u32:
case aco_opcode::s_sub_i32:
mask = 0x2;
is_sub = true;
break;
case aco_opcode::v_subrev_u32:
case aco_opcode::v_subrev_co_u32:
case aco_opcode::v_subrev_co_u32_e64:
mask = 0x1;
is_sub = true;
break;
default: return false;
}
if (prevent_overflow && !add_instr->definitions[0].isNUW())
return false;
if (add_instr->usesModifiers())
return false;
u_foreach_bit (i, mask) {
if (add_instr->operands[i].isConstant()) {
*offset = add_instr->operands[i].constantValue() * (uint32_t)(is_sub ? -1 : 1);
} else if (add_instr->operands[i].isTemp() &&
ctx.info[add_instr->operands[i].tempId()].is_constant_or_literal(32)) {
*offset = ctx.info[add_instr->operands[i].tempId()].val * (uint32_t)(is_sub ? -1 : 1);
} else {
continue;
}
if (!add_instr->operands[!i].isTemp())
continue;
uint32_t offset2 = 0;
if (parse_base_offset(ctx, add_instr, !i, base, &offset2, prevent_overflow)) {
*offset += offset2;
} else {
*base = add_instr->operands[!i].getTemp();
}
return true;
}
return false;
}
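/* Drop a redundant `s_and_b32 ..., -4` alignment mask from the SMEM offset
* operand. As noted below, the hardware appears to align the register offset
* and the constant offset separately, so the explicit mask has no effect. */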
void
skip_smem_offset_align(opt_ctx& ctx, SMEM_instruction* smem)
{
bool soe = smem->operands.size() >= (!smem->definitions.empty() ? 3 : 4);
if (soe && !smem->operands[1].isConstant())
return;
/* We don't need to check the constant offset because the address seems to be calculated with
* (offset&-4 + const_offset&-4), not (offset+const_offset)&-4.
*/
Operand& op = smem->operands[soe ? smem->operands.size() - 1 : 1];
if (!op.isTemp() || !ctx.info[op.tempId()].is_bitwise())
return;
Instruction* bitwise_instr = ctx.info[op.tempId()].instr;
if (bitwise_instr->opcode != aco_opcode::s_and_b32)
return;
if (bitwise_instr->operands[0].constantEquals(-4) &&
bitwise_instr->operands[1].isOfType(op.regClass().type()))
op.setTemp(bitwise_instr->operands[1].getTemp());
else if (bitwise_instr->operands[1].constantEquals(-4) &&
bitwise_instr->operands[0].isOfType(op.regClass().type()))
op.setTemp(bitwise_instr->operands[0].getTemp());
}
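/* Fold constant offsets and add/sub chains into the SMEM offset operand,
* switching to the SOE (scalar offset + constant) form on GFX9+ when both a
* base register and an immediate remain. */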
void
smem_combine(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
/* skip &-4 before offset additions: load((a + 16) & -4, 0) */
if (!instr->operands.empty())
skip_smem_offset_align(ctx, &instr->smem());
/* propagate constants and combine additions */
if (!instr->operands.empty() && instr->operands[1].isTemp()) {
SMEM_instruction& smem = instr->smem();
ssa_info info = ctx.info[instr->operands[1].tempId()];
Temp base;
uint32_t offset;
bool prevent_overflow = smem.operands[0].size() > 2 || smem.prevent_overflow;
if (info.is_constant_or_literal(32) &&
((ctx.program->gfx_level == GFX6 && info.val <= 0x3FF) ||
(ctx.program->gfx_level == GFX7 && info.val <= 0xFFFFFFFF) ||
(ctx.program->gfx_level >= GFX8 && info.val <= 0xFFFFF))) {
instr->operands[1] = Operand::c32(info.val);
} else if (parse_base_offset(ctx, instr.get(), 1, &base, &offset, prevent_overflow) &&
base.regClass() == s1 && offset <= 0xFFFFF && ctx.program->gfx_level >= GFX9 &&
offset % 4u == 0) {
bool soe = smem.operands.size() >= (!smem.definitions.empty() ? 3 : 4);
if (soe) {
if (ctx.info[smem.operands.back().tempId()].is_constant_or_literal(32) &&
ctx.info[smem.operands.back().tempId()].val == 0) {
smem.operands[1] = Operand::c32(offset);
smem.operands.back() = Operand(base);
}
} else {
SMEM_instruction* new_instr = create_instruction<SMEM_instruction>(
smem.opcode, Format::SMEM, smem.operands.size() + 1, smem.definitions.size());
new_instr->operands[0] = smem.operands[0];
new_instr->operands[1] = Operand::c32(offset);
if (smem.definitions.empty())
new_instr->operands[2] = smem.operands[2];
new_instr->operands.back() = Operand(base);
if (!smem.definitions.empty())
new_instr->definitions[0] = smem.definitions[0];
new_instr->sync = smem.sync;
new_instr->glc = smem.glc;
new_instr->dlc = smem.dlc;
new_instr->nv = smem.nv;
new_instr->disable_wqm = smem.disable_wqm;
instr.reset(new_instr);
}
}
}
/* skip &-4 after offset additions: load(a & -4, 16) */
if (!instr->operands.empty())
skip_smem_offset_align(ctx, &instr->smem());
}
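/* Bit size of operand `index` as the instruction consumes it. This can differ
* from the operand's register size: v_mad_u64_u32 reads operand 2 as 64-bit,
* and v_fma_mix reads 16 or 32 bits depending on opsel_hi. */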
unsigned
get_operand_size(aco_ptr<Instruction>& instr, unsigned index)
{
if (instr->isPseudo())
return instr->operands[index].bytes() * 8u;
else if (instr->opcode == aco_opcode::v_mad_u64_u32 ||
instr->opcode == aco_opcode::v_mad_i64_i32)
return index == 2 ? 64 : 32;
else if (instr->opcode == aco_opcode::v_fma_mix_f32 ||
instr->opcode == aco_opcode::v_fma_mixlo_f16)
return instr->vop3p().opsel_hi & (1u << index) ? 16 : 32;
else if (instr->isVALU() || instr->isSALU())
return instr_info.operand_size[(int)instr->opcode];
else
return 0;
}
Operand
get_constant_op(opt_ctx& ctx, ssa_info info, uint32_t bits)
{
if (bits == 64)
return Operand::c32_or_c64(info.val, true);
return Operand::get_const(ctx.program->gfx_level, info.val, bits / 8u);
}
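/* Try to fold a 32-bit constant operand into a VOP3P instruction. Each used
* half of the packed operand must become an inline constant, so this may
* adjust opsel (e.g. selecting a value that is only representable shifted
* into the high half) or neg_lo/neg_hi (when one half is the negation of the
* other) to make both halves fit. */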
void
propagate_constants_vop3p(opt_ctx& ctx, aco_ptr<Instruction>& instr, ssa_info& info, unsigned i)
{
if (!info.is_constant_or_literal(32))
return;
assert(instr->operands[i].isTemp());
unsigned bits = get_operand_size(instr, i);
if (info.is_constant(bits)) {
instr->operands[i] = get_constant_op(ctx, info, bits);
return;
}
/* The accumulation operand of dot product instructions ignores opsel. */
bool cannot_use_opsel =
(instr->opcode == aco_opcode::v_dot4_i32_i8 || instr->opcode == aco_opcode::v_dot2_i32_i16 ||
instr->opcode == aco_opcode::v_dot4_i32_iu8 || instr->opcode == aco_opcode::v_dot4_u32_u8 ||
instr->opcode == aco_opcode::v_dot2_u32_u16) &&
i == 2;
if (cannot_use_opsel)
return;
/* try to fold inline constants */
VOP3P_instruction* vop3p = &instr->vop3p();
bool opsel_lo = (vop3p->opsel_lo >> i) & 1;
bool opsel_hi = (vop3p->opsel_hi >> i) & 1;
Operand const_op[2];
bool const_opsel[2] = {false, false};
for (unsigned j = 0; j < 2; j++) {
if ((unsigned)opsel_lo != j && (unsigned)opsel_hi != j)
continue; /* this half is unused */
uint16_t val = info.val >> (j ? 16 : 0);
Operand op = Operand::get_const(ctx.program->gfx_level, val, bits / 8u);
if (bits == 32 && op.isLiteral()) /* try sign extension */
op = Operand::get_const(ctx.program->gfx_level, val | 0xffff0000, 4);
if (bits == 32 && op.isLiteral()) { /* try shifting left */
op = Operand::get_const(ctx.program->gfx_level, val << 16, 4);
const_opsel[j] = true;
}
if (op.isLiteral())
return;
const_op[j] = op;
}
Operand const_lo = const_op[0];
Operand const_hi = const_op[1];
bool const_lo_opsel = const_opsel[0];
bool const_hi_opsel = const_opsel[1];
if (opsel_lo == opsel_hi) {
/* use the single 16bit value */
instr->operands[i] = opsel_lo ? const_hi : const_lo;
/* opsel must select the same half for lo and hi */
opsel_lo = opsel_lo ? const_hi_opsel : const_lo_opsel;
opsel_hi = opsel_lo;
} else if (const_lo == const_hi) {
/* both constants are the same */
instr->operands[i] = const_lo;
/* opsel must select the same half for lo and hi */
opsel_lo = const_lo_opsel;
opsel_hi = const_lo_opsel;
} else if (const_lo.constantValue16(const_lo_opsel) ==
const_hi.constantValue16(!const_hi_opsel)) {
instr->operands[i] = const_hi;
/* redirect opsel selection */
opsel_lo = opsel_lo ? const_hi_opsel : !const_hi_opsel;
opsel_hi = opsel_hi ? const_hi_opsel : !const_hi_opsel;
} else if (const_hi.constantValue16(const_hi_opsel) ==
const_lo.constantValue16(!const_lo_opsel)) {
instr->operands[i] = const_lo;
/* redirect opsel selection */
opsel_lo = opsel_lo ? !const_lo_opsel : const_lo_opsel;
opsel_hi = opsel_hi ? !const_lo_opsel : const_lo_opsel;
} else if (bits == 16 && const_lo.constantValue() == (const_hi.constantValue() ^ (1 << 15))) {
assert(const_lo_opsel == false && const_hi_opsel == false);
/* const_lo == -const_hi */
if (!instr_info.can_use_input_modifiers[(int)instr->opcode])
return;
instr->operands[i] = Operand::c16(const_lo.constantValue() & 0x7FFF);
bool neg_lo = const_lo.constantValue() & (1 << 15);
vop3p->neg_lo[i] ^= opsel_lo ^ neg_lo;
vop3p->neg_hi[i] ^= opsel_hi ^ neg_lo;
/* opsel must point to lo for both operands */
opsel_lo = false;
opsel_hi = false;
}
vop3p->opsel_lo = opsel_lo ? (vop3p->opsel_lo | (1 << i)) : (vop3p->opsel_lo & ~(1 << i));
vop3p->opsel_hi = opsel_hi ? (vop3p->opsel_hi | (1 << i)) : (vop3p->opsel_hi & ~(1 << i));
}
bool
fixed_to_exec(Operand op)
{
return op.isFixed() && op.physReg() == exec;
}
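/* Describe the instruction as a subdword selection (byte/word extraction,
* optionally sign-extended) of its first operand, or return an invalid
* SubdwordSel if it isn't one. */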
SubdwordSel
parse_extract(Instruction* instr)
{
if (instr->opcode == aco_opcode::p_extract) {
unsigned size = instr->operands[2].constantValue() / 8;
unsigned offset = instr->operands[1].constantValue() * size;
bool sext = instr->operands[3].constantEquals(1);
return SubdwordSel(size, offset, sext);
} else if (instr->opcode == aco_opcode::p_insert && instr->operands[1].constantEquals(0)) {
return instr->operands[2].constantEquals(8) ? SubdwordSel::ubyte : SubdwordSel::uword;
} else if (instr->opcode == aco_opcode::p_extract_vector) {
unsigned size = instr->definitions[0].bytes();
unsigned offset = instr->operands[1].constantValue() * size;
if (size <= 2)
return SubdwordSel(size, offset, false);
} else if (instr->opcode == aco_opcode::p_split_vector) {
assert(instr->operands[0].bytes() == 4 && instr->definitions[1].bytes() == 2);
return SubdwordSel(2, 2, false);
}
return SubdwordSel();
}
SubdwordSel
parse_insert(Instruction* instr)
{
if (instr->opcode == aco_opcode::p_extract && instr->operands[3].constantEquals(0) &&
instr->operands[1].constantEquals(0)) {
return instr->operands[2].constantEquals(8) ? SubdwordSel::ubyte : SubdwordSel::uword;
} else if (instr->opcode == aco_opcode::p_insert) {
unsigned size = instr->operands[2].constantValue() / 8;
unsigned offset = instr->operands[1].constantValue() * size;
return SubdwordSel(size, offset, false);
} else {
return SubdwordSel();
}
}
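/* Whether the extraction in `info` can be folded into operand `idx` of
* instr: trivially for a full-dword selection, via a dedicated opcode
* (v_cvt_f32_ubyteN), via SDWA, via VOP3 opsel, or by composing two
* p_extracts. */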
bool
can_apply_extract(opt_ctx& ctx, aco_ptr<Instruction>& instr, unsigned idx, ssa_info& info)
{
Temp tmp = info.instr->operands[0].getTemp();
SubdwordSel sel = parse_extract(info.instr);
if (!sel) {
return false;
} else if (sel.size() == 4) {
return true;
} else if (instr->opcode == aco_opcode::v_cvt_f32_u32 && sel.size() == 1 && !sel.sign_extend()) {
return true;
} else if (idx < 2 && can_use_SDWA(ctx.program->gfx_level, instr, true) &&
(tmp.type() == RegType::vgpr || ctx.program->gfx_level >= GFX9)) {
if (instr->isSDWA() && instr->sdwa().sel[idx] != SubdwordSel::dword)
return false;
return true;
} else if (instr->isVOP3() && sel.size() == 2 &&
can_use_opsel(ctx.program->gfx_level, instr->opcode, idx) &&
!(instr->vop3().opsel & (1 << idx))) {
return true;
} else if (instr->opcode == aco_opcode::p_extract) {
SubdwordSel instrSel = parse_extract(instr.get());
/* the outer offset must be within extracted range */
if (instrSel.offset() >= sel.size())
return false;
/* don't remove the sign-extension when increasing the size further */
if (instrSel.size() > sel.size() && !instrSel.sign_extend() && sel.sign_extend())
return false;
return true;
}
return false;
}
/* Combine a p_extract (or p_insert, in some cases) instruction with instr.
* instr(p_extract(...)) -> instr()
*/
void
apply_extract(opt_ctx& ctx, aco_ptr<Instruction>& instr, unsigned idx, ssa_info& info)
{
Temp tmp = info.instr->operands[0].getTemp();
SubdwordSel sel = parse_extract(info.instr);
assert(sel);
instr->operands[idx].set16bit(false);
instr->operands[idx].set24bit(false);
ctx.info[tmp.id()].label &= ~label_insert;
if (sel.size() == 4) {
/* full dword selection */
} else if (instr->opcode == aco_opcode::v_cvt_f32_u32 && sel.size() == 1 && !sel.sign_extend()) {
switch (sel.offset()) {
case 0: instr->opcode = aco_opcode::v_cvt_f32_ubyte0; break;
case 1: instr->opcode = aco_opcode::v_cvt_f32_ubyte1; break;
case 2: instr->opcode = aco_opcode::v_cvt_f32_ubyte2; break;
case 3: instr->opcode = aco_opcode::v_cvt_f32_ubyte3; break;
}
} else if (instr->opcode == aco_opcode::v_lshlrev_b32 && instr->operands[0].isConstant() &&
sel.offset() == 0 &&
((sel.size() == 2 && instr->operands[0].constantValue() >= 16u) ||
(sel.size() == 1 && instr->operands[0].constantValue() >= 24u))) {
/* The undesirable upper bits are already shifted out. */
return;
} else if (can_use_SDWA(ctx.program->gfx_level, instr, true) &&
(tmp.type() == RegType::vgpr || ctx.program->gfx_level >= GFX9)) {
to_SDWA(ctx, instr);
static_cast<SDWA_instruction*>(instr.get())->sel[idx] = sel;
} else if (instr->isVOP3()) {
if (sel.offset())
instr->vop3().opsel |= 1 << idx;
} else if (instr->opcode == aco_opcode::p_extract) {
SubdwordSel instrSel = parse_extract(instr.get());
unsigned size = std::min(sel.size(), instrSel.size());
unsigned offset = sel.offset() + instrSel.offset();
unsigned sign_extend =
instrSel.sign_extend() && (sel.sign_extend() || instrSel.size() <= sel.size());
instr->operands[1] = Operand::c32(offset / size);
instr->operands[2] = Operand::c32(size * 8u);
instr->operands[3] = Operand::c32(sign_extend);
return;
}
/* Output modifier, label_vopc and label_f2f32 seem to be the only ones worth
* keeping at the moment. */
for (Definition& def : instr->definitions)
ctx.info[def.tempId()].label &= (label_vopc | label_f2f32 | instr_mod_labels);
}
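/* Clear label_extract from operands whose extraction can't be applied to
* this instruction, so that later passes don't try to fold it. */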
void
check_sdwa_extract(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
for (unsigned i = 0; i < instr->operands.size(); i++) {
Operand op = instr->operands[i];
if (!op.isTemp())
continue;
ssa_info& info = ctx.info[op.tempId()];
if (info.is_extract() && (info.instr->operands[0].getTemp().type() == RegType::vgpr ||
op.getTemp().type() == RegType::sgpr)) {
if (!can_apply_extract(ctx, instr, i, info))
info.label &= ~label_extract;
}
}
}
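/* Whether the FP operation flushes denormal inputs itself. On GFX8 and
* earlier, the min/max family doesn't, and v_cndmask_b32 merely selects bits,
* so canonicalizations (e.g. a multiply by 1.0) feeding them can't be
* removed. */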
bool
does_fp_op_flush_denorms(opt_ctx& ctx, aco_opcode op)
{
if (ctx.program->gfx_level <= GFX8) {
switch (op) {
case aco_opcode::v_min_f32:
case aco_opcode::v_max_f32:
case aco_opcode::v_med3_f32:
case aco_opcode::v_min3_f32:
case aco_opcode::v_max3_f32:
case aco_opcode::v_min_f16:
case aco_opcode::v_max_f16: return false;
default: break;
}
}
return op != aco_opcode::v_cndmask_b32;
}
bool
can_eliminate_fcanonicalize(opt_ctx& ctx, aco_ptr<Instruction>& instr, Temp tmp)
{
float_mode* fp = &ctx.fp_mode;
if (ctx.info[tmp.id()].is_canonicalized() ||
(tmp.bytes() == 4 ? fp->denorm32 : fp->denorm16_64) == fp_denorm_keep)
return true;
aco_opcode op = instr->opcode;
return instr_info.can_use_input_modifiers[(int)op] && does_fp_op_flush_denorms(ctx, op);
}
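/* Whether an `s_and tmp, exec` is superfluous: true if tmp is a VOPC result
* computed under the same exec mask (identified via pass_flags), or a
* boolean combination of such results. */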
bool
can_eliminate_and_exec(opt_ctx& ctx, Temp tmp, unsigned pass_flags)
{
if (ctx.info[tmp.id()].is_vopc()) {
Instruction* vopc_instr = ctx.info[tmp.id()].instr;
/* Remove superfluous s_and when the VOPC instruction uses the same exec and thus
* already produces the same result */
return vopc_instr->pass_flags == pass_flags;
}
if (ctx.info[tmp.id()].is_bitwise()) {
Instruction* instr = ctx.info[tmp.id()].instr;
if (instr->operands.size() != 2 || instr->pass_flags != pass_flags)
return false;
if (!(instr->operands[0].isTemp() && instr->operands[1].isTemp()))
return false;
if (instr->opcode == aco_opcode::s_and_b32 || instr->opcode == aco_opcode::s_and_b64) {
return can_eliminate_and_exec(ctx, instr->operands[0].getTemp(), pass_flags) ||
can_eliminate_and_exec(ctx, instr->operands[1].getTemp(), pass_flags);
} else {
return can_eliminate_and_exec(ctx, instr->operands[0].getTemp(), pass_flags) &&
can_eliminate_and_exec(ctx, instr->operands[1].getTemp(), pass_flags);
}
}
return false;
}
bool
is_copy_label(opt_ctx& ctx, aco_ptr<Instruction>& instr, ssa_info& info)
{
return info.is_temp() ||
(info.is_fcanonicalize() && can_eliminate_fcanonicalize(ctx, instr, info.temp));
}
bool
is_op_canonicalized(opt_ctx& ctx, Operand op)
{
float_mode* fp = &ctx.fp_mode;
if ((op.isTemp() && ctx.info[op.tempId()].is_canonicalized()) ||
(op.bytes() == 4 ? fp->denorm32 : fp->denorm16_64) == fp_denorm_keep)
return true;
if (op.isConstant() || (op.isTemp() && ctx.info[op.tempId()].is_constant_or_literal(32))) {
uint32_t val = op.isTemp() ? ctx.info[op.tempId()].val : op.constantValue();
if (op.bytes() == 2)
return (val & 0x7fff) == 0 || (val & 0x7fff) > 0x3ff;
else if (op.bytes() == 4)
return (val & 0x7fffffff) == 0 || (val & 0x7fffffff) > 0x7fffff;
}
return false;
}
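/* Check that the combined scratch offset stays within the range supported by
* the hardware. GFX10 additionally has a bug with negative, unaligned offsets
* when a VGPR address is used. */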
bool
is_scratch_offset_valid(opt_ctx& ctx, Instruction* instr, int64_t offset0, int64_t offset1)
{
bool negative_unaligned_scratch_offset_bug = ctx.program->gfx_level == GFX10;
int32_t min = ctx.program->dev.scratch_global_offset_min;
int32_t max = ctx.program->dev.scratch_global_offset_max;
int64_t offset = offset0 + offset1;
bool has_vgpr_offset = instr && !instr->operands[0].isUndefined();
if (negative_unaligned_scratch_offset_bug && has_vgpr_offset && offset < 0 && offset % 4)
return false;
return offset >= min && offset <= max;
}
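/* First pass: for a single instruction, propagate temporaries, constants and
* input modifiers into its operands and attach labels to its definitions for
* the following passes to consume. */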
void
label_instruction(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
if (instr->isSALU() || instr->isVALU() || instr->isPseudo()) {
ASSERTED bool all_const = false;
for (Operand& op : instr->operands)
all_const =
all_const && (!op.isTemp() || ctx.info[op.tempId()].is_constant_or_literal(32));
perfwarn(ctx.program, all_const, "All instruction operands are constant", instr.get());
ASSERTED bool is_copy = instr->opcode == aco_opcode::s_mov_b32 ||
instr->opcode == aco_opcode::s_mov_b64 ||
instr->opcode == aco_opcode::v_mov_b32;
perfwarn(ctx.program, is_copy && !instr->usesModifiers(), "Use p_parallelcopy instead",
instr.get());
}
if (instr->isSMEM())
smem_combine(ctx, instr);
for (unsigned i = 0; i < instr->operands.size(); i++) {
if (!instr->operands[i].isTemp())
continue;
ssa_info info = ctx.info[instr->operands[i].tempId()];
/* propagate undef */
if (info.is_undefined() && is_phi(instr))
instr->operands[i] = Operand(instr->operands[i].regClass());
/* propagate reg->reg of same type */
while (info.is_temp() && info.temp.regClass() == instr->operands[i].getTemp().regClass()) {
instr->operands[i].setTemp(ctx.info[instr->operands[i].tempId()].temp);
info = ctx.info[info.temp.id()];
}
/* PSEUDO: propagate temporaries */
if (instr->isPseudo()) {
while (info.is_temp()) {
pseudo_propagate_temp(ctx, instr, info.temp, i);
info = ctx.info[info.temp.id()];
}
}
/* SALU / PSEUDO: propagate inline constants */
if (instr->isSALU() || instr->isPseudo()) {
unsigned bits = get_operand_size(instr, i);
if ((info.is_constant(bits) || (info.is_literal(bits) && instr->isPseudo())) &&
!instr->operands[i].isFixed() && alu_can_accept_constant(instr->opcode, i)) {
instr->operands[i] = get_constant_op(ctx, info, bits);
continue;
}
}
/* VALU: propagate neg, abs & inline constants */
else if (instr->isVALU()) {
if (is_copy_label(ctx, instr, info) && info.temp.type() == RegType::vgpr &&
valu_can_accept_vgpr(instr, i)) {
instr->operands[i].setTemp(info.temp);
info = ctx.info[info.temp.id()];
}
/* applying SGPRs to VOP1 doesn't increase code size and DCE is helped by doing it earlier */
if (info.is_temp() && info.temp.type() == RegType::sgpr && can_apply_sgprs(ctx, instr) &&
instr->operands.size() == 1) {
instr->format = withoutDPP(instr->format);
instr->operands[i].setTemp(info.temp);
info = ctx.info[info.temp.id()];
}
/* for instructions other than v_cndmask_b32, the size of the instruction should match the
* operand size */
unsigned can_use_mod =
instr->opcode != aco_opcode::v_cndmask_b32 || instr->operands[i].getTemp().bytes() == 4;
can_use_mod = can_use_mod && instr_info.can_use_input_modifiers[(int)instr->opcode];
if (instr->isSDWA())
can_use_mod = can_use_mod && instr->sdwa().sel[i].size() == 4;
else
can_use_mod = can_use_mod && (instr->isDPP16() || can_use_VOP3(ctx, instr));
unsigned bits = get_operand_size(instr, i);
bool mod_bitsize_compat = instr->operands[i].bytes() * 8 == bits;
if (info.is_neg() && instr->opcode == aco_opcode::v_add_f32 && mod_bitsize_compat) {
instr->opcode = i ? aco_opcode::v_sub_f32 : aco_opcode::v_subrev_f32;
instr->operands[i].setTemp(info.temp);
} else if (info.is_neg() && instr->opcode == aco_opcode::v_add_f16 && mod_bitsize_compat) {
instr->opcode = i ? aco_opcode::v_sub_f16 : aco_opcode::v_subrev_f16;
instr->operands[i].setTemp(info.temp);
} else if (info.is_neg() && can_use_mod && mod_bitsize_compat &&
can_eliminate_fcanonicalize(ctx, instr, info.temp)) {
if (!instr->isDPP() && !instr->isSDWA())
to_VOP3(ctx, instr);
instr->operands[i].setTemp(info.temp);
if (instr->isDPP16() && !instr->dpp16().abs[i])
instr->dpp16().neg[i] = true;
else if (instr->isSDWA() && !instr->sdwa().abs[i])
instr->sdwa().neg[i] = true;
else if (instr->isVOP3() && !instr->vop3().abs[i])
instr->vop3().neg[i] = true;
}
if (info.is_abs() && can_use_mod && mod_bitsize_compat &&
can_eliminate_fcanonicalize(ctx, instr, info.temp)) {
if (!instr->isDPP() && !instr->isSDWA())
to_VOP3(ctx, instr);
instr->operands[i] = Operand(info.temp);
if (instr->isDPP16())
instr->dpp16().abs[i] = true;
else if (instr->isSDWA())
instr->sdwa().abs[i] = true;
else
instr->vop3().abs[i] = true;
continue;
}
if (instr->isVOP3P()) {
propagate_constants_vop3p(ctx, instr, info, i);
continue;
}
if (info.is_constant(bits) && alu_can_accept_constant(instr->opcode, i) &&
(!instr->isSDWA() || ctx.program->gfx_level >= GFX9)) {
Operand op = get_constant_op(ctx, info, bits);
perfwarn(ctx.program, instr->opcode == aco_opcode::v_cndmask_b32 && i == 2,
"v_cndmask_b32 with a constant selector", instr.get());
if (i == 0 || instr->isSDWA() || instr->opcode == aco_opcode::v_readlane_b32 ||
instr->opcode == aco_opcode::v_writelane_b32) {
instr->format = withoutDPP(instr->format);
instr->operands[i] = op;
continue;
} else if (!instr->isVOP3() && can_swap_operands(instr, &instr->opcode)) {
instr->operands[i] = instr->operands[0];
instr->operands[0] = op;
continue;
} else if (can_use_VOP3(ctx, instr)) {
to_VOP3(ctx, instr);
instr->operands[i] = op;
continue;
}
}
}
/* MUBUF: propagate constants and combine additions */
else if (instr->isMUBUF()) {
MUBUF_instruction& mubuf = instr->mubuf();
Temp base;
uint32_t offset;
while (info.is_temp())
info = ctx.info[info.temp.id()];
/* According to AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(), vaddr
* overflow for scratch accesses works only on GFX9+ and saddr overflow
* never works. Since swizzling is the only thing that separates
* scratch accesses from other accesses, and swizzling significantly
* changes how addressing works, this probably applies to swizzled
* MUBUF accesses. */
bool vaddr_prevent_overflow = mubuf.swizzled && ctx.program->gfx_level < GFX9;
if (mubuf.offen && mubuf.idxen && i == 1 && info.is_vec() &&
info.instr->operands.size() == 2 && info.instr->operands[0].isTemp() &&
info.instr->operands[0].regClass() == v1 && info.instr->operands[1].isConstant() &&
mubuf.offset + info.instr->operands[1].constantValue() < 4096) {
instr->operands[1] = info.instr->operands[0];
mubuf.offset += info.instr->operands[1].constantValue();
mubuf.offen = false;
continue;
} else if (mubuf.offen && i == 1 && info.is_constant_or_literal(32) &&
mubuf.offset + info.val < 4096) {
assert(!mubuf.idxen);
instr->operands[1] = Operand(v1);
mubuf.offset += info.val;
mubuf.offen = false;
continue;
} else if (i == 2 && info.is_constant_or_literal(32) && mubuf.offset + info.val < 4096) {
instr->operands[2] = Operand::c32(0);
mubuf.offset += info.val;
continue;
} else if (mubuf.offen && i == 1 &&
parse_base_offset(ctx, instr.get(), i, &base, &offset,
vaddr_prevent_overflow) &&
base.regClass() == v1 && mubuf.offset + offset < 4096) {
assert(!mubuf.idxen);
instr->operands[1].setTemp(base);
mubuf.offset += offset;
continue;
} else if (i == 2 && parse_base_offset(ctx, instr.get(), i, &base, &offset, true) &&
base.regClass() == s1 && mubuf.offset + offset < 4096) {
instr->operands[i].setTemp(base);
mubuf.offset += offset;
continue;
}
}
/* SCRATCH: propagate constants and combine additions */
else if (instr->isScratch()) {
FLAT_instruction& scratch = instr->scratch();
Temp base;
uint32_t offset;
while (info.is_temp())
info = ctx.info[info.temp.id()];
/* The hardware probably does: 'scratch_base + u2u64(saddr) + i2i64(offset)'. This means
* we can't combine the addition if the unsigned addition overflows and offset is
* positive. In theory, there are also issues if
* 'ilt(offset, 0) && ige(saddr, 0) && ilt(saddr + offset, 0)', but that just
* replaces an already out-of-bounds access with a larger one since 'saddr + offset'
* would be larger than INT32_MAX.
*/
if (i <= 1 && parse_base_offset(ctx, instr.get(), i, &base, &offset, true) &&
base.regClass() == instr->operands[i].regClass() &&
is_scratch_offset_valid(ctx, instr.get(), scratch.offset, (int32_t)offset)) {
instr->operands[i].setTemp(base);
scratch.offset += (int32_t)offset;
continue;
} else if (i <= 1 && parse_base_offset(ctx, instr.get(), i, &base, &offset, false) &&
base.regClass() == instr->operands[i].regClass() && (int32_t)offset < 0 &&
is_scratch_offset_valid(ctx, instr.get(), scratch.offset, (int32_t)offset)) {
instr->operands[i].setTemp(base);
scratch.offset += (int32_t)offset;
continue;
} else if (i <= 1 && info.is_constant_or_literal(32) &&
ctx.program->gfx_level >= GFX10_3 &&
is_scratch_offset_valid(ctx, NULL, scratch.offset, (int32_t)info.val)) {
/* GFX10.3+ can disable both SADDR and ADDR. */
instr->operands[i] = Operand(instr->operands[i].regClass());
scratch.offset += (int32_t)info.val;
continue;
}
}
/* DS: combine additions */
else if (instr->isDS()) {
DS_instruction& ds = instr->ds();
Temp base;
uint32_t offset;
bool has_usable_ds_offset = ctx.program->gfx_level >= GFX7;
if (has_usable_ds_offset && i == 0 &&
parse_base_offset(ctx, instr.get(), i, &base, &offset, false) &&
base.regClass() == instr->operands[i].regClass() &&
instr->opcode != aco_opcode::ds_swizzle_b32) {
if (instr->opcode == aco_opcode::ds_write2_b32 ||
instr->opcode == aco_opcode::ds_read2_b32 ||
instr->opcode == aco_opcode::ds_write2_b64 ||
instr->opcode == aco_opcode::ds_read2_b64 ||
instr->opcode == aco_opcode::ds_write2st64_b32 ||
instr->opcode == aco_opcode::ds_read2st64_b32 ||
instr->opcode == aco_opcode::ds_write2st64_b64 ||
instr->opcode == aco_opcode::ds_read2st64_b64) {
bool is64bit = instr->opcode == aco_opcode::ds_write2_b64 ||
instr->opcode == aco_opcode::ds_read2_b64 ||
instr->opcode == aco_opcode::ds_write2st64_b64 ||
instr->opcode == aco_opcode::ds_read2st64_b64;
bool st64 = instr->opcode == aco_opcode::ds_write2st64_b32 ||
instr->opcode == aco_opcode::ds_read2st64_b32 ||
instr->opcode == aco_opcode::ds_write2st64_b64 ||
instr->opcode == aco_opcode::ds_read2st64_b64;
unsigned shifts = (is64bit ? 3 : 2) + (st64 ? 6 : 0);
unsigned mask = BITFIELD_MASK(shifts);
if ((offset & mask) == 0 && ds.offset0 + (offset >> shifts) <= 255 &&
ds.offset1 + (offset >> shifts) <= 255) {
instr->operands[i].setTemp(base);
ds.offset0 += offset >> shifts;
ds.offset1 += offset >> shifts;
}
} else {
if (ds.offset0 + offset <= 65535) {
instr->operands[i].setTemp(base);
ds.offset0 += offset;
}
}
}
}
else if (instr->isBranch()) {
if (ctx.info[instr->operands[0].tempId()].is_scc_invert()) {
/* Flip the branch instruction to get rid of the scc_invert instruction */
instr->opcode = instr->opcode == aco_opcode::p_cbranch_z ? aco_opcode::p_cbranch_nz
: aco_opcode::p_cbranch_z;
instr->operands[0].setTemp(ctx.info[instr->operands[0].tempId()].temp);
}
}
}
/* if this instruction doesn't define anything, return */
if (instr->definitions.empty()) {
check_sdwa_extract(ctx, instr);
return;
}
if (instr->isVALU() || instr->isVINTRP()) {
if (instr_info.can_use_output_modifiers[(int)instr->opcode] || instr->isVINTRP() ||
instr->opcode == aco_opcode::v_cndmask_b32) {
bool canonicalized = true;
if (!does_fp_op_flush_denorms(ctx, instr->opcode)) {
unsigned ops = instr->opcode == aco_opcode::v_cndmask_b32 ? 2 : instr->operands.size();
for (unsigned i = 0; canonicalized && (i < ops); i++)
canonicalized = is_op_canonicalized(ctx, instr->operands[i]);
}
if (canonicalized)
ctx.info[instr->definitions[0].tempId()].set_canonicalized();
}
if (instr->isVOPC()) {
ctx.info[instr->definitions[0].tempId()].set_vopc(instr.get());
check_sdwa_extract(ctx, instr);
return;
}
if (instr->isVOP3P()) {
ctx.info[instr->definitions[0].tempId()].set_vop3p(instr.get());
return;
}
}
switch (instr->opcode) {
case aco_opcode::p_create_vector: {
bool copy_prop = instr->operands.size() == 1 && instr->operands[0].isTemp() &&
instr->operands[0].regClass() == instr->definitions[0].regClass();
if (copy_prop) {
ctx.info[instr->definitions[0].tempId()].set_temp(instr->operands[0].getTemp());
break;
}
/* expand vector operands */
std::vector<Operand> ops;
unsigned offset = 0;
for (const Operand& op : instr->operands) {
/* ensure that any expanded operands are properly aligned */
bool aligned = offset % 4 == 0 || op.bytes() < 4;
offset += op.bytes();
if (aligned && op.isTemp() && ctx.info[op.tempId()].is_vec()) {
Instruction* vec = ctx.info[op.tempId()].instr;
for (const Operand& vec_op : vec->operands)
ops.emplace_back(vec_op);
} else {
ops.emplace_back(op);
}
}
/* combine expanded operands to new vector */
if (ops.size() != instr->operands.size()) {
assert(ops.size() > instr->operands.size());
Definition def = instr->definitions[0];
instr.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
Format::PSEUDO, ops.size(), 1));
for (unsigned i = 0; i < ops.size(); i++) {
if (ops[i].isTemp() && ctx.info[ops[i].tempId()].is_temp() &&
ops[i].regClass() == ctx.info[ops[i].tempId()].temp.regClass())
ops[i].setTemp(ctx.info[ops[i].tempId()].temp);
instr->operands[i] = ops[i];
}
instr->definitions[0] = def;
} else {
for (unsigned i = 0; i < ops.size(); i++) {
assert(instr->operands[i] == ops[i]);
}
}
ctx.info[instr->definitions[0].tempId()].set_vec(instr.get());
if (instr->operands.size() == 2) {
/* check if this is created from split_vector */
if (instr->operands[1].isTemp() && ctx.info[instr->operands[1].tempId()].is_split()) {
Instruction* split = ctx.info[instr->operands[1].tempId()].instr;
if (instr->operands[0].isTemp() &&
instr->operands[0].getTemp() == split->definitions[0].getTemp())
ctx.info[instr->definitions[0].tempId()].set_temp(split->operands[0].getTemp());
}
}
break;
}
case aco_opcode::p_split_vector: {
ssa_info& info = ctx.info[instr->operands[0].tempId()];
if (info.is_constant_or_literal(32)) {
uint64_t val = info.val;
for (Definition def : instr->definitions) {
uint32_t mask = u_bit_consecutive(0, def.bytes() * 8u);
ctx.info[def.tempId()].set_constant(ctx.program->gfx_level, val & mask);
val >>= def.bytes() * 8u;
}
break;
} else if (!info.is_vec()) {
if (instr->definitions.size() == 2 && instr->operands[0].isTemp() &&
instr->definitions[0].bytes() == instr->definitions[1].bytes()) {
ctx.info[instr->definitions[1].tempId()].set_split(instr.get());
if (instr->operands[0].bytes() == 4) {
/* D16 subdword split */
ctx.info[instr->definitions[0].tempId()].set_temp(instr->operands[0].getTemp());
ctx.info[instr->definitions[1].tempId()].set_extract(instr.get());
}
}
break;
}
Instruction* vec = ctx.info[instr->operands[0].tempId()].instr;
unsigned split_offset = 0;
unsigned vec_offset = 0;
unsigned vec_index = 0;
for (unsigned i = 0; i < instr->definitions.size();
split_offset += instr->definitions[i++].bytes()) {
while (vec_offset < split_offset && vec_index < vec->operands.size())
vec_offset += vec->operands[vec_index++].bytes();
if (vec_offset != split_offset ||
vec->operands[vec_index].bytes() != instr->definitions[i].bytes())
continue;
Operand vec_op = vec->operands[vec_index];
if (vec_op.isConstant()) {
ctx.info[instr->definitions[i].tempId()].set_constant(ctx.program->gfx_level,
vec_op.constantValue64());
} else if (vec_op.isUndefined()) {
ctx.info[instr->definitions[i].tempId()].set_undefined();
} else {
assert(vec_op.isTemp());
ctx.info[instr->definitions[i].tempId()].set_temp(vec_op.getTemp());
}
}
break;
}
case aco_opcode::p_extract_vector: { /* mov */
ssa_info& info = ctx.info[instr->operands[0].tempId()];
const unsigned index = instr->operands[1].constantValue();
const unsigned dst_offset = index * instr->definitions[0].bytes();
if (info.is_vec()) {
/* check if we index directly into a vector element */
Instruction* vec = info.instr;
unsigned offset = 0;
for (const Operand& op : vec->operands) {
if (offset < dst_offset) {
offset += op.bytes();
continue;
} else if (offset != dst_offset || op.bytes() != instr->definitions[0].bytes()) {
break;
}
instr->operands[0] = op;
break;
}
} else if (info.is_constant_or_literal(32)) {
/* propagate constants */
uint32_t mask = u_bit_consecutive(0, instr->definitions[0].bytes() * 8u);
uint32_t val = (info.val >> (dst_offset * 8u)) & mask;
instr->operands[0] =
Operand::get_const(ctx.program->gfx_level, val, instr->definitions[0].bytes());
}
if (instr->operands[0].bytes() != instr->definitions[0].bytes()) {
if (instr->operands[0].size() != 1)
break;
if (index == 0)
ctx.info[instr->definitions[0].tempId()].set_temp(instr->operands[0].getTemp());
else
ctx.info[instr->definitions[0].tempId()].set_extract(instr.get());
break;
}
/* convert this extract into a copy instruction */
instr->opcode = aco_opcode::p_parallelcopy;
instr->operands.pop_back();
FALLTHROUGH;
}
case aco_opcode::p_parallelcopy: /* propagate */
if (instr->operands[0].isTemp() && ctx.info[instr->operands[0].tempId()].is_vec() &&
instr->operands[0].regClass() != instr->definitions[0].regClass()) {
/* We might not be able to copy-propagate if it's a SGPR->VGPR copy, so
* duplicate the vector instead.
*/
Instruction* vec = ctx.info[instr->operands[0].tempId()].instr;
aco_ptr<Instruction> old_copy = std::move(instr);
instr.reset(create_instruction<Pseudo_instruction>(
aco_opcode::p_create_vector, Format::PSEUDO, vec->operands.size(), 1));
instr->definitions[0] = old_copy->definitions[0];
std::copy(vec->operands.begin(), vec->operands.end(), instr->operands.begin());
for (unsigned i = 0; i < vec->operands.size(); i++) {
Operand& op = instr->operands[i];
if (op.isTemp() && ctx.info[op.tempId()].is_temp() &&
ctx.info[op.tempId()].temp.type() == instr->definitions[0].regClass().type())
op.setTemp(ctx.info[op.tempId()].temp);
}
ctx.info[instr->definitions[0].tempId()].set_vec(instr.get());
break;
}
FALLTHROUGH;
case aco_opcode::p_as_uniform:
if (instr->definitions[0].isFixed()) {
/* don't copy-propagate copies into fixed registers */
} else if (instr->usesModifiers()) {
// TODO
} else if (instr->operands[0].isConstant()) {
ctx.info[instr->definitions[0].tempId()].set_constant(
ctx.program->gfx_level, instr->operands[0].constantValue64());
} else if (instr->operands[0].isTemp()) {
ctx.info[instr->definitions[0].tempId()].set_temp(instr->operands[0].getTemp());
if (ctx.info[instr->operands[0].tempId()].is_canonicalized())
ctx.info[instr->definitions[0].tempId()].set_canonicalized();
} else {
assert(instr->operands[0].isFixed());
}
break;
case aco_opcode::v_mov_b32:
if (instr->isDPP16()) {
/* anything else doesn't make sense in SSA */
assert(instr->dpp16().row_mask == 0xf && instr->dpp16().bank_mask == 0xf);
ctx.info[instr->definitions[0].tempId()].set_dpp16(instr.get());
} else if (instr->isDPP8()) {
ctx.info[instr->definitions[0].tempId()].set_dpp8(instr.get());
}
break;
case aco_opcode::p_is_helper:
if (!ctx.program->needs_wqm)
ctx.info[instr->definitions[0].tempId()].set_constant(ctx.program->gfx_level, 0u);
break;
case aco_opcode::v_mul_f64: ctx.info[instr->definitions[0].tempId()].set_mul(instr.get()); break;
case aco_opcode::v_mul_f16:
case aco_opcode::v_mul_f32:
case aco_opcode::v_mul_legacy_f32: { /* omod */
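/* A multiply by 1.0/-1.0 labels the result as a plain (or negated) copy of
* the other operand; 2.0/4.0/0.5 label the other operand as omod2/omod4/omod5
* so the second pass can fold the multiplication into the producer's output
* modifier; 0.0 folds the result to the constant 0 when signed zero/inf/nan
* preservation allows. */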
ctx.info[instr->definitions[0].tempId()].set_mul(instr.get());
/* TODO: try to move the negate/abs modifier to the consumer instead */
bool uses_mods = instr->usesModifiers();
bool fp16 = instr->opcode == aco_opcode::v_mul_f16;
for (unsigned i = 0; i < 2; i++) {
if (instr->operands[!i].isConstant() && instr->operands[i].isTemp()) {
if (!instr->isDPP() && !instr->isSDWA() &&
(instr->operands[!i].constantEquals(fp16 ? 0x3c00 : 0x3f800000) || /* 1.0 */
instr->operands[!i].constantEquals(fp16 ? 0xbc00 : 0xbf800000u))) { /* -1.0 */
bool neg1 = instr->operands[!i].constantEquals(fp16 ? 0xbc00 : 0xbf800000u);
VOP3_instruction* vop3 = instr->isVOP3() ? &instr->vop3() : NULL;
if (vop3 && (vop3->abs[!i] || vop3->neg[!i] || vop3->clamp || vop3->omod))
continue;
bool abs = vop3 && vop3->abs[i];
bool neg = neg1 ^ (vop3 && vop3->neg[i]);
Temp other = instr->operands[i].getTemp();
if (abs && neg && other.type() == RegType::vgpr)
ctx.info[instr->definitions[0].tempId()].set_neg_abs(other);
else if (abs && !neg && other.type() == RegType::vgpr)
ctx.info[instr->definitions[0].tempId()].set_abs(other);
else if (!abs && neg && other.type() == RegType::vgpr)
ctx.info[instr->definitions[0].tempId()].set_neg(other);
else if (!abs && !neg)
ctx.info[instr->definitions[0].tempId()].set_fcanonicalize(other);
} else if (uses_mods) {
continue;
} else if (instr->operands[!i].constantValue() ==
(fp16 ? 0x4000 : 0x40000000)) { /* 2.0 */
ctx.info[instr->operands[i].tempId()].set_omod2(instr.get());
} else if (instr->operands[!i].constantValue() ==
(fp16 ? 0x4400 : 0x40800000)) { /* 4.0 */
ctx.info[instr->operands[i].tempId()].set_omod4(instr.get());
} else if (instr->operands[!i].constantValue() ==
(fp16 ? 0x3800 : 0x3f000000)) { /* 0.5 */
ctx.info[instr->operands[i].tempId()].set_omod5(instr.get());
} else if (instr->operands[!i].constantValue() == 0u &&
(!(fp16 ? ctx.fp_mode.preserve_signed_zero_inf_nan16_64
: ctx.fp_mode.preserve_signed_zero_inf_nan32) ||
instr->opcode == aco_opcode::v_mul_legacy_f32)) { /* 0.0 */
ctx.info[instr->definitions[0].tempId()].set_constant(ctx.program->gfx_level, 0u);
} else {
continue;
}
break;
}
}
break;
}
case aco_opcode::v_mul_lo_u16:
case aco_opcode::v_mul_lo_u16_e64:
case aco_opcode::v_mul_u32_u24:
ctx.info[instr->definitions[0].tempId()].set_usedef(instr.get());
break;
case aco_opcode::v_med3_f16:
case aco_opcode::v_med3_f32: { /* clamp */
VOP3_instruction& vop3 = instr->vop3();
if (vop3.abs[0] || vop3.abs[1] || vop3.abs[2] || vop3.neg[0] || vop3.neg[1] || vop3.neg[2] ||
vop3.omod != 0 || vop3.opsel != 0)
break;
unsigned idx = 0;
bool found_zero = false, found_one = false;
bool is_fp16 = instr->opcode == aco_opcode::v_med3_f16;
for (unsigned i = 0; i < 3; i++) {
if (instr->operands[i].constantEquals(0))
found_zero = true;
else if (instr->operands[i].constantEquals(is_fp16 ? 0x3c00 : 0x3f800000)) /* 1.0 */
found_one = true;
else
idx = i;
}
if (found_zero && found_one && instr->operands[idx].isTemp())
ctx.info[instr->operands[idx].tempId()].set_clamp(instr.get());
break;
}
case aco_opcode::v_cndmask_b32:
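/* v_cndmask_b32(0, -1, cond) is a copy of the boolean cond,
* (0, 1.0f, cond) is bool->float and (0, 1, cond) is bool->int */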
if (instr->operands[0].constantEquals(0) && instr->operands[1].constantEquals(0xFFFFFFFF))
ctx.info[instr->definitions[0].tempId()].set_vcc(instr->operands[2].getTemp());
else if (instr->operands[0].constantEquals(0) &&
instr->operands[1].constantEquals(0x3f800000u))
ctx.info[instr->definitions[0].tempId()].set_b2f(instr->operands[2].getTemp());
else if (instr->operands[0].constantEquals(0) && instr->operands[1].constantEquals(1))
ctx.info[instr->definitions[0].tempId()].set_b2i(instr->operands[2].getTemp());
break;
case aco_opcode::v_cmp_lg_u32:
if (instr->format == Format::VOPC && /* don't optimize VOP3 / SDWA / DPP */
instr->operands[0].constantEquals(0) && instr->operands[1].isTemp() &&
ctx.info[instr->operands[1].tempId()].is_vcc())
ctx.info[instr->definitions[0].tempId()].set_temp(
ctx.info[instr->operands[1].tempId()].temp);
break;
case aco_opcode::p_linear_phi: {
/* lower_bool_phis() can create phis like this */
bool all_same_temp = instr->operands[0].isTemp();
/* this check is needed when moving uniform loop counters out of a divergent loop */
if (all_same_temp)
all_same_temp = instr->definitions[0].regClass() == instr->operands[0].regClass();
for (unsigned i = 1; all_same_temp && (i < instr->operands.size()); i++) {
if (!instr->operands[i].isTemp() ||
instr->operands[i].tempId() != instr->operands[0].tempId())
all_same_temp = false;
}
if (all_same_temp) {
ctx.info[instr->definitions[0].tempId()].set_temp(instr->operands[0].getTemp());
} else {
bool all_undef = instr->operands[0].isUndefined();
for (unsigned i = 1; all_undef && (i < instr->operands.size()); i++) {
if (!instr->operands[i].isUndefined())
all_undef = false;
}
if (all_undef)
ctx.info[instr->definitions[0].tempId()].set_undefined();
}
break;
}
case aco_opcode::v_add_u32:
case aco_opcode::v_add_co_u32:
case aco_opcode::v_add_co_u32_e64:
case aco_opcode::s_add_i32:
case aco_opcode::s_add_u32:
case aco_opcode::v_subbrev_co_u32:
case aco_opcode::v_sub_u32:
case aco_opcode::v_sub_i32:
case aco_opcode::v_sub_co_u32:
case aco_opcode::v_sub_co_u32_e64:
case aco_opcode::s_sub_u32:
case aco_opcode::s_sub_i32:
case aco_opcode::v_subrev_u32:
case aco_opcode::v_subrev_co_u32:
case aco_opcode::v_subrev_co_u32_e64:
ctx.info[instr->definitions[0].tempId()].set_add_sub(instr.get());
break;
case aco_opcode::s_not_b32:
case aco_opcode::s_not_b64:
if (ctx.info[instr->operands[0].tempId()].is_uniform_bool()) {
ctx.info[instr->definitions[0].tempId()].set_uniform_bitwise();
ctx.info[instr->definitions[1].tempId()].set_scc_invert(
ctx.info[instr->operands[0].tempId()].temp);
} else if (ctx.info[instr->operands[0].tempId()].is_uniform_bitwise()) {
ctx.info[instr->definitions[0].tempId()].set_uniform_bitwise();
ctx.info[instr->definitions[1].tempId()].set_scc_invert(
ctx.info[instr->operands[0].tempId()].instr->definitions[1].getTemp());
}
ctx.info[instr->definitions[0].tempId()].set_bitwise(instr.get());
break;
case aco_opcode::s_and_b32:
case aco_opcode::s_and_b64:
if (fixed_to_exec(instr->operands[1]) && instr->operands[0].isTemp()) {
if (ctx.info[instr->operands[0].tempId()].is_uniform_bool()) {
/* Try to get rid of the superfluous s_cselect + s_and_b64 that comes from
* turning a uniform bool into a divergent one */
ctx.info[instr->definitions[1].tempId()].set_temp(
ctx.info[instr->operands[0].tempId()].temp);
ctx.info[instr->definitions[0].tempId()].set_uniform_bool(
ctx.info[instr->operands[0].tempId()].temp);
break;
} else if (ctx.info[instr->operands[0].tempId()].is_uniform_bitwise()) {
/* Try to get rid of the superfluous s_and_b64, since the uniform bitwise instruction
* already produces the same SCC */
ctx.info[instr->definitions[1].tempId()].set_temp(
ctx.info[instr->operands[0].tempId()].instr->definitions[1].getTemp());
ctx.info[instr->definitions[0].tempId()].set_uniform_bool(
ctx.info[instr->operands[0].tempId()].instr->definitions[1].getTemp());
break;
} else if ((ctx.program->stage.num_sw_stages() > 1 ||
ctx.program->stage.hw == HWStage::NGG) &&
instr->pass_flags == 1) {
/* In case of merged shaders, pass_flags=1 means that all lanes are active (exec=-1), so
* s_and is unnecessary. */
ctx.info[instr->definitions[0].tempId()].set_temp(instr->operands[0].getTemp());
break;
}
}
FALLTHROUGH;
case aco_opcode::s_or_b32:
case aco_opcode::s_or_b64:
case aco_opcode::s_xor_b32:
case aco_opcode::s_xor_b64:
if (std::all_of(instr->operands.begin(), instr->operands.end(),
[&ctx](const Operand& op)
{
return op.isTemp() && (ctx.info[op.tempId()].is_uniform_bool() ||
ctx.info[op.tempId()].is_uniform_bitwise());
})) {
ctx.info[instr->definitions[0].tempId()].set_uniform_bitwise();
}
ctx.info[instr->definitions[0].tempId()].set_bitwise(instr.get());
break;
case aco_opcode::s_lshl_b32:
case aco_opcode::v_or_b32:
case aco_opcode::v_lshlrev_b32:
case aco_opcode::v_bcnt_u32_b32:
case aco_opcode::v_and_b32:
case aco_opcode::v_xor_b32:
ctx.info[instr->definitions[0].tempId()].set_usedef(instr.get());
break;
case aco_opcode::v_min_f32:
case aco_opcode::v_min_f16:
case aco_opcode::v_min_u32:
case aco_opcode::v_min_i32:
case aco_opcode::v_min_u16:
case aco_opcode::v_min_i16:
case aco_opcode::v_min_u16_e64:
case aco_opcode::v_min_i16_e64:
case aco_opcode::v_max_f32:
case aco_opcode::v_max_f16:
case aco_opcode::v_max_u32:
case aco_opcode::v_max_i32:
case aco_opcode::v_max_u16:
case aco_opcode::v_max_i16:
case aco_opcode::v_max_u16_e64:
case aco_opcode::v_max_i16_e64:
ctx.info[instr->definitions[0].tempId()].set_minmax(instr.get());
break;
case aco_opcode::s_cselect_b64:
case aco_opcode::s_cselect_b32:
if (instr->operands[0].constantEquals((unsigned)-1) && instr->operands[1].constantEquals(0)) {
/* Found a cselect that operates on a uniform bool that comes from e.g. s_cmp */
ctx.info[instr->definitions[0].tempId()].set_uniform_bool(instr->operands[2].getTemp());
}
if (instr->operands[2].isTemp() && ctx.info[instr->operands[2].tempId()].is_scc_invert()) {
/* Flip the operands to get rid of the scc_invert instruction */
std::swap(instr->operands[0], instr->operands[1]);
instr->operands[2].setTemp(ctx.info[instr->operands[2].tempId()].temp);
}
break;
case aco_opcode::p_wqm:
if (instr->operands[0].isTemp() && ctx.info[instr->operands[0].tempId()].is_scc_invert()) {
ctx.info[instr->definitions[0].tempId()].set_temp(instr->operands[0].getTemp());
}
break;
case aco_opcode::s_mul_i32:
/* Testing every uint32_t shows that 0x3f800000*n is never a denormal.
* This pattern is created from a uniform nir_op_b2f. */
if (instr->operands[0].constantEquals(0x3f800000u))
ctx.info[instr->definitions[0].tempId()].set_canonicalized();
break;
case aco_opcode::p_extract: {
if (instr->definitions[0].bytes() == 4) {
ctx.info[instr->definitions[0].tempId()].set_extract(instr.get());
if (instr->operands[0].regClass() == v1 && parse_insert(instr.get()))
ctx.info[instr->operands[0].tempId()].set_insert(instr.get());
}
break;
}
case aco_opcode::p_insert: {
if (instr->operands[0].bytes() == 4) {
if (instr->operands[0].regClass() == v1)
ctx.info[instr->operands[0].tempId()].set_insert(instr.get());
if (parse_extract(instr.get()))
ctx.info[instr->definitions[0].tempId()].set_extract(instr.get());
ctx.info[instr->definitions[0].tempId()].set_bitwise(instr.get());
}
break;
}
case aco_opcode::ds_read_u8:
case aco_opcode::ds_read_u8_d16:
case aco_opcode::ds_read_u16:
case aco_opcode::ds_read_u16_d16: {
ctx.info[instr->definitions[0].tempId()].set_usedef(instr.get());
break;
}
case aco_opcode::v_cvt_f16_f32: {
if (instr->operands[0].isTemp())
ctx.info[instr->operands[0].tempId()].set_f2f16(instr.get());
break;
}
case aco_opcode::v_cvt_f32_f16: {
if (instr->operands[0].isTemp())
ctx.info[instr->definitions[0].tempId()].set_f2f32(instr.get());
break;
}
default: break;
}
/* Don't remove label_extract if we can't apply the extract to
* neg/abs instructions because we'll likely combine it into another VALU
* instruction. */
if (!(ctx.info[instr->definitions[0].tempId()].label & (label_neg | label_abs)))
check_sdwa_extract(ctx, instr);
}
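/* Returns the id of the temp this value was copy-propagated from, or the id
* of the temp itself if it isn't a copy. */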
unsigned
original_temp_id(opt_ctx& ctx, Temp tmp)
{
if (ctx.info[tmp.id()].is_temp())
return ctx.info[tmp.id()].temp.id();
else
return tmp.id();
}
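/* Removes one use of the instruction's result. If the result thereby becomes
* dead, the uses of the instruction's operands are dropped as well. */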
void
decrease_uses(opt_ctx& ctx, Instruction* instr)
{
if (!--ctx.uses[instr->definitions[0].tempId()]) {
for (const Operand& op : instr->operands) {
if (op.isTemp())
ctx.uses[op.tempId()]--;
}
}
}
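/* Returns the instruction defining op if the optimizer may look through it:
* the operand must carry an instruction usedef label and, unless ignore_uses
* is set, have no other uses. */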
Instruction*
follow_operand(opt_ctx& ctx, Operand op, bool ignore_uses = false)
{
if (!op.isTemp() || !(ctx.info[op.tempId()].label & instr_usedef_labels))
return nullptr;
if (!ignore_uses && ctx.uses[op.tempId()] > 1)
return nullptr;
Instruction* instr = ctx.info[op.tempId()].instr;
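/* a second definition (e.g. SCC or a carry-out) must be dead, because
* removing the instruction would remove it too */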
if (instr->definitions.size() == 2) {
assert(instr->definitions[0].isTemp() && instr->definitions[0].tempId() == op.tempId());
if (instr->definitions[1].isTemp() && ctx.uses[instr->definitions[1].tempId()])
return nullptr;
}
return instr;
}
/* s_or_b64(neq(a, a), neq(b, b)) -> v_cmp_u_f32(a, b)
* s_and_b64(eq(a, a), eq(b, b)) -> v_cmp_o_f32(a, b) */
bool
combine_ordering_test(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
if (instr->definitions[0].regClass() != ctx.program->lane_mask)
return false;
if (instr->definitions[1].isTemp() && ctx.uses[instr->definitions[1].tempId()])
return false;
bool is_or = instr->opcode == aco_opcode::s_or_b64 || instr->opcode == aco_opcode::s_or_b32;
bool neg[2] = {false, false};
bool abs[2] = {false, false};
uint8_t opsel = 0;
Instruction* op_instr[2];
Temp op[2];
unsigned bitsize = 0;
for (unsigned i = 0; i < 2; i++) {
op_instr[i] = follow_operand(ctx, instr->operands[i], true);
if (!op_instr[i])
return false;
aco_opcode expected_cmp = is_or ? aco_opcode::v_cmp_neq_f32 : aco_opcode::v_cmp_eq_f32;
unsigned op_bitsize = get_cmp_bitsize(op_instr[i]->opcode);
if (get_f32_cmp(op_instr[i]->opcode) != expected_cmp)
return false;
if (bitsize && op_bitsize != bitsize)
return false;
if (!op_instr[i]->operands[0].isTemp() || !op_instr[i]->operands[1].isTemp())
return false;
if (op_instr[i]->isVOP3()) {
VOP3_instruction& vop3 = op_instr[i]->vop3();
if (vop3.neg[0] != vop3.neg[1] || vop3.abs[0] != vop3.abs[1] || vop3.opsel == 1 ||
vop3.opsel == 2)
return false;
neg[i] = vop3.neg[0];
abs[i] = vop3.abs[0];
opsel |= (vop3.opsel & 1) << i;
} else if (op_instr[i]->isSDWA()) {
return false;
}
Temp op0 = op_instr[i]->operands[0].getTemp();
Temp op1 = op_instr[i]->operands[1].getTemp();
if (original_temp_id(ctx, op0) != original_temp_id(ctx, op1))
return false;
op[i] = op1;
bitsize = op_bitsize;
}
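/* Put an SGPR operand first: plain VOPC only accepts an SGPR as src0.
* Two SGPR operands need the constant-bus limit of 2 that GFX10 introduced
* (and the VOP3 encoding, which is created below). */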
if (op[1].type() == RegType::sgpr)
std::swap(op[0], op[1]);
unsigned num_sgprs = (op[0].type() == RegType::sgpr) + (op[1].type() == RegType::sgpr);
if (num_sgprs > (ctx.program->gfx_level >= GFX10 ? 2 : 1))
return false;
ctx.uses[op[0].id()]++;
ctx.uses[op[1].id()]++;
decrease_uses(ctx, op_instr[0]);
decrease_uses(ctx, op_instr[1]);
aco_opcode new_op = aco_opcode::num_opcodes;
switch (bitsize) {
case 16: new_op = is_or ? aco_opcode::v_cmp_u_f16 : aco_opcode::v_cmp_o_f16; break;
case 32: new_op = is_or ? aco_opcode::v_cmp_u_f32 : aco_opcode::v_cmp_o_f32; break;
case 64: new_op = is_or ? aco_opcode::v_cmp_u_f64 : aco_opcode::v_cmp_o_f64; break;
}
Instruction* new_instr;
if (neg[0] || neg[1] || abs[0] || abs[1] || opsel || num_sgprs > 1) {
VOP3_instruction* vop3 =
create_instruction<VOP3_instruction>(new_op, asVOP3(Format::VOPC), 2, 1);
for (unsigned i = 0; i < 2; i++) {
vop3->neg[i] = neg[i];
vop3->abs[i] = abs[i];
}
vop3->opsel = opsel;
new_instr = static_cast<Instruction*>(vop3);
} else {
new_instr = create_instruction<VOPC_instruction>(new_op, Format::VOPC, 2, 1);
}
new_instr->operands[0] = Operand(op[0]);
new_instr->operands[1] = Operand(op[1]);
new_instr->definitions[0] = instr->definitions[0];
ctx.info[instr->definitions[0].tempId()].label = 0;
ctx.info[instr->definitions[0].tempId()].set_vopc(new_instr);
instr.reset(new_instr);
return true;
}
/* s_or_b64(v_cmp_u_f32(a, b), cmp(a, b)) -> get_unordered(cmp)(a, b)
* s_and_b64(v_cmp_o_f32(a, b), cmp(a, b)) -> get_ordered(cmp)(a, b) */
bool
combine_comparison_ordering(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
if (instr->definitions[0].regClass() != ctx.program->lane_mask)
return false;
if (instr->definitions[1].isTemp() && ctx.uses[instr->definitions[1].tempId()])
return false;
bool is_or = instr->opcode == aco_opcode::s_or_b64 || instr->opcode == aco_opcode::s_or_b32;
aco_opcode expected_nan_test = is_or ? aco_opcode::v_cmp_u_f32 : aco_opcode::v_cmp_o_f32;
Instruction* nan_test = follow_operand(ctx, instr->operands[0], true);
Instruction* cmp = follow_operand(ctx, instr->operands[1], true);
if (!nan_test || !cmp)
return false;
if (nan_test->isSDWA() || cmp->isSDWA())
return false;
if (get_f32_cmp(cmp->opcode) == expected_nan_test)
std::swap(nan_test, cmp);
else if (get_f32_cmp(nan_test->opcode) != expected_nan_test)
return false;
if (!is_fp_cmp(cmp->opcode) || get_cmp_bitsize(cmp->opcode) != get_cmp_bitsize(nan_test->opcode))
return false;
if (!nan_test->operands[0].isTemp() || !nan_test->operands[1].isTemp())
return false;
if (!cmp->operands[0].isTemp() || !cmp->operands[1].isTemp())
return false;
unsigned prop_cmp0 = original_temp_id(ctx, cmp->operands[0].getTemp());
unsigned prop_cmp1 = original_temp_id(ctx, cmp->operands[1].getTemp());
unsigned prop_nan0 = original_temp_id(ctx, nan_test->operands[0].getTemp());
unsigned prop_nan1 = original_temp_id(ctx, nan_test->operands[1].getTemp());
if (prop_cmp0 != prop_nan0 && prop_cmp0 != prop_nan1)
return false;
if (prop_cmp1 != prop_nan0 && prop_cmp1 != prop_nan1)
return false;
ctx.uses[cmp->operands[0].tempId()]++;
ctx.uses[cmp->operands[1].tempId()]++;
decrease_uses(ctx, nan_test);
decrease_uses(ctx, cmp);
aco_opcode new_op = is_or ? get_unordered(cmp->opcode) : get_ordered(cmp->opcode);
Instruction* new_instr;
if (cmp->isVOP3()) {
VOP3_instruction* new_vop3 =
create_instruction<VOP3_instruction>(new_op, asVOP3(Format::VOPC), 2, 1);
VOP3_instruction& cmp_vop3 = cmp->vop3();
memcpy(new_vop3->abs, cmp_vop3.abs, sizeof(new_vop3->abs));
memcpy(new_vop3->neg, cmp_vop3.neg, sizeof(new_vop3->neg));
new_vop3->clamp = cmp_vop3.clamp;
new_vop3->omod = cmp_vop3.omod;
new_vop3->opsel = cmp_vop3.opsel;
new_instr = new_vop3;
} else {
new_instr = create_instruction<VOPC_instruction>(new_op, Format::VOPC, 2, 1);
}
new_instr->operands[0] = cmp->operands[0];
new_instr->operands[1] = cmp->operands[1];
new_instr->definitions[0] = instr->definitions[0];
ctx.info[instr->definitions[0].tempId()].label = 0;
ctx.info[instr->definitions[0].tempId()].set_vopc(new_instr);
instr.reset(new_instr);
return true;
}
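/* Stores op's value in *value if op is an inline constant/literal, or a temp
* known to contain one; returns whether a constant was found. */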
bool
is_operand_constant(opt_ctx& ctx, Operand op, unsigned bit_size, uint64_t* value)
{
if (op.isConstant()) {
*value = op.constantValue64();
return true;
} else if (op.isTemp()) {
unsigned id = original_temp_id(ctx, op.getTemp());
if (!ctx.info[id].is_constant_or_literal(bit_size))
return false;
*value = get_constant_op(ctx, ctx.info[id], bit_size).constantValue64();
return true;
}
return false;
}
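/* IEEE-754 NaN: all exponent bits set and a non-zero mantissa
* (5/10, 8/23 and 11/52 exponent/mantissa bits for 16, 32 and 64 bit). */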
bool
is_constant_nan(uint64_t value, unsigned bit_size)
{
if (bit_size == 16)
return ((value >> 10) & 0x1f) == 0x1f && (value & 0x3ff);
else if (bit_size == 32)
return ((value >> 23) & 0xff) == 0xff && (value & 0x7fffff);
else
return ((value >> 52) & 0x7ff) == 0x7ff && (value & 0xfffffffffffff);
}
/* s_or_b64(v_cmp_neq_f32(a, a), cmp(a, #b)) and b is not NaN -> get_unordered(cmp)(a, b)
* s_and_b64(v_cmp_eq_f32(a, a), cmp(a, #b)) and b is not NaN -> get_ordered(cmp)(a, b) */
bool
combine_constant_comparison_ordering(opt_ctx& ctx, aco_ptr<Instruction>& instr)
{
if (instr->definitions[0].regClass() != ctx.program->lane_mask)
return false;
if (instr->definitions[1].isTemp() && ctx.uses[instr->definitions[1].tempId()])
return false;
bool is_or = instr->opcode == aco_opcode::s_or_b64 || instr->opcode == aco_opcode::s_or_b32;
Instruction* nan_test = follow_operand(ctx, instr->operands[0], true);
Instruction* cmp = follow_operand(ctx, instr->operands[1], true);
if (!nan_test || !cmp || nan_test->isSDWA() || cmp->isSDWA())
return false;
aco_opcode expected_nan_test = is_or ? aco_opcode::v_cmp_neq_f32 : aco_opcode::v_cmp_eq_f32;
if (get_f32_cmp(cmp->opcode) == expected_nan_test)
std::swap(nan_test, cmp);
else if (get_f32_cmp(nan_test->opcode) != expected_nan_test)
return false;
unsigned bit_size = get_cmp_bitsize(cmp->opcode);
if (!is_fp_cmp(cmp->opcode) || get_cmp_bitsize(nan_test->opcode) != bit_size)
return false;
if (!nan_test->operands[0].isTemp() || !nan_test->operands[1].isTemp())
return false;
if (!cmp->operands[0].isTemp() && !cmp->operands[1].isTemp())
return false;
unsigned prop_nan0 = original_temp_id(ctx, nan_test->operands[0].getTemp());
unsigned prop_nan1 = original_temp_id(ctx, nan_test->operands[1].getTemp());
if (prop_nan0 != prop_nan1)
return false;
if (nan_test->isVOP3()) {
VOP3_instruction& vop3 = nan_test->vop3();
if (vop3.neg[0] != vop3.neg[1] || vop3.abs[0] != vop3.abs[1] || vop3.opsel == 1 ||
vop3.opsel == 2)
return false;
}
int constant_operand = -1;
for (unsigned i = 0; i < 2; i++) {
if (cmp->operands[i].isTemp() &&
original_temp_id(ctx, cmp->operands[i].getTemp()) == prop_nan0) {
constant_operand = !i;
break;
}
}
if (constant_operand == -1)
return false;
uint64_t constant_value;
if (!is_operand_constant(ctx, cmp->operands[constant_operand], bit_size, &constant_value))
return false;
if (is_constant_nan(constant_value, bit_size))
return false;
if (cmp->operands[0].isTemp())
ctx.uses[cmp->operands[0].tempId()]++;
if (cmp->operands[1].isTemp())
ctx.uses[cmp->operands[1].tempId()]++;
decrease_uses(ctx, nan_test);
decrease_uses(ctx, cmp);
aco_opcode new_op = is_or ? get_unordered(cmp->opcode) : get_ordered(cmp->opcode);
Instruction* new_instr;
if (cmp->isVOP3()) {
VOP3_instruction* new_vop3 =
create_instruction<VOP3_instruction>(new_op, asVOP3(Format::VOPC), 2, 1);
VOP3_instruction& cmp_vop3 = cmp->vop3();
memcpy(new_vop3->abs, cmp_vop3.abs, sizeof(new_vop3->abs));
memcpy(new_vop3->neg, cmp_vop3.neg, sizeof(new_vop3->neg));
new_vop3->clamp = cmp_vop3.clamp;
new_vop3->omod = cmp_vop3.omod;
new_vop3->opsel = cmp_vop3.opsel;
new_instr = new_vop3;
} else {
new_instr = create_instruction<VOPC_instruction>(new_op, Format::VOPC, 2, 1);
}
new_instr->operands[0] = cmp->operands[0];
new_instr->operands[1] = cmp->operands[1];
new_instr->definitions[0] = instr->definitions[0];
ctx.info[instr->definitions[0].tempId()].label = 0;
ctx.info[instr->definitions[0].tempId()].set_vopc(new_instr);
instr.reset(new_instr);
return true;
}
/* s_not(cmp(a, b)) -> get_inverse(cmp)(a, b) */
bool
combine_inverse_comparison(opt_ctx& ctx