| /************************************************************************** |
| * |
| * Copyright 2007-2008 VMware, Inc. |
| * All Rights Reserved. |
| * Copyright 2009-2010 VMware, Inc. All Rights Reserved. |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a |
| * copy of this software and associated documentation files (the |
| * "Software"), to deal in the Software without restriction, including |
| * without limitation the rights to use, copy, modify, merge, publish, |
| * distribute, sub license, and/or sell copies of the Software, and to |
| * permit persons to whom the Software is furnished to do so, subject to |
| * the following conditions: |
| * |
| * The above copyright notice and this permission notice (including the |
| * next paragraph) shall be included in all copies or substantial portions |
| * of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
| * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
| * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR |
| * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
| * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
| * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| * |
| **************************************************************************/ |
| |
| /** |
| * TGSI interpreter/executor. |
| * |
| * Flow control information: |
| * |
| * Since we operate on 'quads' (4 pixels or 4 vertices in parallel), |
| * flow control statements (IF/ELSE/ENDIF, LOOP/ENDLOOP) need special |
| * care: a condition may be true for some quad components but false |
| * for others. |
| * |
| * We basically execute all statements (even if they're in the part of |
| * an IF/ELSE clause that's "not taken") and use a special mask to |
| * control writing to destination registers. This is the ExecMask. |
| * See store_dest(). |
| * |
| * The ExecMask is computed from the CondMask, LoopMask and ContMask |
| * (controlled by the IF/ELSE/ENDIF, LOOP/ENDLOOP and CONT instructions) |
| * combined with the switch and function-call masks; see UPDATE_EXEC_MASK(). |
| * |
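| * As an illustrative sketch (not literal TGSI syntax), a conditional |
| * such as |
| * |
| *   IF cond |
| *     MOV TEMP[0], IMM[0] |
| *   ELSE |
| *     MOV TEMP[0], IMM[1] |
| *   ENDIF |
| * |
| * executes both MOV instructions; IF/ELSE update CondMask with the |
| * per-component condition (and its complement), and store_dest() only |
| * writes the TEMP[0] components whose ExecMask bit is set. |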
| * |
| * Authors: |
| * Michal Krol |
| * Brian Paul |
| */ |
| |
| #include "pipe/p_compiler.h" |
| #include "pipe/p_state.h" |
| #include "pipe/p_shader_tokens.h" |
| #include "tgsi/tgsi_dump.h" |
| #include "tgsi/tgsi_parse.h" |
| #include "tgsi/tgsi_util.h" |
| #include "tgsi_exec.h" |
| #include "util/compiler.h" |
| #include "util/half_float.h" |
| #include "util/u_memory.h" |
| #include "util/u_math.h" |
| #include "util/rounding.h" |
| |
| |
| #define DEBUG_EXECUTION 0 |
| |
| |
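| /* Positions of the four fragments (or vertices) within a 2x2 quad. |
| * Element i of a tgsi_exec_channel corresponds to these positions; |
| * the DDX/DDY micro-ops below difference values across them. |
| */ |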
| #define TILE_TOP_LEFT 0 |
| #define TILE_TOP_RIGHT 1 |
| #define TILE_BOTTOM_LEFT 2 |
| #define TILE_BOTTOM_RIGHT 3 |
| |
| union tgsi_double_channel { |
| double d[TGSI_QUAD_SIZE]; |
| unsigned u[TGSI_QUAD_SIZE][2]; |
| uint64_t u64[TGSI_QUAD_SIZE]; |
| int64_t i64[TGSI_QUAD_SIZE]; |
| } ALIGN16; |
| |
| struct ALIGN16 tgsi_double_vector { |
| union tgsi_double_channel xy; |
| union tgsi_double_channel zw; |
| }; |
| |
| static void |
| micro_abs(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = fabsf(src->f[0]); |
| dst->f[1] = fabsf(src->f[1]); |
| dst->f[2] = fabsf(src->f[2]); |
| dst->f[3] = fabsf(src->f[3]); |
| } |
| |
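| /* ARL: load the address register with floor(src); ARR (below) rounds |
| * to nearest instead. |
| */ |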
| static void |
| micro_arl(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->i[0] = (int)floorf(src->f[0]); |
| dst->i[1] = (int)floorf(src->f[1]); |
| dst->i[2] = (int)floorf(src->f[2]); |
| dst->i[3] = (int)floorf(src->f[3]); |
| } |
| |
| static void |
| micro_arr(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->i[0] = (int)floorf(src->f[0] + 0.5f); |
| dst->i[1] = (int)floorf(src->f[1] + 0.5f); |
| dst->i[2] = (int)floorf(src->f[2] + 0.5f); |
| dst->i[3] = (int)floorf(src->f[3] + 0.5f); |
| } |
| |
| static void |
| micro_ceil(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = ceilf(src->f[0]); |
| dst->f[1] = ceilf(src->f[1]); |
| dst->f[2] = ceilf(src->f[2]); |
| dst->f[3] = ceilf(src->f[3]); |
| } |
| |
| static void |
| micro_cmp(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1, |
| const union tgsi_exec_channel *src2) |
| { |
| dst->f[0] = src0->f[0] < 0.0f ? src1->f[0] : src2->f[0]; |
| dst->f[1] = src0->f[1] < 0.0f ? src1->f[1] : src2->f[1]; |
| dst->f[2] = src0->f[2] < 0.0f ? src1->f[2] : src2->f[2]; |
| dst->f[3] = src0->f[3] < 0.0f ? src1->f[3] : src2->f[3]; |
| } |
| |
| static void |
| micro_cos(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = cosf(src->f[0]); |
| dst->f[1] = cosf(src->f[1]); |
| dst->f[2] = cosf(src->f[2]); |
| dst->f[3] = cosf(src->f[3]); |
| } |
| |
| static void |
| micro_d2f(union tgsi_exec_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->f[0] = (float)src->d[0]; |
| dst->f[1] = (float)src->d[1]; |
| dst->f[2] = (float)src->d[2]; |
| dst->f[3] = (float)src->d[3]; |
| } |
| |
| static void |
| micro_d2i(union tgsi_exec_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i[0] = (int)src->d[0]; |
| dst->i[1] = (int)src->d[1]; |
| dst->i[2] = (int)src->d[2]; |
| dst->i[3] = (int)src->d[3]; |
| } |
| |
| static void |
| micro_d2u(union tgsi_exec_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0] = (unsigned)src->d[0]; |
| dst->u[1] = (unsigned)src->d[1]; |
| dst->u[2] = (unsigned)src->d[2]; |
| dst->u[3] = (unsigned)src->d[3]; |
| } |
| |
| static void |
| micro_dabs(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = src->d[0] >= 0.0 ? src->d[0] : -src->d[0]; |
| dst->d[1] = src->d[1] >= 0.0 ? src->d[1] : -src->d[1]; |
| dst->d[2] = src->d[2] >= 0.0 ? src->d[2] : -src->d[2]; |
| dst->d[3] = src->d[3] >= 0.0 ? src->d[3] : -src->d[3]; |
| } |
| |
| static void |
| micro_dadd(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = src[0].d[0] + src[1].d[0]; |
| dst->d[1] = src[0].d[1] + src[1].d[1]; |
| dst->d[2] = src[0].d[2] + src[1].d[2]; |
| dst->d[3] = src[0].d[3] + src[1].d[3]; |
| } |
| |
| static void |
| micro_ddiv(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = src[0].d[0] / src[1].d[0]; |
| dst->d[1] = src[0].d[1] / src[1].d[1]; |
| dst->d[2] = src[0].d[2] / src[1].d[2]; |
| dst->d[3] = src[0].d[3] / src[1].d[3]; |
| } |
| |
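| /* Screen-space derivative approximations across the 2x2 quad. |
| * The coarse DDX/DDY variants broadcast a single difference to all |
| * four quad members; the _fine variants compute one difference per |
| * row (DDX_FINE) or per column (DDY_FINE). |
| */ |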
| static void |
| micro_ddx(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = |
| dst->f[1] = |
| dst->f[2] = |
| dst->f[3] = src->f[TILE_BOTTOM_RIGHT] - src->f[TILE_BOTTOM_LEFT]; |
| } |
| |
| static void |
| micro_ddx_fine(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = |
| dst->f[1] = src->f[TILE_TOP_RIGHT] - src->f[TILE_TOP_LEFT]; |
| dst->f[2] = |
| dst->f[3] = src->f[TILE_BOTTOM_RIGHT] - src->f[TILE_BOTTOM_LEFT]; |
| } |
| |
| |
| static void |
| micro_ddy(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = |
| dst->f[1] = |
| dst->f[2] = |
| dst->f[3] = src->f[TILE_BOTTOM_LEFT] - src->f[TILE_TOP_LEFT]; |
| } |
| |
| static void |
| micro_ddy_fine(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = |
| dst->f[2] = src->f[TILE_BOTTOM_LEFT] - src->f[TILE_TOP_LEFT]; |
| dst->f[1] = |
| dst->f[3] = src->f[TILE_BOTTOM_RIGHT] - src->f[TILE_TOP_RIGHT]; |
| } |
| |
| static void |
| micro_dmul(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = src[0].d[0] * src[1].d[0]; |
| dst->d[1] = src[0].d[1] * src[1].d[1]; |
| dst->d[2] = src[0].d[2] * src[1].d[2]; |
| dst->d[3] = src[0].d[3] * src[1].d[3]; |
| } |
| |
| static void |
| micro_dmax(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = fmax(src[0].d[0], src[1].d[0]); |
| dst->d[1] = fmax(src[0].d[1], src[1].d[1]); |
| dst->d[2] = fmax(src[0].d[2], src[1].d[2]); |
| dst->d[3] = fmax(src[0].d[3], src[1].d[3]); |
| } |
| |
| static void |
| micro_dmin(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = fmin(src[0].d[0], src[1].d[0]); |
| dst->d[1] = fmin(src[0].d[1], src[1].d[1]); |
| dst->d[2] = fmin(src[0].d[2], src[1].d[2]); |
| dst->d[3] = fmin(src[0].d[3], src[1].d[3]); |
| } |
| |
| static void |
| micro_dneg(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = -src->d[0]; |
| dst->d[1] = -src->d[1]; |
| dst->d[2] = -src->d[2]; |
| dst->d[3] = -src->d[3]; |
| } |
| |
| static void |
| micro_dslt(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].d[0] < src[1].d[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].d[1] < src[1].d[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].d[2] < src[1].d[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].d[3] < src[1].d[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_dsne(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].d[0] != src[1].d[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].d[1] != src[1].d[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].d[2] != src[1].d[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].d[3] != src[1].d[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_dsge(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].d[0] >= src[1].d[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].d[1] >= src[1].d[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].d[2] >= src[1].d[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].d[3] >= src[1].d[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_dseq(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].d[0] == src[1].d[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].d[1] == src[1].d[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].d[2] == src[1].d[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].d[3] == src[1].d[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_drcp(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = 1.0 / src->d[0]; |
| dst->d[1] = 1.0 / src->d[1]; |
| dst->d[2] = 1.0 / src->d[2]; |
| dst->d[3] = 1.0 / src->d[3]; |
| } |
| |
| static void |
| micro_dsqrt(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = sqrt(src->d[0]); |
| dst->d[1] = sqrt(src->d[1]); |
| dst->d[2] = sqrt(src->d[2]); |
| dst->d[3] = sqrt(src->d[3]); |
| } |
| |
| static void |
| micro_drsq(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = 1.0 / sqrt(src->d[0]); |
| dst->d[1] = 1.0 / sqrt(src->d[1]); |
| dst->d[2] = 1.0 / sqrt(src->d[2]); |
| dst->d[3] = 1.0 / sqrt(src->d[3]); |
| } |
| |
| static void |
| micro_dmad(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = src[0].d[0] * src[1].d[0] + src[2].d[0]; |
| dst->d[1] = src[0].d[1] * src[1].d[1] + src[2].d[1]; |
| dst->d[2] = src[0].d[2] * src[1].d[2] + src[2].d[2]; |
| dst->d[3] = src[0].d[3] * src[1].d[3] + src[2].d[3]; |
| } |
| |
| static void |
| micro_dfrac(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = src->d[0] - floor(src->d[0]); |
| dst->d[1] = src->d[1] - floor(src->d[1]); |
| dst->d[2] = src->d[2] - floor(src->d[2]); |
| dst->d[3] = src->d[3] - floor(src->d[3]); |
| } |
| |
| static void |
| micro_dflr(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = floor(src->d[0]); |
| dst->d[1] = floor(src->d[1]); |
| dst->d[2] = floor(src->d[2]); |
| dst->d[3] = floor(src->d[3]); |
| } |
| |
| static void |
| micro_dldexp(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src0, |
| union tgsi_exec_channel *src1) |
| { |
| dst->d[0] = ldexp(src0->d[0], src1->i[0]); |
| dst->d[1] = ldexp(src0->d[1], src1->i[1]); |
| dst->d[2] = ldexp(src0->d[2], src1->i[2]); |
| dst->d[3] = ldexp(src0->d[3], src1->i[3]); |
| } |
| |
| static void |
| micro_dfracexp(union tgsi_double_channel *dst, |
| union tgsi_exec_channel *dst_exp, |
| const union tgsi_double_channel *src) |
| { |
| dst->d[0] = frexp(src->d[0], &dst_exp->i[0]); |
| dst->d[1] = frexp(src->d[1], &dst_exp->i[1]); |
| dst->d[2] = frexp(src->d[2], &dst_exp->i[2]); |
| dst->d[3] = frexp(src->d[3], &dst_exp->i[3]); |
| } |
| |
| static void |
| micro_exp2(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| #ifdef DEBUG |
| /* Inf is okay for this instruction, so clamp it to silence assertions. */ |
| uint i; |
| union tgsi_exec_channel clamped; |
| |
| for (i = 0; i < 4; i++) { |
| if (src->f[i] > 127.99999f) { |
| clamped.f[i] = 127.99999f; |
| } else if (src->f[i] < -126.99999f) { |
| clamped.f[i] = -126.99999f; |
| } else { |
| clamped.f[i] = src->f[i]; |
| } |
| } |
| src = &clamped; |
| #endif /* DEBUG */ |
| |
| dst->f[0] = powf(2.0f, src->f[0]); |
| dst->f[1] = powf(2.0f, src->f[1]); |
| dst->f[2] = powf(2.0f, src->f[2]); |
| dst->f[3] = powf(2.0f, src->f[3]); |
| } |
| |
| static void |
| micro_f2d(union tgsi_double_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->d[0] = (double)src->f[0]; |
| dst->d[1] = (double)src->f[1]; |
| dst->d[2] = (double)src->f[2]; |
| dst->d[3] = (double)src->f[3]; |
| } |
| |
| static void |
| micro_flr(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = floorf(src->f[0]); |
| dst->f[1] = floorf(src->f[1]); |
| dst->f[2] = floorf(src->f[2]); |
| dst->f[3] = floorf(src->f[3]); |
| } |
| |
| static void |
| micro_frc(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = src->f[0] - floorf(src->f[0]); |
| dst->f[1] = src->f[1] - floorf(src->f[1]); |
| dst->f[2] = src->f[2] - floorf(src->f[2]); |
| dst->f[3] = src->f[3] - floorf(src->f[3]); |
| } |
| |
| static void |
| micro_i2d(union tgsi_double_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->d[0] = (double)src->i[0]; |
| dst->d[1] = (double)src->i[1]; |
| dst->d[2] = (double)src->i[2]; |
| dst->d[3] = (double)src->i[3]; |
| } |
| |
| static void |
| micro_iabs(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->i[0] = src->i[0] >= 0 ? src->i[0] : -src->i[0]; |
| dst->i[1] = src->i[1] >= 0 ? src->i[1] : -src->i[1]; |
| dst->i[2] = src->i[2] >= 0 ? src->i[2] : -src->i[2]; |
| dst->i[3] = src->i[3] >= 0 ? src->i[3] : -src->i[3]; |
| } |
| |
| static void |
| micro_ineg(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->i[0] = -src->i[0]; |
| dst->i[1] = -src->i[1]; |
| dst->i[2] = -src->i[2]; |
| dst->i[3] = -src->i[3]; |
| } |
| |
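| /* LG2: base-2 logarithm, computed as ln(x) * (1 / ln(2)), i.e. ln(x) * 1.442695. */ |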
| static void |
| micro_lg2(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = logf(src->f[0]) * 1.442695f; |
| dst->f[1] = logf(src->f[1]) * 1.442695f; |
| dst->f[2] = logf(src->f[2]) * 1.442695f; |
| dst->f[3] = logf(src->f[3]) * 1.442695f; |
| } |
| |
| static void |
| micro_lrp(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1, |
| const union tgsi_exec_channel *src2) |
| { |
| dst->f[0] = src0->f[0] * (src1->f[0] - src2->f[0]) + src2->f[0]; |
| dst->f[1] = src0->f[1] * (src1->f[1] - src2->f[1]) + src2->f[1]; |
| dst->f[2] = src0->f[2] * (src1->f[2] - src2->f[2]) + src2->f[2]; |
| dst->f[3] = src0->f[3] * (src1->f[3] - src2->f[3]) + src2->f[3]; |
| } |
| |
| static void |
| micro_mad(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1, |
| const union tgsi_exec_channel *src2) |
| { |
| dst->f[0] = src0->f[0] * src1->f[0] + src2->f[0]; |
| dst->f[1] = src0->f[1] * src1->f[1] + src2->f[1]; |
| dst->f[2] = src0->f[2] * src1->f[2] + src2->f[2]; |
| dst->f[3] = src0->f[3] * src1->f[3] + src2->f[3]; |
| } |
| |
| static void |
| micro_mov(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->u[0] = src->u[0]; |
| dst->u[1] = src->u[1]; |
| dst->u[2] = src->u[2]; |
| dst->u[3] = src->u[3]; |
| } |
| |
| static void |
| micro_rcp(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| #if 0 /* for debugging */ |
| assert(src->f[0] != 0.0f); |
| assert(src->f[1] != 0.0f); |
| assert(src->f[2] != 0.0f); |
| assert(src->f[3] != 0.0f); |
| #endif |
| dst->f[0] = 1.0f / src->f[0]; |
| dst->f[1] = 1.0f / src->f[1]; |
| dst->f[2] = 1.0f / src->f[2]; |
| dst->f[3] = 1.0f / src->f[3]; |
| } |
| |
| static void |
| micro_rnd(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = _mesa_roundevenf(src->f[0]); |
| dst->f[1] = _mesa_roundevenf(src->f[1]); |
| dst->f[2] = _mesa_roundevenf(src->f[2]); |
| dst->f[3] = _mesa_roundevenf(src->f[3]); |
| } |
| |
| static void |
| micro_rsq(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| #if 0 /* for debugging */ |
| assert(src->f[0] != 0.0f); |
| assert(src->f[1] != 0.0f); |
| assert(src->f[2] != 0.0f); |
| assert(src->f[3] != 0.0f); |
| #endif |
| dst->f[0] = 1.0f / sqrtf(src->f[0]); |
| dst->f[1] = 1.0f / sqrtf(src->f[1]); |
| dst->f[2] = 1.0f / sqrtf(src->f[2]); |
| dst->f[3] = 1.0f / sqrtf(src->f[3]); |
| } |
| |
| static void |
| micro_sqrt(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = sqrtf(src->f[0]); |
| dst->f[1] = sqrtf(src->f[1]); |
| dst->f[2] = sqrtf(src->f[2]); |
| dst->f[3] = sqrtf(src->f[3]); |
| } |
| |
| static void |
| micro_seq(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] == src1->f[0] ? 1.0f : 0.0f; |
| dst->f[1] = src0->f[1] == src1->f[1] ? 1.0f : 0.0f; |
| dst->f[2] = src0->f[2] == src1->f[2] ? 1.0f : 0.0f; |
| dst->f[3] = src0->f[3] == src1->f[3] ? 1.0f : 0.0f; |
| } |
| |
| static void |
| micro_sge(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] >= src1->f[0] ? 1.0f : 0.0f; |
| dst->f[1] = src0->f[1] >= src1->f[1] ? 1.0f : 0.0f; |
| dst->f[2] = src0->f[2] >= src1->f[2] ? 1.0f : 0.0f; |
| dst->f[3] = src0->f[3] >= src1->f[3] ? 1.0f : 0.0f; |
| } |
| |
| static void |
| micro_sgn(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = src->f[0] < 0.0f ? -1.0f : src->f[0] > 0.0f ? 1.0f : 0.0f; |
| dst->f[1] = src->f[1] < 0.0f ? -1.0f : src->f[1] > 0.0f ? 1.0f : 0.0f; |
| dst->f[2] = src->f[2] < 0.0f ? -1.0f : src->f[2] > 0.0f ? 1.0f : 0.0f; |
| dst->f[3] = src->f[3] < 0.0f ? -1.0f : src->f[3] > 0.0f ? 1.0f : 0.0f; |
| } |
| |
| static void |
| micro_isgn(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->i[0] = src->i[0] < 0 ? -1 : src->i[0] > 0 ? 1 : 0; |
| dst->i[1] = src->i[1] < 0 ? -1 : src->i[1] > 0 ? 1 : 0; |
| dst->i[2] = src->i[2] < 0 ? -1 : src->i[2] > 0 ? 1 : 0; |
| dst->i[3] = src->i[3] < 0 ? -1 : src->i[3] > 0 ? 1 : 0; |
| } |
| |
| static void |
| micro_sgt(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] > src1->f[0] ? 1.0f : 0.0f; |
| dst->f[1] = src0->f[1] > src1->f[1] ? 1.0f : 0.0f; |
| dst->f[2] = src0->f[2] > src1->f[2] ? 1.0f : 0.0f; |
| dst->f[3] = src0->f[3] > src1->f[3] ? 1.0f : 0.0f; |
| } |
| |
| static void |
| micro_sin(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = sinf(src->f[0]); |
| dst->f[1] = sinf(src->f[1]); |
| dst->f[2] = sinf(src->f[2]); |
| dst->f[3] = sinf(src->f[3]); |
| } |
| |
| static void |
| micro_sle(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] <= src1->f[0] ? 1.0f : 0.0f; |
| dst->f[1] = src0->f[1] <= src1->f[1] ? 1.0f : 0.0f; |
| dst->f[2] = src0->f[2] <= src1->f[2] ? 1.0f : 0.0f; |
| dst->f[3] = src0->f[3] <= src1->f[3] ? 1.0f : 0.0f; |
| } |
| |
| static void |
| micro_slt(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] < src1->f[0] ? 1.0f : 0.0f; |
| dst->f[1] = src0->f[1] < src1->f[1] ? 1.0f : 0.0f; |
| dst->f[2] = src0->f[2] < src1->f[2] ? 1.0f : 0.0f; |
| dst->f[3] = src0->f[3] < src1->f[3] ? 1.0f : 0.0f; |
| } |
| |
| static void |
| micro_sne(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] != src1->f[0] ? 1.0f : 0.0f; |
| dst->f[1] = src0->f[1] != src1->f[1] ? 1.0f : 0.0f; |
| dst->f[2] = src0->f[2] != src1->f[2] ? 1.0f : 0.0f; |
| dst->f[3] = src0->f[3] != src1->f[3] ? 1.0f : 0.0f; |
| } |
| |
| static void |
| micro_trunc(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->f[0] = truncf(src->f[0]); |
| dst->f[1] = truncf(src->f[1]); |
| dst->f[2] = truncf(src->f[2]); |
| dst->f[3] = truncf(src->f[3]); |
| } |
| |
| static void |
| micro_u2d(union tgsi_double_channel *dst, |
| const union tgsi_exec_channel *src) |
| { |
| dst->d[0] = (double)src->u[0]; |
| dst->d[1] = (double)src->u[1]; |
| dst->d[2] = (double)src->u[2]; |
| dst->d[3] = (double)src->u[3]; |
| } |
| |
| static void |
| micro_i64abs(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i64[0] = src->i64[0] >= 0 ? src->i64[0] : -src->i64[0]; |
| dst->i64[1] = src->i64[1] >= 0 ? src->i64[1] : -src->i64[1]; |
| dst->i64[2] = src->i64[2] >= 0 ? src->i64[2] : -src->i64[2]; |
| dst->i64[3] = src->i64[3] >= 0 ? src->i64[3] : -src->i64[3]; |
| } |
| |
| static void |
| micro_i64sgn(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i64[0] = src->i64[0] < 0 ? -1 : src->i64[0] > 0 ? 1 : 0; |
| dst->i64[1] = src->i64[1] < 0 ? -1 : src->i64[1] > 0 ? 1 : 0; |
| dst->i64[2] = src->i64[2] < 0 ? -1 : src->i64[2] > 0 ? 1 : 0; |
| dst->i64[3] = src->i64[3] < 0 ? -1 : src->i64[3] > 0 ? 1 : 0; |
| } |
| |
| static void |
| micro_i64neg(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i64[0] = -src->i64[0]; |
| dst->i64[1] = -src->i64[1]; |
| dst->i64[2] = -src->i64[2]; |
| dst->i64[3] = -src->i64[3]; |
| } |
| |
| static void |
| micro_u64seq(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].u64[0] == src[1].u64[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].u64[1] == src[1].u64[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].u64[2] == src[1].u64[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].u64[3] == src[1].u64[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_u64sne(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].u64[0] != src[1].u64[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].u64[1] != src[1].u64[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].u64[2] != src[1].u64[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].u64[3] != src[1].u64[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_i64slt(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].i64[0] < src[1].i64[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].i64[1] < src[1].i64[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].i64[2] < src[1].i64[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].i64[3] < src[1].i64[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_u64slt(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].u64[0] < src[1].u64[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].u64[1] < src[1].u64[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].u64[2] < src[1].u64[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].u64[3] < src[1].u64[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_i64sge(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].i64[0] >= src[1].i64[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].i64[1] >= src[1].i64[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].i64[2] >= src[1].i64[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].i64[3] >= src[1].i64[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_u64sge(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u[0][0] = src[0].u64[0] >= src[1].u64[0] ? ~0U : 0U; |
| dst->u[1][0] = src[0].u64[1] >= src[1].u64[1] ? ~0U : 0U; |
| dst->u[2][0] = src[0].u64[2] >= src[1].u64[2] ? ~0U : 0U; |
| dst->u[3][0] = src[0].u64[3] >= src[1].u64[3] ? ~0U : 0U; |
| } |
| |
| static void |
| micro_u64max(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u64[0] = src[0].u64[0] > src[1].u64[0] ? src[0].u64[0] : src[1].u64[0]; |
| dst->u64[1] = src[0].u64[1] > src[1].u64[1] ? src[0].u64[1] : src[1].u64[1]; |
| dst->u64[2] = src[0].u64[2] > src[1].u64[2] ? src[0].u64[2] : src[1].u64[2]; |
| dst->u64[3] = src[0].u64[3] > src[1].u64[3] ? src[0].u64[3] : src[1].u64[3]; |
| } |
| |
| static void |
| micro_i64max(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i64[0] = src[0].i64[0] > src[1].i64[0] ? src[0].i64[0] : src[1].i64[0]; |
| dst->i64[1] = src[0].i64[1] > src[1].i64[1] ? src[0].i64[1] : src[1].i64[1]; |
| dst->i64[2] = src[0].i64[2] > src[1].i64[2] ? src[0].i64[2] : src[1].i64[2]; |
| dst->i64[3] = src[0].i64[3] > src[1].i64[3] ? src[0].i64[3] : src[1].i64[3]; |
| } |
| |
| static void |
| micro_u64min(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u64[0] = src[0].u64[0] < src[1].u64[0] ? src[0].u64[0] : src[1].u64[0]; |
| dst->u64[1] = src[0].u64[1] < src[1].u64[1] ? src[0].u64[1] : src[1].u64[1]; |
| dst->u64[2] = src[0].u64[2] < src[1].u64[2] ? src[0].u64[2] : src[1].u64[2]; |
| dst->u64[3] = src[0].u64[3] < src[1].u64[3] ? src[0].u64[3] : src[1].u64[3]; |
| } |
| |
| static void |
| micro_i64min(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i64[0] = src[0].i64[0] < src[1].i64[0] ? src[0].i64[0] : src[1].i64[0]; |
| dst->i64[1] = src[0].i64[1] < src[1].i64[1] ? src[0].i64[1] : src[1].i64[1]; |
| dst->i64[2] = src[0].i64[2] < src[1].i64[2] ? src[0].i64[2] : src[1].i64[2]; |
| dst->i64[3] = src[0].i64[3] < src[1].i64[3] ? src[0].i64[3] : src[1].i64[3]; |
| } |
| |
| static void |
| micro_u64add(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u64[0] = src[0].u64[0] + src[1].u64[0]; |
| dst->u64[1] = src[0].u64[1] + src[1].u64[1]; |
| dst->u64[2] = src[0].u64[2] + src[1].u64[2]; |
| dst->u64[3] = src[0].u64[3] + src[1].u64[3]; |
| } |
| |
| static void |
| micro_u64mul(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u64[0] = src[0].u64[0] * src[1].u64[0]; |
| dst->u64[1] = src[0].u64[1] * src[1].u64[1]; |
| dst->u64[2] = src[0].u64[2] * src[1].u64[2]; |
| dst->u64[3] = src[0].u64[3] * src[1].u64[3]; |
| } |
| |
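| /* 64-bit integer division and modulo guard against division by zero; |
| * the fallback results below are arbitrary but well-defined. |
| */ |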
| static void |
| micro_u64div(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u64[0] = src[1].u64[0] ? src[0].u64[0] / src[1].u64[0] : ~0ull; |
| dst->u64[1] = src[1].u64[1] ? src[0].u64[1] / src[1].u64[1] : ~0ull; |
| dst->u64[2] = src[1].u64[2] ? src[0].u64[2] / src[1].u64[2] : ~0ull; |
| dst->u64[3] = src[1].u64[3] ? src[0].u64[3] / src[1].u64[3] : ~0ull; |
| } |
| |
| static void |
| micro_i64div(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i64[0] = src[1].i64[0] ? src[0].i64[0] / src[1].i64[0] : 0; |
| dst->i64[1] = src[1].i64[1] ? src[0].i64[1] / src[1].i64[1] : 0; |
| dst->i64[2] = src[1].i64[2] ? src[0].i64[2] / src[1].i64[2] : 0; |
| dst->i64[3] = src[1].i64[3] ? src[0].i64[3] / src[1].i64[3] : 0; |
| } |
| |
| static void |
| micro_u64mod(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->u64[0] = src[1].u64[0] ? src[0].u64[0] % src[1].u64[0] : ~0ull; |
| dst->u64[1] = src[1].u64[1] ? src[0].u64[1] % src[1].u64[1] : ~0ull; |
| dst->u64[2] = src[1].u64[2] ? src[0].u64[2] % src[1].u64[2] : ~0ull; |
| dst->u64[3] = src[1].u64[3] ? src[0].u64[3] % src[1].u64[3] : ~0ull; |
| } |
| |
| static void |
| micro_i64mod(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src) |
| { |
| dst->i64[0] = src[1].i64[0] ? src[0].i64[0] % src[1].i64[0] : ~0ll; |
| dst->i64[1] = src[1].i64[1] ? src[0].i64[1] % src[1].i64[1] : ~0ll; |
| dst->i64[2] = src[1].i64[2] ? src[0].i64[2] % src[1].i64[2] : ~0ll; |
| dst->i64[3] = src[1].i64[3] ? src[0].i64[3] % src[1].i64[3] : ~0ll; |
| } |
| |
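| /* 64-bit shifts mask the shift count to [0, 63] so that counts >= 64 |
| * (undefined behaviour in C) still produce a defined result. |
| */ |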
| static void |
| micro_u64shl(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src0, |
| union tgsi_exec_channel *src1) |
| { |
| unsigned masked_count; |
| masked_count = src1->u[0] & 0x3f; |
| dst->u64[0] = src0->u64[0] << masked_count; |
| masked_count = src1->u[1] & 0x3f; |
| dst->u64[1] = src0->u64[1] << masked_count; |
| masked_count = src1->u[2] & 0x3f; |
| dst->u64[2] = src0->u64[2] << masked_count; |
| masked_count = src1->u[3] & 0x3f; |
| dst->u64[3] = src0->u64[3] << masked_count; |
| } |
| |
| static void |
| micro_i64shr(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src0, |
| union tgsi_exec_channel *src1) |
| { |
| unsigned masked_count; |
| masked_count = src1->u[0] & 0x3f; |
| dst->i64[0] = src0->i64[0] >> masked_count; |
| masked_count = src1->u[1] & 0x3f; |
| dst->i64[1] = src0->i64[1] >> masked_count; |
| masked_count = src1->u[2] & 0x3f; |
| dst->i64[2] = src0->i64[2] >> masked_count; |
| masked_count = src1->u[3] & 0x3f; |
| dst->i64[3] = src0->i64[3] >> masked_count; |
| } |
| |
| static void |
| micro_u64shr(union tgsi_double_channel *dst, |
| const union tgsi_double_channel *src0, |
| union tgsi_exec_channel *src1) |
| { |
| unsigned masked_count; |
| masked_count = src1->u[0] & 0x3f; |
| dst->u64[0] = src0->u64[0] >> masked_count; |
| masked_count = src1->u[1] & 0x3f; |
| dst->u64[1] = src0->u64[1] >> masked_count; |
| masked_count = src1->u[2] & 0x3f; |
| dst->u64[2] = src0->u64[2] >> masked_count; |
| masked_count = src1->u[3] & 0x3f; |
| dst->u64[3] = src0->u64[3] >> masked_count; |
| } |
| |
| enum tgsi_exec_datatype { |
| TGSI_EXEC_DATA_FLOAT, |
| TGSI_EXEC_DATA_INT, |
| TGSI_EXEC_DATA_UINT, |
| TGSI_EXEC_DATA_DOUBLE, |
| TGSI_EXEC_DATA_INT64, |
| TGSI_EXEC_DATA_UINT64, |
| }; |
| |
| /** The execution mask is the AND of the condition, loop, continue, switch and function-call masks */ |
| #define UPDATE_EXEC_MASK(MACH) \ |
| MACH->ExecMask = MACH->CondMask & MACH->LoopMask & MACH->ContMask & MACH->Switch.mask & MACH->FuncMask |
| |
| |
| static const union tgsi_exec_channel ZeroVec = |
| { { 0.0, 0.0, 0.0, 0.0 } }; |
| |
| static const union tgsi_exec_channel OneVec = { |
| {1.0f, 1.0f, 1.0f, 1.0f} |
| }; |
| |
| static const union tgsi_exec_channel P128Vec = { |
| {128.0f, 128.0f, 128.0f, 128.0f} |
| }; |
| |
| static const union tgsi_exec_channel M128Vec = { |
| {-128.0f, -128.0f, -128.0f, -128.0f} |
| }; |
| |
| #ifdef DEBUG |
| static void |
| print_chan(const char *msg, const union tgsi_exec_channel *chan) |
| { |
| debug_printf("%s = {%f, %f, %f, %f}\n", |
| msg, chan->f[0], chan->f[1], chan->f[2], chan->f[3]); |
| } |
| #endif |
| |
| |
| #ifdef DEBUG |
| static void |
| print_temp(const struct tgsi_exec_machine *mach, uint index) |
| { |
| const struct tgsi_exec_vector *tmp = &mach->Temps[index]; |
| int i; |
| debug_printf("Temp[%u] =\n", index); |
| for (i = 0; i < 4; i++) { |
| debug_printf(" %c: { %f, %f, %f, %f }\n", |
| "XYZW"[i], |
| tmp->xyzw[i].f[0], |
| tmp->xyzw[i].f[1], |
| tmp->xyzw[i].f[2], |
| tmp->xyzw[i].f[3]); |
| } |
| } |
| #endif |
| |
| |
| void |
| tgsi_exec_set_constant_buffers(struct tgsi_exec_machine *mach, |
| unsigned num_bufs, |
| const void **bufs, |
| const unsigned *buf_sizes) |
| { |
| unsigned i; |
| |
| for (i = 0; i < num_bufs; i++) { |
| mach->Consts[i] = bufs[i]; |
| mach->ConstsSize[i] = buf_sizes[i]; |
| } |
| } |
| |
| /** |
| * Initialize machine state by expanding tokens to full instructions, |
| * allocating temporary storage, setting up constants, etc. |
| * After this, we can call tgsi_exec_machine_run() many times. |
| */ |
| void |
| tgsi_exec_machine_bind_shader( |
| struct tgsi_exec_machine *mach, |
| const struct tgsi_token *tokens, |
| struct tgsi_sampler *sampler, |
| struct tgsi_image *image, |
| struct tgsi_buffer *buffer) |
| { |
| uint k; |
| struct tgsi_parse_context parse; |
| struct tgsi_full_instruction *instructions; |
| struct tgsi_full_declaration *declarations; |
| uint maxInstructions = 10, numInstructions = 0; |
| uint maxDeclarations = 10, numDeclarations = 0; |
| |
| #if 0 |
| tgsi_dump(tokens, 0); |
| #endif |
| |
| mach->Tokens = tokens; |
| mach->Sampler = sampler; |
| mach->Image = image; |
| mach->Buffer = buffer; |
| |
| if (!tokens) { |
| /* unbind and free all */ |
| FREE(mach->Declarations); |
| mach->Declarations = NULL; |
| mach->NumDeclarations = 0; |
| |
| FREE(mach->Instructions); |
| mach->Instructions = NULL; |
| mach->NumInstructions = 0; |
| |
| return; |
| } |
| |
| k = tgsi_parse_init (&parse, mach->Tokens); |
| if (k != TGSI_PARSE_OK) { |
| debug_printf( "Problem parsing!\n" ); |
| return; |
| } |
| |
| mach->ImmLimit = 0; |
| mach->NumOutputs = 0; |
| |
| for (k = 0; k < TGSI_SEMANTIC_COUNT; k++) |
| mach->SysSemanticToIndex[k] = -1; |
| |
| if (mach->ShaderType == PIPE_SHADER_GEOMETRY && |
| !mach->UsedGeometryShader) { |
| struct tgsi_exec_vector *inputs; |
| struct tgsi_exec_vector *outputs; |
| |
| inputs = align_malloc(sizeof(struct tgsi_exec_vector) * |
| TGSI_MAX_PRIM_VERTICES * PIPE_MAX_SHADER_INPUTS, |
| 16); |
| |
| if (!inputs) |
| return; |
| |
| outputs = align_malloc(sizeof(struct tgsi_exec_vector) * |
| TGSI_MAX_TOTAL_VERTICES, 16); |
| |
| if (!outputs) { |
| align_free(inputs); |
| return; |
| } |
| |
| align_free(mach->Inputs); |
| align_free(mach->Outputs); |
| |
| mach->Inputs = inputs; |
| mach->Outputs = outputs; |
| mach->UsedGeometryShader = TRUE; |
| } |
| |
| declarations = (struct tgsi_full_declaration *) |
| MALLOC( maxDeclarations * sizeof(struct tgsi_full_declaration) ); |
| |
| if (!declarations) { |
| return; |
| } |
| |
| instructions = (struct tgsi_full_instruction *) |
| MALLOC( maxInstructions * sizeof(struct tgsi_full_instruction) ); |
| |
| if (!instructions) { |
| FREE( declarations ); |
| return; |
| } |
| |
| while( !tgsi_parse_end_of_tokens( &parse ) ) { |
| uint i; |
| |
| tgsi_parse_token( &parse ); |
| switch( parse.FullToken.Token.Type ) { |
| case TGSI_TOKEN_TYPE_DECLARATION: |
| /* save expanded declaration */ |
| if (numDeclarations == maxDeclarations) { |
| declarations = REALLOC(declarations, |
| maxDeclarations |
| * sizeof(struct tgsi_full_declaration), |
| (maxDeclarations + 10) |
| * sizeof(struct tgsi_full_declaration)); |
| maxDeclarations += 10; |
| } |
| if (parse.FullToken.FullDeclaration.Declaration.File == TGSI_FILE_OUTPUT) |
| mach->NumOutputs = MAX2(mach->NumOutputs, parse.FullToken.FullDeclaration.Range.Last + 1); |
| else if (parse.FullToken.FullDeclaration.Declaration.File == TGSI_FILE_SYSTEM_VALUE) { |
| const struct tgsi_full_declaration *decl = &parse.FullToken.FullDeclaration; |
| mach->SysSemanticToIndex[decl->Semantic.Name] = decl->Range.First; |
| } |
| |
| memcpy(declarations + numDeclarations, |
| &parse.FullToken.FullDeclaration, |
| sizeof(declarations[0])); |
| numDeclarations++; |
| break; |
| |
| case TGSI_TOKEN_TYPE_IMMEDIATE: |
| { |
| uint size = parse.FullToken.FullImmediate.Immediate.NrTokens - 1; |
| assert( size <= 4 ); |
| if (mach->ImmLimit >= mach->ImmsReserved) { |
| unsigned newReserved = mach->ImmsReserved ? 2 * mach->ImmsReserved : 128; |
| float4 *imms = REALLOC(mach->Imms, mach->ImmsReserved * sizeof(float4), newReserved * sizeof(float4)); |
| if (imms) { |
| mach->ImmsReserved = newReserved; |
| mach->Imms = imms; |
| } else { |
| debug_printf("Unable to (re)allocate space for immediate constants\n"); |
| break; |
| } |
| } |
| |
| for( i = 0; i < size; i++ ) { |
| mach->Imms[mach->ImmLimit][i] = |
| parse.FullToken.FullImmediate.u[i].Float; |
| } |
| mach->ImmLimit += 1; |
| } |
| break; |
| |
| case TGSI_TOKEN_TYPE_INSTRUCTION: |
| |
| /* save expanded instruction */ |
| if (numInstructions == maxInstructions) { |
| instructions = REALLOC(instructions, |
| maxInstructions |
| * sizeof(struct tgsi_full_instruction), |
| (maxInstructions + 10) |
| * sizeof(struct tgsi_full_instruction)); |
| maxInstructions += 10; |
| } |
| |
| memcpy(instructions + numInstructions, |
| &parse.FullToken.FullInstruction, |
| sizeof(instructions[0])); |
| |
| numInstructions++; |
| break; |
| |
| case TGSI_TOKEN_TYPE_PROPERTY: |
| if (mach->ShaderType == PIPE_SHADER_GEOMETRY) { |
| if (parse.FullToken.FullProperty.Property.PropertyName == TGSI_PROPERTY_GS_MAX_OUTPUT_VERTICES) { |
| mach->MaxOutputVertices = parse.FullToken.FullProperty.u[0].Data; |
| } |
| } |
| break; |
| |
| default: |
| assert( 0 ); |
| } |
| } |
| tgsi_parse_free (&parse); |
| |
| FREE(mach->Declarations); |
| mach->Declarations = declarations; |
| mach->NumDeclarations = numDeclarations; |
| |
| FREE(mach->Instructions); |
| mach->Instructions = instructions; |
| mach->NumInstructions = numInstructions; |
| } |
| |
| |
| struct tgsi_exec_machine * |
| tgsi_exec_machine_create(enum pipe_shader_type shader_type) |
| { |
| struct tgsi_exec_machine *mach; |
| |
| mach = align_malloc( sizeof *mach, 16 ); |
| if (!mach) |
| goto fail; |
| |
| memset(mach, 0, sizeof(*mach)); |
| |
| mach->ShaderType = shader_type; |
| |
| if (shader_type != PIPE_SHADER_COMPUTE) { |
| mach->Inputs = align_malloc(sizeof(struct tgsi_exec_vector) * PIPE_MAX_SHADER_INPUTS, 16); |
| mach->Outputs = align_malloc(sizeof(struct tgsi_exec_vector) * PIPE_MAX_SHADER_OUTPUTS, 16); |
| if (!mach->Inputs || !mach->Outputs) |
| goto fail; |
| } |
| |
| if (shader_type == PIPE_SHADER_FRAGMENT) { |
| mach->InputSampleOffsetApply = align_malloc(sizeof(apply_sample_offset_func) * PIPE_MAX_SHADER_INPUTS, 16); |
| if (!mach->InputSampleOffsetApply) |
| goto fail; |
| } |
| |
| #ifdef DEBUG |
| /* silence warnings */ |
| (void) print_chan; |
| (void) print_temp; |
| #endif |
| |
| return mach; |
| |
| fail: |
| if (mach) { |
| align_free(mach->InputSampleOffsetApply); |
| align_free(mach->Inputs); |
| align_free(mach->Outputs); |
| align_free(mach); |
| } |
| return NULL; |
| } |
| |
| |
| void |
| tgsi_exec_machine_destroy(struct tgsi_exec_machine *mach) |
| { |
| if (mach) { |
| FREE(mach->Instructions); |
| FREE(mach->Declarations); |
| FREE(mach->Imms); |
| |
| align_free(mach->InputSampleOffsetApply); |
| align_free(mach->Inputs); |
| align_free(mach->Outputs); |
| |
| align_free(mach); |
| } |
| } |
| |
| static void |
| micro_add(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] + src1->f[0]; |
| dst->f[1] = src0->f[1] + src1->f[1]; |
| dst->f[2] = src0->f[2] + src1->f[2]; |
| dst->f[3] = src0->f[3] + src1->f[3]; |
| } |
| |
| static void |
| micro_div( |
| union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1 ) |
| { |
| dst->f[0] = src0->f[0] / src1->f[0]; |
| dst->f[1] = src0->f[1] / src1->f[1]; |
| dst->f[2] = src0->f[2] / src1->f[2]; |
| dst->f[3] = src0->f[3] / src1->f[3]; |
| } |
| |
| static void |
| micro_lt( |
| union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1, |
| const union tgsi_exec_channel *src2, |
| const union tgsi_exec_channel *src3 ) |
| { |
| dst->f[0] = src0->f[0] < src1->f[0] ? src2->f[0] : src3->f[0]; |
| dst->f[1] = src0->f[1] < src1->f[1] ? src2->f[1] : src3->f[1]; |
| dst->f[2] = src0->f[2] < src1->f[2] ? src2->f[2] : src3->f[2]; |
| dst->f[3] = src0->f[3] < src1->f[3] ? src2->f[3] : src3->f[3]; |
| } |
| |
| static void |
| micro_max(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = fmaxf(src0->f[0], src1->f[0]); |
| dst->f[1] = fmaxf(src0->f[1], src1->f[1]); |
| dst->f[2] = fmaxf(src0->f[2], src1->f[2]); |
| dst->f[3] = fmaxf(src0->f[3], src1->f[3]); |
| } |
| |
| static void |
| micro_min(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = fminf(src0->f[0], src1->f[0]); |
| dst->f[1] = fminf(src0->f[1], src1->f[1]); |
| dst->f[2] = fminf(src0->f[2], src1->f[2]); |
| dst->f[3] = fminf(src0->f[3], src1->f[3]); |
| } |
| |
| static void |
| micro_mul(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] * src1->f[0]; |
| dst->f[1] = src0->f[1] * src1->f[1]; |
| dst->f[2] = src0->f[2] * src1->f[2]; |
| dst->f[3] = src0->f[3] * src1->f[3]; |
| } |
| |
| static void |
| micro_neg( |
| union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src ) |
| { |
| dst->f[0] = -src->f[0]; |
| dst->f[1] = -src->f[1]; |
| dst->f[2] = -src->f[2]; |
| dst->f[3] = -src->f[3]; |
| } |
| |
| static void |
| micro_pow( |
| union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1 ) |
| { |
| dst->f[0] = powf( src0->f[0], src1->f[0] ); |
| dst->f[1] = powf( src0->f[1], src1->f[1] ); |
| dst->f[2] = powf( src0->f[2], src1->f[2] ); |
| dst->f[3] = powf( src0->f[3], src1->f[3] ); |
| } |
| |
| static void |
| micro_ldexp(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = ldexpf(src0->f[0], src1->i[0]); |
| dst->f[1] = ldexpf(src0->f[1], src1->i[1]); |
| dst->f[2] = ldexpf(src0->f[2], src1->i[2]); |
| dst->f[3] = ldexpf(src0->f[3], src1->i[3]); |
| } |
| |
| static void |
| micro_sub(union tgsi_exec_channel *dst, |
| const union tgsi_exec_channel *src0, |
| const union tgsi_exec_channel *src1) |
| { |
| dst->f[0] = src0->f[0] - src1->f[0]; |
| dst->f[1] = src0->f[1] - src1->f[1]; |
| dst->f[2] = src0->f[2] - src1->f[2]; |
| dst->f[3] = src0->f[3] - src1->f[3]; |
| } |
| |
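| /* Fetch one swizzled channel, for all four quad members, from the given |
| * register file at index/index2D (the indices may differ per quad member |
| * when indirect addressing is used). |
| */ |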
| static void |
| fetch_src_file_channel(const struct tgsi_exec_machine *mach, |
| const uint file, |
| const uint swizzle, |
| const union tgsi_exec_channel *index, |
| const union tgsi_exec_channel *index2D, |
| union tgsi_exec_channel *chan) |
| { |
| uint i; |
| |
| assert(swizzle < 4); |
| |
| switch (file) { |
| case TGSI_FILE_CONSTANT: |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| /* NOTE: copying the const value as a uint instead of float */ |
| const uint constbuf = index2D->i[i]; |
| const unsigned pos = index->i[i] * 4 + swizzle; |
| /* const buffer bounds check */ |
| if (pos >= mach->ConstsSize[constbuf] / 4) { |
| if (0) { |
| /* Debug: print warning */ |
| static int count = 0; |
| if (count++ < 100) |
| debug_printf("TGSI Exec: const buffer index %d" |
| " out of bounds\n", pos); |
| } |
| chan->u[i] = 0; |
| } else { |
| const uint *buf = (const uint *)mach->Consts[constbuf]; |
| chan->u[i] = buf[pos]; |
| } |
| } |
| break; |
| |
| case TGSI_FILE_INPUT: |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| /* |
| if (PIPE_SHADER_GEOMETRY == mach->ShaderType) { |
| debug_printf("Fetching Input[%d] (2d=%d, 1d=%d)\n", |
| index2D->i[i] * TGSI_EXEC_MAX_INPUT_ATTRIBS + index->i[i], |
| index2D->i[i], index->i[i]); |
| }*/ |
| int pos = index2D->i[i] * TGSI_EXEC_MAX_INPUT_ATTRIBS + index->i[i]; |
| assert(pos >= 0); |
| assert(pos < TGSI_MAX_PRIM_VERTICES * PIPE_MAX_ATTRIBS); |
| chan->u[i] = mach->Inputs[pos].xyzw[swizzle].u[i]; |
| } |
| break; |
| |
| case TGSI_FILE_SYSTEM_VALUE: |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| chan->u[i] = mach->SystemValue[index->i[i]].xyzw[swizzle].u[i]; |
| } |
| break; |
| |
| case TGSI_FILE_TEMPORARY: |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| assert(index->i[i] < TGSI_EXEC_NUM_TEMPS); |
| assert(index2D->i[i] == 0); |
| |
| chan->u[i] = mach->Temps[index->i[i]].xyzw[swizzle].u[i]; |
| } |
| break; |
| |
| case TGSI_FILE_IMMEDIATE: |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| assert(index->i[i] >= 0 && index->i[i] < (int)mach->ImmLimit); |
| assert(index2D->i[i] == 0); |
| |
| chan->f[i] = mach->Imms[index->i[i]][swizzle]; |
| } |
| break; |
| |
| case TGSI_FILE_ADDRESS: |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| assert(index->i[i] >= 0 && index->i[i] < ARRAY_SIZE(mach->Addrs)); |
| assert(index2D->i[i] == 0); |
| |
| chan->u[i] = mach->Addrs[index->i[i]].xyzw[swizzle].u[i]; |
| } |
| break; |
| |
| case TGSI_FILE_OUTPUT: |
| /* vertex/fragment output vars can be read too */ |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| assert(index->i[i] >= 0); |
| assert(index2D->i[i] == 0); |
| |
| chan->u[i] = mach->Outputs[index->i[i]].xyzw[swizzle].u[i]; |
| } |
| break; |
| |
| default: |
| assert(0); |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| chan->u[i] = 0; |
| } |
| } |
| } |
| |
| static void |
| get_index_registers(const struct tgsi_exec_machine *mach, |
| const struct tgsi_full_src_register *reg, |
| union tgsi_exec_channel *index, |
| union tgsi_exec_channel *index2D) |
| { |
| /* We start with a direct index into a register file. |
| * |
| * file[1], |
| * where: |
| * file = Register.File |
| * [1] = Register.Index |
| */ |
| index->i[0] = |
| index->i[1] = |
| index->i[2] = |
| index->i[3] = reg->Register.Index; |
| |
| /* There is an extra source register that indirectly subscripts |
| * a register file. The direct index now becomes an offset |
| * that is being added to the indirect register. |
| * |
| * file[ind[2].x+1], |
| * where: |
| * ind = Indirect.File |
| * [2] = Indirect.Index |
| * .x = Indirect.Swizzle |
| */ |
| if (reg->Register.Indirect) { |
| const uint execmask = mach->ExecMask; |
| |
| assert(reg->Indirect.File == TGSI_FILE_ADDRESS); |
| const union tgsi_exec_channel *addr = &mach->Addrs[reg->Indirect.Index].xyzw[reg->Indirect.Swizzle]; |
| for (int i = 0; i < TGSI_QUAD_SIZE; i++) |
| index->i[i] += addr->u[i]; |
| |
| /* for disabled execution channels, zero-out the index to |
| * avoid using a potential garbage value. |
| */ |
| for (int i = 0; i < TGSI_QUAD_SIZE; i++) { |
| if ((execmask & (1 << i)) == 0) |
| index->i[i] = 0; |
| } |
| } |
| |
| /* There is an extra source register that is a second |
| * subscript to a register file. Effectively it means that |
| * the register file is actually a 2D array of registers. |
| * |
| * file[3][1], |
| * where: |
| * [3] = Dimension.Index |
| */ |
| if (reg->Register.Dimension) { |
| index2D->i[0] = |
| index2D->i[1] = |
| index2D->i[2] = |
| index2D->i[3] = reg->Dimension.Index; |
| |
| /* Again, the second subscript index can be addressed indirectly |
| * identically to the first one. |
| * Nothing stops us from indirectly addressing the indirect register, |
| * but there is no need for that, so we won't exercise it. |
| * |
| * file[ind[4].y+3][1], |
| * where: |
| * ind = DimIndirect.File |
| * [4] = DimIndirect.Index |
| * .y = DimIndirect.Swizzle |
| */ |
| if (reg->Dimension.Indirect) { |
| const uint execmask = mach->ExecMask; |
| |
| assert(reg->DimIndirect.File == TGSI_FILE_ADDRESS); |
| const union tgsi_exec_channel *addr = &mach->Addrs[reg->DimIndirect.Index].xyzw[reg->DimIndirect.Swizzle]; |
| for (int i = 0; i < TGSI_QUAD_SIZE; i++) |
| index2D->i[i] += addr->u[i]; |
| |
| /* for disabled execution channels, zero-out the index to |
| * avoid using a potential garbage value. |
| */ |
| for (int i = 0; i < TGSI_QUAD_SIZE; i++) { |
| if ((execmask & (1 << i)) == 0) { |
| index2D->i[i] = 0; |
| } |
| } |
| } |
| |
| /* If by any chance there was a need for a 3D array of register |
| * files, we would have to check whether Dimension is followed |
| * by a dimension register and continue the saga. |
| */ |
| } else { |
| index2D->i[0] = |
| index2D->i[1] = |
| index2D->i[2] = |
| index2D->i[3] = 0; |
| } |
| } |
| |
| |
| static void |
| fetch_source_d(const struct tgsi_exec_machine *mach, |
| union tgsi_exec_channel *chan, |
| const struct tgsi_full_src_register *reg, |
| const uint chan_index) |
| { |
| union tgsi_exec_channel index; |
| union tgsi_exec_channel index2D; |
| uint swizzle; |
| |
| get_index_registers(mach, reg, &index, &index2D); |
| |
| |
| swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index ); |
| fetch_src_file_channel(mach, |
| reg->Register.File, |
| swizzle, |
| &index, |
| &index2D, |
| chan); |
| } |
| |
| static void |
| fetch_source(const struct tgsi_exec_machine *mach, |
| union tgsi_exec_channel *chan, |
| const struct tgsi_full_src_register *reg, |
| const uint chan_index, |
| enum tgsi_exec_datatype src_datatype) |
| { |
| fetch_source_d(mach, chan, reg, chan_index); |
| |
| if (reg->Register.Absolute) { |
| assert(src_datatype == TGSI_EXEC_DATA_FLOAT); |
| micro_abs(chan, chan); |
| } |
| |
| if (reg->Register.Negate) { |
| if (src_datatype == TGSI_EXEC_DATA_FLOAT) { |
| micro_neg(chan, chan); |
| } else { |
| micro_ineg(chan, chan); |
| } |
| } |
| } |
| |
| static union tgsi_exec_channel * |
| store_dest_dstret(struct tgsi_exec_machine *mach, |
| const union tgsi_exec_channel *chan, |
| const struct tgsi_full_dst_register *reg, |
| uint chan_index) |
| { |
| static union tgsi_exec_channel null; |
| union tgsi_exec_channel *dst; |
| int offset = 0; /* indirection offset */ |
| int index; |
| |
| |
| /* There is an extra source register that indirectly subscripts |
| * a register file. The direct index now becomes an offset |
| * that is being added to the indirect register. |
| * |
| * file[ind[2].x+1], |
| * where: |
| * ind = Indirect.File |
| * [2] = Indirect.Index |
| * .x = Indirect.Swizzle |
| */ |
| if (reg->Register.Indirect) { |
| union tgsi_exec_channel index; |
| union tgsi_exec_channel indir_index; |
| uint swizzle; |
| |
| /* which address register (always zero for now) */ |
| index.i[0] = |
| index.i[1] = |
| index.i[2] = |
| index.i[3] = reg->Indirect.Index; |
| |
| /* get current value of address register[swizzle] */ |
| swizzle = reg->Indirect.Swizzle; |
| |
| /* fetch values from the address/indirection register */ |
| fetch_src_file_channel(mach, |
| reg->Indirect.File, |
| swizzle, |
| &index, |
| &ZeroVec, |
| &indir_index); |
| |
| /* save indirection offset */ |
| offset = indir_index.i[0]; |
| } |
| |
| switch (reg->Register.File) { |
| case TGSI_FILE_NULL: |
| dst = &null; |
| break; |
| |
| case TGSI_FILE_OUTPUT: |
| index = mach->OutputVertexOffset + reg->Register.Index; |
| dst = &mach->Outputs[offset + index].xyzw[chan_index]; |
| #if 0 |
| debug_printf("NumOutputs = %d, TEMP_O_C/I = %d, redindex = %d\n", |
| mach->NumOutputs, mach->Temps[TEMP_OUTPUT_I].xyzw[TEMP_OUTPUT_C].u[0], |
| reg->Register.Index); |
| if (PIPE_SHADER_GEOMETRY == mach->ShaderType) { |
| debug_printf("STORING OUT[%d] mask(%d), = (", offset + index, execmask); |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) |
| if (execmask & (1 << i)) |
| debug_printf("%f, ", chan->f[i]); |
| debug_printf(")\n"); |
| } |
| #endif |
| break; |
| |
| case TGSI_FILE_TEMPORARY: |
| index = reg->Register.Index; |
| assert( index < TGSI_EXEC_NUM_TEMPS ); |
| dst = &mach->Temps[offset + index].xyzw[chan_index]; |
| break; |
| |
| case TGSI_FILE_ADDRESS: |
| index = reg->Register.Index; |
| assert(index >= 0 && index < ARRAY_SIZE(mach->Addrs)); |
| dst = &mach->Addrs[index].xyzw[chan_index]; |
| break; |
| |
| default: |
| unreachable("Bad destination file"); |
| } |
| |
| return dst; |
| } |
| |
| static void |
| store_dest_double(struct tgsi_exec_machine *mach, |
| const union tgsi_exec_channel *chan, |
| const struct tgsi_full_dst_register *reg, |
| uint chan_index) |
| { |
| union tgsi_exec_channel *dst; |
| const uint execmask = mach->ExecMask; |
| int i; |
| |
| dst = store_dest_dstret(mach, chan, reg, chan_index); |
| if (!dst) |
| return; |
| |
| /* doubles path */ |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) |
| if (execmask & (1 << i)) |
| dst->i[i] = chan->i[i]; |
| } |
| |
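| /* Store a result channel to the destination register. Only quad members |
| * whose ExecMask bit is set are written; if the instruction requests |
| * saturation, values are clamped to [0, 1] first. |
| */ |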
| static void |
| store_dest(struct tgsi_exec_machine *mach, |
| const union tgsi_exec_channel *chan, |
| const struct tgsi_full_dst_register *reg, |
| const struct tgsi_full_instruction *inst, |
| uint chan_index) |
| { |
| union tgsi_exec_channel *dst; |
| const uint execmask = mach->ExecMask; |
| int i; |
| |
| dst = store_dest_dstret(mach, chan, reg, chan_index); |
| if (!dst) |
| return; |
| |
| if (!inst->Instruction.Saturate) { |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) |
| if (execmask & (1 << i)) |
| dst->i[i] = chan->i[i]; |
| } |
| else { |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) |
| if (execmask & (1 << i)) |
| dst->f[i] = fminf(fmaxf(chan->f[i], 0.0f), 1.0f); |
| } |
| } |
| |
| #define FETCH(VAL,INDEX,CHAN)\ |
| fetch_source(mach, VAL, &inst->Src[INDEX], CHAN, TGSI_EXEC_DATA_FLOAT) |
| |
| #define IFETCH(VAL,INDEX,CHAN)\ |
| fetch_source(mach, VAL, &inst->Src[INDEX], CHAN, TGSI_EXEC_DATA_INT) |
| |
| |
| /** |
| * Execute ARB-style KIL which is predicated by a src register. |
| * Kill fragment if any of the four values is less than zero. |
| */ |
| static void |
| exec_kill_if(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst) |
| { |
| uint uniquemask; |
| uint chan_index; |
| uint kilmask = 0; /* bit 0 = pixel 0, bit 1 = pixel 1, etc */ |
| union tgsi_exec_channel r[1]; |
| |
| /* This mask stores component bits that were already tested. */ |
| uniquemask = 0; |
| |
| for (chan_index = 0; chan_index < 4; chan_index++) |
| { |
| uint swizzle; |
| uint i; |
| |
| /* unswizzle channel */ |
| swizzle = tgsi_util_get_full_src_register_swizzle ( |
| &inst->Src[0], |
| chan_index); |
| |
| /* check if the component has not been already tested */ |
| if (uniquemask & (1 << swizzle)) |
| continue; |
| uniquemask |= 1 << swizzle; |
| |
| FETCH(&r[0], 0, chan_index); |
| for (i = 0; i < 4; i++) |
| if (r[0].f[i] < 0.0f) |
| kilmask |= 1 << i; |
| } |
| |
| /* restrict to fragments currently executing */ |
| kilmask &= mach->ExecMask; |
| |
| mach->KillMask |= kilmask; |
| } |
| |
| /** |
| * Unconditional fragment kill/discard. |
| */ |
| static void |
| exec_kill(struct tgsi_exec_machine *mach) |
| { |
| /* kill fragment for all fragments currently executing. |
| * bit 0 = pixel 0, bit 1 = pixel 1, etc. |
| */ |
| mach->KillMask |= mach->ExecMask; |
| } |
| |
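| /* Geometry shader EMIT: append one vertex to the current primitive of the |
| * stream selected by src0.x, unless the declared maximum output vertex |
| * count has already been reached. |
| */ |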
| static void |
| emit_vertex(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst) |
| { |
| union tgsi_exec_channel r[1]; |
| unsigned stream_id; |
| unsigned prim_count; |
| /* FIXME: check for exec mask correctly |
| unsigned i; |
| for (i = 0; i < TGSI_QUAD_SIZE; ++i) { |
| if ((mach->ExecMask & (1 << i))) |
| */ |
| IFETCH(&r[0], 0, TGSI_CHAN_X); |
| stream_id = r[0].u[0]; |
| prim_count = mach->OutputPrimCount[stream_id]; |
| if (mach->ExecMask) { |
| if (mach->Primitives[stream_id][prim_count] >= mach->MaxOutputVertices) |
| return; |
| |
| if (mach->Primitives[stream_id][prim_count] == 0) |
| mach->PrimitiveOffsets[stream_id][prim_count] = mach->OutputVertexOffset; |
| mach->OutputVertexOffset += mach->NumOutputs; |
| mach->Primitives[stream_id][prim_count]++; |
| } |
| } |
| |
| static void |
| emit_primitive(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst) |
| { |
| unsigned *prim_count; |
| union tgsi_exec_channel r[1]; |
| unsigned stream_id = 0; |
| /* FIXME: check for exec mask correctly |
| unsigned i; |
| for (i = 0; i < TGSI_QUAD_SIZE; ++i) { |
| if ((mach->ExecMask & (1 << i))) |
| */ |
| if (inst) { |
| IFETCH(&r[0], 0, TGSI_CHAN_X); |
| stream_id = r[0].u[0]; |
| } |
| prim_count = &mach->OutputPrimCount[stream_id]; |
| if (mach->ExecMask) { |
| ++(*prim_count); |
| assert((*prim_count * mach->NumOutputs) < TGSI_MAX_TOTAL_VERTICES); |
| mach->Primitives[stream_id][*prim_count] = 0; |
| } |
| } |
| |
| static void |
| conditional_emit_primitive(struct tgsi_exec_machine *mach) |
| { |
| if (PIPE_SHADER_GEOMETRY == mach->ShaderType) { |
| int emitted_verts = mach->Primitives[0][mach->OutputPrimCount[0]]; |
| if (emitted_verts) { |
| emit_primitive(mach, NULL); |
| } |
| } |
| } |
| |
| |
| /* |
| * Fetch four texture samples using STR texture coordinates. |
| */ |
| static void |
| fetch_texel( struct tgsi_sampler *sampler, |
| const unsigned sview_idx, |
| const unsigned sampler_idx, |
| const union tgsi_exec_channel *s, |
| const union tgsi_exec_channel *t, |
| const union tgsi_exec_channel *p, |
| const union tgsi_exec_channel *c0, |
| const union tgsi_exec_channel *c1, |
| float derivs[3][2][TGSI_QUAD_SIZE], |
| const int8_t offset[3], |
| enum tgsi_sampler_control control, |
| union tgsi_exec_channel *r, |
| union tgsi_exec_channel *g, |
| union tgsi_exec_channel *b, |
| union tgsi_exec_channel *a ) |
| { |
| uint j; |
| float rgba[TGSI_NUM_CHANNELS][TGSI_QUAD_SIZE]; |
| |
| /* FIXME: handle explicit derivs, offsets */ |
| sampler->get_samples(sampler, sview_idx, sampler_idx, |
| s->f, t->f, p->f, c0->f, c1->f, derivs, offset, control, rgba); |
| |
| for (j = 0; j < 4; j++) { |
| r->f[j] = rgba[0][j]; |
| g->f[j] = rgba[1][j]; |
| b->f[j] = rgba[2][j]; |
| a->f[j] = rgba[3][j]; |
| } |
| } |
| |
| |
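| /* |
| * Modifier codes describing which variant of a texture instruction is |
| * being executed; exec_tex() uses them to decide how the extra |
| * argument (src0.w or src1.x) is fetched and interpreted. |
| */ |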
| #define TEX_MODIFIER_NONE 0 |
| #define TEX_MODIFIER_PROJECTED 1 |
| #define TEX_MODIFIER_LOD_BIAS 2 |
| #define TEX_MODIFIER_EXPLICIT_LOD 3 |
| #define TEX_MODIFIER_LEVEL_ZERO 4 |
| #define TEX_MODIFIER_GATHER 5 |
| |
| /* |
| * Fetch all three texel offsets (for the s, t and r coords) and put |
| * them into an int array. |
| */ |
| static void |
| fetch_texel_offsets(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst, |
| int8_t offsets[3]) |
| { |
| if (inst->Texture.NumOffsets == 1) { |
| union tgsi_exec_channel index; |
| union tgsi_exec_channel offset[3]; |
| index.i[0] = index.i[1] = index.i[2] = index.i[3] = inst->TexOffsets[0].Index; |
| fetch_src_file_channel(mach, inst->TexOffsets[0].File, |
| inst->TexOffsets[0].SwizzleX, &index, &ZeroVec, &offset[0]); |
| fetch_src_file_channel(mach, inst->TexOffsets[0].File, |
| inst->TexOffsets[0].SwizzleY, &index, &ZeroVec, &offset[1]); |
| fetch_src_file_channel(mach, inst->TexOffsets[0].File, |
| inst->TexOffsets[0].SwizzleZ, &index, &ZeroVec, &offset[2]); |
| offsets[0] = offset[0].i[0]; |
| offsets[1] = offset[1].i[0]; |
| offsets[2] = offset[2].i[0]; |
| } else { |
| assert(inst->Texture.NumOffsets == 0); |
| offsets[0] = offsets[1] = offsets[2] = 0; |
| } |
| } |
| |
| |
| /* |
| * Fetch dx and dy values for one channel (s, t or r). |
| * Put dx values into one float array, dy values into another. |
| */ |
| static void |
| fetch_assign_deriv_channel(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst, |
| unsigned regdsrcx, |
| unsigned chan, |
| float derivs[2][TGSI_QUAD_SIZE]) |
| { |
| union tgsi_exec_channel d; |
| FETCH(&d, regdsrcx, chan); |
| derivs[0][0] = d.f[0]; |
| derivs[0][1] = d.f[1]; |
| derivs[0][2] = d.f[2]; |
| derivs[0][3] = d.f[3]; |
| FETCH(&d, regdsrcx + 1, chan); |
| derivs[1][0] = d.f[0]; |
| derivs[1][1] = d.f[1]; |
| derivs[1][2] = d.f[2]; |
| derivs[1][3] = d.f[3]; |
| } |
| |
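| /* |
| * Return the sampler unit referenced by src register 'sampler', |
| * resolving relative (indirect) addressing. For an indirect index the |
| * offset is taken from the first channel active in the exec mask. |
| */ |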
| static uint |
| fetch_sampler_unit(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst, |
| uint sampler) |
| { |
| uint unit = 0; |
| int i; |
| if (inst->Src[sampler].Register.Indirect) { |
| const struct tgsi_full_src_register *reg = &inst->Src[sampler]; |
| union tgsi_exec_channel indir_index, index2; |
| const uint execmask = mach->ExecMask; |
| index2.i[0] = |
| index2.i[1] = |
| index2.i[2] = |
| index2.i[3] = reg->Indirect.Index; |
| |
| fetch_src_file_channel(mach, |
| reg->Indirect.File, |
| reg->Indirect.Swizzle, |
| &index2, |
| &ZeroVec, |
| &indir_index); |
| for (i = 0; i < TGSI_QUAD_SIZE; i++) { |
| if (execmask & (1 << i)) { |
| unit = inst->Src[sampler].Register.Index + indir_index.i[i]; |
| break; |
| } |
| } |
| |
| } else { |
| unit = inst->Src[sampler].Register.Index; |
| } |
| return unit; |
| } |
| |
| /* |
| * Execute a texture instruction. |
| * |
| * modifier controls the channel routing for the instruction variants |
| * such as proj, explicit lod, and texture with lod bias. |
| * sampler indicates which src register contains the sampler unit. |
| */ |
| static void |
| exec_tex(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst, |
| uint modifier, uint sampler) |
| { |
| const union tgsi_exec_channel *args[5], *proj = NULL; |
| union tgsi_exec_channel r[5]; |
| enum tgsi_sampler_control control = TGSI_SAMPLER_LOD_NONE; |
| uint chan; |
| uint unit; |
| int8_t offsets[3]; |
| int dim, shadow_ref, i; |
| |
| unit = fetch_sampler_unit(mach, inst, sampler); |
| /* always fetch all 3 offsets, overkill but keeps code simple */ |
| fetch_texel_offsets(mach, inst, offsets); |
| |
| assert(modifier != TEX_MODIFIER_LEVEL_ZERO); |
| assert(inst->Texture.Texture != TGSI_TEXTURE_BUFFER); |
| |
| dim = tgsi_util_get_texture_coord_dim(inst->Texture.Texture); |
| shadow_ref = tgsi_util_get_shadow_ref_src_index(inst->Texture.Texture); |
| |
| assert(dim <= 4); |
| if (shadow_ref >= 0) |
| assert(shadow_ref >= dim && shadow_ref < (int)ARRAY_SIZE(args)); |
| |
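| /* args[] collects up to five per-channel values (the texture |
| * coordinates, the shadow reference and, in the last slot, the lod or |
| * bias value); they are passed to fetch_texel() as s, t, p, c0, c1. |
| * For projected variants the extra value is used as a divisor instead |
| * and the last slot stays zero. |
| */ |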
| /* fetch the modifier value into the last argument slot */ |
| if (modifier != TEX_MODIFIER_NONE) { |
| const int last = ARRAY_SIZE(args) - 1; |
| |
| /* fetch modifier from src0.w or src1.x */ |
| if (sampler == 1) { |
| assert(dim <= TGSI_CHAN_W && shadow_ref != TGSI_CHAN_W); |
| FETCH(&r[last], 0, TGSI_CHAN_W); |
| } |
| else { |
| FETCH(&r[last], 1, TGSI_CHAN_X); |
| } |
| |
| if (modifier != TEX_MODIFIER_PROJECTED) { |
| args[last] = &r[last]; |
| } |
| else { |
| proj = &r[last]; |
| args[last] = &ZeroVec; |
| } |
| |
| /* point unused arguments to zero vector */ |
| for (i = dim; i < last; i++) |
| args[i] = &ZeroVec; |
| |
| if (modifier == TEX_MODIFIER_EXPLICIT_LOD) |
| control = TGSI_SAMPLER_LOD_EXPLICIT; |
| else if (modifier == TEX_MODIFIER_LOD_BIAS) |
| control = TGSI_SAMPLER_LOD_BIAS; |
| else if (modifier == TEX_MODIFIER_GATHER) |
| control = TGSI_SAMPLER_GATHER; |
| } |
| else { |
| for (i = dim; i < (int)ARRAY_SIZE(args); i++) |
| args[i] = &ZeroVec; |
| } |
| |
| /* fetch coordinates */ |
| for (i = 0; i < dim; i++) { |
| FETCH(&r[i], 0, TGSI_CHAN_X + i); |
| |
| if (proj) |
| micro_div(&r[i], &r[i], proj); |
| |
| args[i] = &r[i]; |
| } |
| |
| /* fetch reference value */ |
| if (shadow_ref >= 0) { |
| FETCH(&r[shadow_ref], shadow_ref / 4, TGSI_CHAN_X + (shadow_ref % 4)); |
| |
| if (proj) |
| micro_div(&r[shadow_ref], &r[shadow_ref], proj); |
| |
| args[shadow_ref] = &r[shadow_ref]; |
| } |
| |
| fetch_texel(mach->Sampler, unit, unit, |
| args[0], args[1], args[2], args[3], args[4], |
| NULL, offsets, control, |
| &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */ |
| |
| #if 0 |
| debug_printf("fetch r: %g %g %g %g\n", |
| r[0].f[0], r[0].f[1], r[0].f[2], r[0].f[3]); |
| debug_printf("fetch g: %g %g %g %g\n", |
| r[1].f[0], r[1].f[1], r[1].f[2], r[1].f[3]); |
| debug_printf("fetch b: %g %g %g %g\n", |
| r[2].f[0], r[2].f[1], r[2].f[2], r[2].f[3]); |
| debug_printf("fetch a: %g %g %g %g\n", |
| r[3].f[0], r[3].f[1], r[3].f[2], r[3].f[3]); |
| #endif |
| |
| for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) { |
| if (inst->Dst[0].Register.WriteMask & (1 << chan)) { |
| store_dest(mach, &r[chan], &inst->Dst[0], inst, chan); |
| } |
| } |
| } |
| |
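| /* |
| * Execute the LODQ/LOD opcodes: query the level of detail that would |
| * be used for a texture access at the given coordinates. The two LOD |
| * values reported by query_lod() are written to the destination's X |
| * and Y channels (routed through the src1 swizzle for TGSI_OPCODE_LOD). |
| */ |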
| static void |
| exec_lodq(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst) |
| { |
| uint resource_unit, sampler_unit; |
| unsigned dim; |
| unsigned i; |
| union tgsi_exec_channel coords[4]; |
| const union tgsi_exec_channel *args[ARRAY_SIZE(coords)]; |
| union tgsi_exec_channel r[2]; |
| |
| resource_unit = fetch_sampler_unit(mach, inst, 1); |
| if (inst->Instruction.Opcode == TGSI_OPCODE_LOD) { |
| uint target = mach->SamplerViews[resource_unit].Resource; |
| dim = tgsi_util_get_texture_coord_dim(target); |
| sampler_unit = fetch_sampler_unit(mach, inst, 2); |
| } else { |
| dim = tgsi_util_get_texture_coord_dim(inst->Texture.Texture); |
| sampler_unit = resource_unit; |
| } |
| assert(dim <= ARRAY_SIZE(coords)); |
| /* fetch coordinates */ |
| for (i = 0; i < dim; i++) { |
| FETCH(&coords[i], 0, TGSI_CHAN_X + i); |
| args[i] = &coords[i]; |
| } |
| for (i = dim; i < ARRAY_SIZE(coords); i++) { |
| args[i] = &ZeroVec; |
| } |
| mach->Sampler->query_lod(mach->Sampler, resource_unit, sampler_unit, |
| args[0]->f, |
| args[1]->f, |
| args[2]->f, |
| args[3]->f, |
| TGSI_SAMPLER_LOD_NONE, |
| r[0].f, |
| r[1].f); |
| |
| if (inst->Instruction.Opcode == TGSI_OPCODE_LOD) { |
| unsigned char swizzles[4]; |
| unsigned chan; |
| swizzles[0] = inst->Src[1].Register.SwizzleX; |
| swizzles[1] = inst->Src[1].Register.SwizzleY; |
| swizzles[2] = inst->Src[1].Register.SwizzleZ; |
| swizzles[3] = inst->Src[1].Register.SwizzleW; |
| |
| for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) { |
| if (inst->Dst[0].Register.WriteMask & (1 << chan)) { |
| if (swizzles[chan] >= 2) { |
| store_dest(mach, &ZeroVec, |
| &inst->Dst[0], inst, chan); |
| } else { |
| store_dest(mach, &r[swizzles[chan]], |
| &inst->Dst[0], inst, chan); |
| } |
| } |
| } |
| } else { |
| if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) { |
| store_dest(mach, &r[0], &inst->Dst[0], inst, TGSI_CHAN_X); |
| } |
| if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) { |
| store_dest(mach, &r[1], &inst->Dst[0], inst, TGSI_CHAN_Y); |
| } |
| } |
| } |
| |
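| /* |
| * Execute a TXD instruction: sample a texture using explicitly given |
| * derivatives (src1 = d/dx, src2 = d/dy of the coordinates), with the |
| * sampler unit in src3. |
| */ |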
| static void |
| exec_txd(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst) |
| { |
| union tgsi_exec_channel r[4]; |
| float derivs[3][2][TGSI_QUAD_SIZE]; |
| uint chan; |
| uint unit; |
| int8_t offsets[3]; |
| |
| unit = fetch_sampler_unit(mach, inst, 3); |
| /* always fetch all 3 offsets, overkill but keeps code simple */ |
| fetch_texel_offsets(mach, inst, offsets); |
| |
| switch (inst->Texture.Texture) { |
| case TGSI_TEXTURE_1D: |
| FETCH(&r[0], 0, TGSI_CHAN_X); |
| |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_X, derivs[0]); |
| |
| fetch_texel(mach->Sampler, unit, unit, |
| &r[0], &ZeroVec, &ZeroVec, &ZeroVec, &ZeroVec, /* S, T, P, C, LOD */ |
| derivs, offsets, TGSI_SAMPLER_DERIVS_EXPLICIT, |
| &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */ |
| break; |
| |
| case TGSI_TEXTURE_SHADOW1D: |
| case TGSI_TEXTURE_1D_ARRAY: |
| case TGSI_TEXTURE_SHADOW1D_ARRAY: |
| /* SHADOW1D/1D_ARRAY would not need Y/Z respectively, but don't bother */ |
| FETCH(&r[0], 0, TGSI_CHAN_X); |
| FETCH(&r[1], 0, TGSI_CHAN_Y); |
| FETCH(&r[2], 0, TGSI_CHAN_Z); |
| |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_X, derivs[0]); |
| |
| fetch_texel(mach->Sampler, unit, unit, |
| &r[0], &r[1], &r[2], &ZeroVec, &ZeroVec, /* S, T, P, C, LOD */ |
| derivs, offsets, TGSI_SAMPLER_DERIVS_EXPLICIT, |
| &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */ |
| break; |
| |
| case TGSI_TEXTURE_2D: |
| case TGSI_TEXTURE_RECT: |
| FETCH(&r[0], 0, TGSI_CHAN_X); |
| FETCH(&r[1], 0, TGSI_CHAN_Y); |
| |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_X, derivs[0]); |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_Y, derivs[1]); |
| |
| fetch_texel(mach->Sampler, unit, unit, |
| &r[0], &r[1], &ZeroVec, &ZeroVec, &ZeroVec, /* S, T, P, C, LOD */ |
| derivs, offsets, TGSI_SAMPLER_DERIVS_EXPLICIT, |
| &r[0], &r[1], &r[2], &r[3]); /* R, G, B, A */ |
| break; |
| |
| case TGSI_TEXTURE_SHADOW2D: |
| case TGSI_TEXTURE_SHADOWRECT: |
| case TGSI_TEXTURE_2D_ARRAY: |
| case TGSI_TEXTURE_SHADOW2D_ARRAY: |
| /* only SHADOW2D_ARRAY actually needs W */ |
| FETCH(&r[0], 0, TGSI_CHAN_X); |
| FETCH(&r[1], 0, TGSI_CHAN_Y); |
| FETCH(&r[2], 0, TGSI_CHAN_Z); |
| FETCH(&r[3], 0, TGSI_CHAN_W); |
| |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_X, derivs[0]); |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_Y, derivs[1]); |
| |
| fetch_texel(mach->Sampler, unit, unit, |
| &r[0], &r[1], &r[2], &r[3], &ZeroVec, /* inputs */ |
| derivs, offsets, TGSI_SAMPLER_DERIVS_EXPLICIT, |
| &r[0], &r[1], &r[2], &r[3]); /* outputs */ |
| break; |
| |
| case TGSI_TEXTURE_3D: |
| case TGSI_TEXTURE_CUBE: |
| case TGSI_TEXTURE_CUBE_ARRAY: |
| case TGSI_TEXTURE_SHADOWCUBE: |
| /* only TEXTURE_CUBE_ARRAY and TEXTURE_SHADOWCUBE actually need W */ |
| FETCH(&r[0], 0, TGSI_CHAN_X); |
| FETCH(&r[1], 0, TGSI_CHAN_Y); |
| FETCH(&r[2], 0, TGSI_CHAN_Z); |
| FETCH(&r[3], 0, TGSI_CHAN_W); |
| |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_X, derivs[0]); |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_Y, derivs[1]); |
| fetch_assign_deriv_channel(mach, inst, 1, TGSI_CHAN_Z, derivs[2]); |
| |
| fetch_texel(mach->Sampler, unit, unit, |
| &r[0], &r[1], &r[2], &r[3], &ZeroVec, /* inputs */ |
| derivs, offsets, TGSI_SAMPLER_DERIVS_EXPLICIT, |
| &r[0], &r[1], &r[2], &r[3]); /* outputs */ |
| break; |
| |
| default: |
| assert(0); |
| } |
| |
| for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) { |
| if (inst->Dst[0].Register.WriteMask & (1 << chan)) { |
| store_dest(mach, &r[chan], &inst->Dst[0], inst, chan); |
| } |
| } |
| } |
| |
| |
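| /* |
| * Execute a TXF-style texel fetch (also used for SAMPLE_I and |
| * SAMPLE_I_MS): read a single texel with integer coordinates and no |
| * filtering; the W channel of src0 supplies the per-quad level |
| * argument passed to get_texel(). |
| */ |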
| static void |
| exec_txf(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst) |
| { |
| union tgsi_exec_channel r[4]; |
| uint chan; |
| uint unit; |
| float rgba[TGSI_NUM_CHANNELS][TGSI_QUAD_SIZE]; |
| int j; |
| int8_t offsets[3]; |
| unsigned target; |
| |
| unit = fetch_sampler_unit(mach, inst, 1); |
| /* always fetch all 3 offsets, overkill but keeps code simple */ |
| fetch_texel_offsets(mach, inst, offsets); |
| |
| IFETCH(&r[3], 0, TGSI_CHAN_W); |
| |
| if (inst->Instruction.Opcode == TGSI_OPCODE_SAMPLE_I || |
| inst->Instruction.Opcode == TGSI_OPCODE_SAMPLE_I_MS) { |
| target = mach->SamplerViews[unit].Resource; |
| } |
| else { |
| target = inst->Texture.Texture; |
| } |
| switch (target) { |
| case TGSI_TEXTURE_3D: |
| case TGSI_TEXTURE_2D_ARRAY: |
| case TGSI_TEXTURE_SHADOW2D_ARRAY: |
| case TGSI_TEXTURE_2D_ARRAY_MSAA: |
| IFETCH(&r[2], 0, TGSI_CHAN_Z); |
| FALLTHROUGH; |
| case TGSI_TEXTURE_2D: |
| case TGSI_TEXTURE_RECT: |
| case TGSI_TEXTURE_SHADOW1D_ARRAY: |
| case TGSI_TEXTURE_SHADOW2D: |
| case TGSI_TEXTURE_SHADOWRECT: |
| case TGSI_TEXTURE_1D_ARRAY: |
| case TGSI_TEXTURE_2D_MSAA: |
| IFETCH(&r[1], 0, TGSI_CHAN_Y); |
| FALLTHROUGH; |
| case TGSI_TEXTURE_BUFFER: |
| case TGSI_TEXTURE_1D: |
| case TGSI_TEXTURE_SHADOW1D: |
| IFETCH(&r[0], 0, TGSI_CHAN_X); |
| break; |
| default: |
| assert(0); |
| break; |
| } |
| |
| mach->Sampler->get_texel(mach->Sampler, unit, r[0].i, r[1].i, r[2].i, r[3].i, |
| offsets, rgba); |
| |
| for (j = 0; j < TGSI_QUAD_SIZE; j++) { |
| r[0].f[j] = rgba[0][j]; |
| r[1].f[j] = rgba[1][j]; |
| r[2].f[j] = rgba[2][j]; |
| r[3].f[j] = rgba[3][j]; |
| } |
| |
| if (inst->Instruction.Opcode == TGSI_OPCODE_SAMPLE_I || |
| inst->Instruction.Opcode == TGSI_OPCODE_SAMPLE_I_MS) { |
| unsigned char swizzles[4]; |
| swizzles[0] = inst->Src[1].Register.SwizzleX; |
| swizzles[1] = inst->Src[1].Register.SwizzleY; |
| swizzles[2] = inst->Src[1].Register.SwizzleZ; |
| swizzles[3] = inst->Src[1].Register.SwizzleW; |
| |
| for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) { |
| if (inst->Dst[0].Register.WriteMask & (1 << chan)) { |
| store_dest(mach, &r[swizzles[chan]], |
| &inst->Dst[0], inst, chan); |
| } |
| } |
| } |
| else { |
| for (chan = 0; chan < TGSI_NUM_CHANNELS; chan++) { |
| if (inst->Dst[0].Register.WriteMask & (1 << chan)) { |
| store_dest(mach, &r[chan], &inst->Dst[0], inst, chan); |
| } |
| } |
| } |
| } |
| |
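| /* |
| * Execute a TXQ instruction: query the dimensions of the texture bound |
| * to the sampler view (the set of channels written depends on the |
| * texture target). |
| */ |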
| static void |
| exec_txq(struct tgsi_exec_machine *mach, |
| const struct tgsi_full_instruction *inst) |
| { |
| int result[4]; |
|