| /* |
| * Xtensa ISA: |
| * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm |
| * |
| * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. |
| * All rights reserved. |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are met: |
| * * Redistributions of source code must retain the above copyright |
| * notice, this list of conditions and the following disclaimer. |
| * * Redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution. |
| * * Neither the name of the Open Source and Linux Lab nor the |
| * names of its contributors may be used to endorse or promote products |
| * derived from this software without specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
| * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
| * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
| #include "qemu/osdep.h" |
| |
| #include "cpu.h" |
| #include "exec/exec-all.h" |
| #include "disas/disas.h" |
| #include "tcg/tcg-op.h" |
| #include "qemu/log.h" |
| #include "qemu/qemu-print.h" |
| #include "exec/cpu_ldst.h" |
| #include "hw/semihosting/semihost.h" |
| #include "exec/translator.h" |
| |
| #include "exec/helper-proto.h" |
| #include "exec/helper-gen.h" |
| |
| #include "trace-tcg.h" |
| #include "exec/log.h" |
| |
| |
/*
 * Per-translation-block disassembly state; one instance exists for the
 * duration of translating a single TB.
 */
struct DisasContext {
    DisasContextBase base;
    const XtensaConfig *config; /* core configuration being translated */
    uint32_t pc;                /* PC of the instruction being translated */
    int cring;                  /* effective ring: forced to 0 while PS.EXCM */
    int ring;                   /* PS.RING captured from tb->flags */
    uint32_t lbeg_off;          /* LBEG offset recovered from tb->cs_base */
    uint32_t lend;              /* LEND address recovered from tb->cs_base */

    /*
     * SAR tracking: remembers what kind of value was last written to SAR
     * so that dependent shifts can avoid redundant recomputation.
     */
    bool sar_5bit;              /* SAR holds a plain 5-bit shift amount */
    bool sar_m32_5bit;          /* SAR holds 32 - sar_m32 (left-shift form) */
    bool sar_m32_allocated;     /* sar_m32 temp has been allocated */
    TCGv_i32 sar_m32;           /* temp with the 5-bit amount for m32 form */

    unsigned window;            /* valid window size from tb->flags */
    unsigned callinc;           /* PS.CALLINC from tb->flags */
    bool cwoe;                  /* window-overflow-enable from tb->flags */

    bool debug;                 /* debug option active in this TB */
    bool icount;                /* ICOUNT stepping active in this TB */
    TCGv_i32 next_icount;       /* temp holding the incremented ICOUNT */

    unsigned cpenable;          /* CPENABLE bits from tb->flags */

    uint32_t op_flags;          /* XTENSA_OP_* of the current instruction */
    xtensa_insnbuf insnbuf;     /* libisa decode buffer for the whole insn */
    xtensa_insnbuf slotbuf;     /* libisa decode buffer for one slot */
};
| |
/*
 * TCG global views of CPUXtensaState fields; created once by
 * xtensa_translate_init() and shared by all translations.
 */
static TCGv_i32 cpu_pc;
static TCGv_i32 cpu_R[16];      /* env->regs[]: windowed AR view */
static TCGv_i32 cpu_FR[16];     /* env->fregs[].f32 low halves */
static TCGv_i32 cpu_MR[4];      /* MAC16 registers (sregs[MR..MR+3]) */
static TCGv_i32 cpu_BR[16];     /* boolean registers; all alias sregs[BR] */
static TCGv_i32 cpu_BR4[4];     /* 4-bit-group views of sregs[BR] */
static TCGv_i32 cpu_BR8[2];     /* 8-bit-group views of sregs[BR] */
static TCGv_i32 cpu_SR[256];    /* special registers, where configured */
static TCGv_i32 cpu_UR[256];    /* user registers, where configured */
static TCGv_i32 cpu_windowbase_next;
static TCGv_i32 cpu_exclusive_addr;
static TCGv_i32 cpu_exclusive_val;

/* Lazily-built map from regfile name ("AR", "BR", ...) to the arrays above. */
static GHashTable *xtensa_regfile_table;

#include "exec/gen-icount.h"

/* Names for cpu_SR/cpu_UR, merged across cores (see below). */
static char *sr_name[256];
static char *ur_name[256];
| |
| void xtensa_collect_sr_names(const XtensaConfig *config) |
| { |
| xtensa_isa isa = config->isa; |
| int n = xtensa_isa_num_sysregs(isa); |
| int i; |
| |
| for (i = 0; i < n; ++i) { |
| int sr = xtensa_sysreg_number(isa, i); |
| |
| if (sr >= 0 && sr < 256) { |
| const char *name = xtensa_sysreg_name(isa, i); |
| char **pname = |
| (xtensa_sysreg_is_user(isa, i) ? ur_name : sr_name) + sr; |
| |
| if (*pname) { |
| if (strstr(*pname, name) == NULL) { |
| char *new_name = |
| malloc(strlen(*pname) + strlen(name) + 2); |
| |
| strcpy(new_name, *pname); |
| strcat(new_name, "/"); |
| strcat(new_name, name); |
| free(*pname); |
| *pname = new_name; |
| } |
| } else { |
| *pname = strdup(name); |
| } |
| } |
| } |
| } |
| |
/*
 * Create the TCG globals used by the translator.  Called once, after
 * xtensa_collect_sr_names() has populated sr_name[]/ur_name[].
 */
void xtensa_translate_init(void)
{
    static const char * const regnames[] = {
        "ar0", "ar1", "ar2", "ar3",
        "ar4", "ar5", "ar6", "ar7",
        "ar8", "ar9", "ar10", "ar11",
        "ar12", "ar13", "ar14", "ar15",
    };
    static const char * const fregnames[] = {
        "f0", "f1", "f2", "f3",
        "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11",
        "f12", "f13", "f14", "f15",
    };
    static const char * const mregnames[] = {
        "m0", "m1", "m2", "m3",
    };
    static const char * const bregnames[] = {
        "b0", "b1", "b2", "b3",
        "b4", "b5", "b6", "b7",
        "b8", "b9", "b10", "b11",
        "b12", "b13", "b14", "b15",
    };
    int i;

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
            offsetof(CPUXtensaState, pc), "pc");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUXtensaState, regs[i]),
                                          regnames[i]);
    }

    /* FP registers map the low 32-bit half of each 64-bit fregs slot. */
    for (i = 0; i < 16; i++) {
        cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    fregs[i].f32[FP_F32_LOW]),
                                           fregnames[i]);
    }

    for (i = 0; i < 4; i++) {
        cpu_MR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    sregs[MR + i]),
                                           mregnames[i]);
    }

    /*
     * Note: all BR/BR4/BR8 globals deliberately point at the same
     * sregs[BR] word; they are different views of the boolean register.
     */
    for (i = 0; i < 16; i++) {
        cpu_BR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    sregs[BR]),
                                           bregnames[i]);
        if (i % 4 == 0) {
            cpu_BR4[i / 4] = tcg_global_mem_new_i32(cpu_env,
                                                    offsetof(CPUXtensaState,
                                                             sregs[BR]),
                                                    bregnames[i]);
        }
        if (i % 8 == 0) {
            cpu_BR8[i / 8] = tcg_global_mem_new_i32(cpu_env,
                                                    offsetof(CPUXtensaState,
                                                             sregs[BR]),
                                                    bregnames[i]);
        }
    }

    /* Only registers that some core actually names get a TCG global. */
    for (i = 0; i < 256; ++i) {
        if (sr_name[i]) {
            cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
                                               offsetof(CPUXtensaState,
                                                        sregs[i]),
                                               sr_name[i]);
        }
    }

    for (i = 0; i < 256; ++i) {
        if (ur_name[i]) {
            cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
                                               offsetof(CPUXtensaState,
                                                        uregs[i]),
                                               ur_name[i]);
        }
    }

    cpu_windowbase_next =
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, windowbase_next),
                               "windowbase_next");
    cpu_exclusive_addr =
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, exclusive_addr),
                               "exclusive_addr");
    cpu_exclusive_val =
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, exclusive_val),
                               "exclusive_val");
}
| |
| void **xtensa_get_regfile_by_name(const char *name) |
| { |
| if (xtensa_regfile_table == NULL) { |
| xtensa_regfile_table = g_hash_table_new(g_str_hash, g_str_equal); |
| g_hash_table_insert(xtensa_regfile_table, |
| (void *)"AR", (void *)cpu_R); |
| g_hash_table_insert(xtensa_regfile_table, |
| (void *)"MR", (void *)cpu_MR); |
| g_hash_table_insert(xtensa_regfile_table, |
| (void *)"FR", (void *)cpu_FR); |
| g_hash_table_insert(xtensa_regfile_table, |
| (void *)"BR", (void *)cpu_BR); |
| g_hash_table_insert(xtensa_regfile_table, |
| (void *)"BR4", (void *)cpu_BR4); |
| g_hash_table_insert(xtensa_regfile_table, |
| (void *)"BR8", (void *)cpu_BR8); |
| } |
| return (void **)g_hash_table_lookup(xtensa_regfile_table, (void *)name); |
| } |
| |
| static inline bool option_enabled(DisasContext *dc, int opt) |
| { |
| return xtensa_option_enabled(dc->config, opt); |
| } |
| |
| static void init_sar_tracker(DisasContext *dc) |
| { |
| dc->sar_5bit = false; |
| dc->sar_m32_5bit = false; |
| dc->sar_m32_allocated = false; |
| } |
| |
/* Release the sar_m32 temp if gen_left_shift_sar() ever allocated it. */
static void reset_sar_tracker(DisasContext *dc)
{
    if (dc->sar_m32_allocated) {
        tcg_temp_free(dc->sar_m32);
    }
}
| |
/*
 * Emit SAR = sa & 0x1f (right-shift form) and record that SAR now holds
 * a plain 5-bit shift amount.  Any previously tracked m32 value is
 * discarded.
 */
static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = true;
    dc->sar_m32_5bit = false;
}
| |
/*
 * Emit SAR = 32 - (sa & 0x1f) (left-shift form).  The 5-bit amount is
 * kept in the local-temp sar_m32 so that 64-bit funnel shifts can reuse
 * it without recomputing from SAR.
 */
static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    TCGv_i32 tmp = tcg_const_i32(32);
    if (!dc->sar_m32_allocated) {
        /* local temp: must survive across branches in the generated code */
        dc->sar_m32 = tcg_temp_local_new_i32();
        dc->sar_m32_allocated = true;
    }
    tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
    tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
    dc->sar_5bit = false;
    dc->sar_m32_5bit = true;
    tcg_temp_free(tmp);
}
| |
/* Emit a call raising the given QEMU-internal exception (EXCP_*). */
static void gen_exception(DisasContext *dc, int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free(tmp);
}
| |
/*
 * Emit a call raising an architectural exception with the given cause,
 * with EPC set to the current instruction's PC.  Illegal-instruction and
 * syscall causes always trap, so the TB ends there (DISAS_NORETURN).
 */
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_helper_exception_cause(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
        cause == SYSCALL_CAUSE) {
        dc->base.is_jmp = DISAS_NORETURN;
    }
}
| |
| static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause, |
| TCGv_i32 vaddr) |
| { |
| TCGv_i32 tpc = tcg_const_i32(dc->pc); |
| TCGv_i32 tcause = tcg_const_i32(cause); |
| gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr); |
| tcg_temp_free(tpc); |
| tcg_temp_free(tcause); |
| } |
| |
/*
 * Emit a call raising a debug exception with the given DEBUGCAUSE bits.
 * Causes that unconditionally transfer control (instruction break,
 * break instructions) terminate the TB.
 */
static void gen_debug_exception(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_helper_debug_exception(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
        dc->base.is_jmp = DISAS_NORETURN;
    }
}
| |
/*
 * Check that the current ring may execute a privileged instruction.
 * Returns true if translation may continue.  In system emulation,
 * cring == 0 (ring 0, or any ring while PS.EXCM is set) is privileged;
 * user-only emulation always raises PRIVILEGED_CAUSE.
 */
static bool gen_check_privilege(DisasContext *dc)
{
#ifndef CONFIG_USER_ONLY
    if (!dc->cring) {
        return true;
    }
#endif
    gen_exception_cause(dc, PRIVILEGED_CAUSE);
    dc->base.is_jmp = DISAS_NORETURN;
    return false;
}
| |
/*
 * Check that every coprocessor in cp_mask is enabled in CPENABLE.
 * If one is not, raise the "coprocessor N disabled" exception for the
 * lowest disabled coprocessor and return false.
 */
static bool gen_check_cpenable(DisasContext *dc, uint32_t cp_mask)
{
    cp_mask &= ~dc->cpenable;

    if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) && cp_mask) {
        gen_exception_cause(dc, COPROCESSOR0_DISABLED + ctz32(cp_mask));
        dc->base.is_jmp = DISAS_NORETURN;
        return false;
    }
    return true;
}
| |
static int gen_postprocess(DisasContext *dc, int slot);

/*
 * Emit an indirect jump to 'dest'.  'slot' selects a goto_tb chaining
 * slot (0 or 1), or -1 to force a plain exit_tb.  Postprocessing
 * (interrupt checks etc.) may downgrade the slot to -1.  Always ends
 * the TB.
 */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    if (dc->icount) {
        /* commit the ICOUNT value computed at insn start */
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }
    if (dc->base.singlestep_enabled) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
            slot = gen_postprocess(dc, slot);
        }
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            tcg_gen_exit_tb(dc->base.tb, slot);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
    }
    dc->base.is_jmp = DISAS_NORETURN;
}
| |
/* Indirect jump with no goto_tb chaining. */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}
| |
| static int adjust_jump_slot(DisasContext *dc, uint32_t dest, int slot) |
| { |
| if (((dc->base.pc_first ^ dest) & TARGET_PAGE_MASK) != 0) { |
| return -1; |
| } else { |
| return slot; |
| } |
| } |
| |
/* Jump to an immediate address, chaining only within the same page. */
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
    TCGv_i32 tmp = tcg_const_i32(dest);
    gen_jump_slot(dc, tmp, adjust_jump_slot(dc, dest, slot));
    tcg_temp_free(tmp);
}
| |
/*
 * Emit a windowed call: record callinc in PS.CALLINC, store the return
 * address (with the window-increment encoded in its top two bits) into
 * a{callinc*4}, then jump to 'dest'.
 */
static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
        int slot)
{
    TCGv_i32 tcallinc = tcg_const_i32(callinc);

    tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
            tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
    tcg_temp_free(tcallinc);
    /* return address: callinc in bits 31:30, next PC in bits 29:0 */
    tcg_gen_movi_i32(cpu_R[callinc << 2],
            (callinc << 30) | (dc->base.pc_next & 0x3fffffff));
    gen_jump_slot(dc, dest, slot);
}
| |
/*
 * If the next PC is the end of a zero-overhead loop, emit the loop-back
 * logic: decrement LCOUNT and branch to LBEG unless LCOUNT was zero.
 * Returns true if loop-end code was emitted (TB ends here).
 */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (dc->base.pc_next == dc->lend) {
        TCGLabel *label = gen_new_label();

        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        if (dc->lbeg_off) {
            /* LBEG known at translation time: jump to an immediate */
            gen_jumpi(dc, dc->base.pc_next - dc->lbeg_off, slot);
        } else {
            /* LBEG only known at run time: indirect jump via the SR */
            gen_jump(dc, cpu_SR[LBEG]);
        }
        gen_set_label(label);
        gen_jumpi(dc, dc->base.pc_next, -1);
        return true;
    }
    return false;
}
| |
| static void gen_jumpi_check_loop_end(DisasContext *dc, int slot) |
| { |
| if (!gen_check_loop_end(dc, slot)) { |
| gen_jumpi(dc, dc->base.pc_next, slot); |
| } |
| } |
| |
/*
 * Conditional branch: if cond(t0, t1) jump to 'addr' (chaining slot 1),
 * otherwise fall through to the next instruction (chaining slot 0,
 * honoring loop end).
 */
static void gen_brcond(DisasContext *dc, TCGCond cond,
                       TCGv_i32 t0, TCGv_i32 t1, uint32_t addr)
{
    TCGLabel *label = gen_new_label();

    tcg_gen_brcond_i32(cond, t0, t1, label);
    gen_jumpi_check_loop_end(dc, 0);
    gen_set_label(label);
    gen_jumpi(dc, addr, 1);
}
| |
/* gen_brcond() variant comparing against an immediate. */
static void gen_brcondi(DisasContext *dc, TCGCond cond,
                        TCGv_i32 t0, uint32_t t1, uint32_t addr)
{
    TCGv_i32 tmp = tcg_const_i32(t1);
    gen_brcond(dc, cond, t0, tmp, addr);
    tcg_temp_free(tmp);
}
| |
| static bool test_ill_sr(DisasContext *dc, const OpcodeArg arg[], |
| const uint32_t par[]) |
| { |
| return !xtensa_option_enabled(dc->config, par[1]); |
| } |
| |
/*
 * test_ill callback for CCOMPAREn: also illegal when n is beyond the
 * number of compare registers this core configures.
 */
static bool test_ill_ccompare(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    unsigned n = par[0] - CCOMPARE;

    return test_ill_sr(dc, arg, par) || n >= dc->config->nccompare;
}
| |
/*
 * test_ill callback for DBREAKAn/DBREAKCn: illegal when the register
 * index exceeds the configured number of data breakpoints.  n starts at
 * MAX_NDBREAK (always out of range) and is narrowed if par[0] falls in
 * either register range.
 */
static bool test_ill_dbreak(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    unsigned n = MAX_NDBREAK;

    if (par[0] >= DBREAKA && par[0] < DBREAKA + MAX_NDBREAK) {
        n = par[0] - DBREAKA;
    }
    if (par[0] >= DBREAKC && par[0] < DBREAKC + MAX_NDBREAK) {
        n = par[0] - DBREAKC;
    }
    return test_ill_sr(dc, arg, par) || n >= dc->config->ndbreak;
}
| |
/*
 * test_ill callback for IBREAKAn: illegal when the index is beyond the
 * configured number of instruction breakpoints.
 */
static bool test_ill_ibreak(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    unsigned n = par[0] - IBREAKA;

    return test_ill_sr(dc, arg, par) || n >= dc->config->nibreak;
}
| |
/*
 * test_ill callback for high-priority-interrupt SRs (EXCSAVEn, EPCn,
 * EPSn): illegal when the implied interrupt level exceeds the core's
 * configured nlevel.  n starts out of range and is narrowed if par[0]
 * falls into one of the per-level register ranges (EPS starts at
 * level 2, hence the +2).
 */
static bool test_ill_hpi(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    unsigned n = MAX_NLEVEL + 1;

    if (par[0] >= EXCSAVE1 && par[0] < EXCSAVE1 + MAX_NLEVEL) {
        n = par[0] - EXCSAVE1 + 1;
    }
    if (par[0] >= EPC1 && par[0] < EPC1 + MAX_NLEVEL) {
        n = par[0] - EPC1 + 1;
    }
    if (par[0] >= EPS2 && par[0] < EPS2 + MAX_NLEVEL - 1) {
        n = par[0] - EPS2 + 2;
    }
    return test_ill_sr(dc, arg, par) || n > dc->config->nlevel;
}
| |
| static void gen_load_store_alignment(DisasContext *dc, int shift, |
| TCGv_i32 addr, bool no_hw_alignment) |
| { |
| if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) { |
| tcg_gen_andi_i32(addr, addr, ~0 << shift); |
| } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) && |
| no_hw_alignment) { |
| TCGLabel *label = gen_new_label(); |
| TCGv_i32 tmp = tcg_temp_new_i32(); |
| tcg_gen_andi_i32(tmp, addr, ~(~0 << shift)); |
| tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label); |
| gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr); |
| gen_set_label(label); |
| tcg_temp_free(tmp); |
| } |
| } |
| |
#ifndef CONFIG_USER_ONLY
/*
 * Emit a call to the WAITI helper (wait for interrupt at level imm4).
 * With icount, I/O must be flagged before the helper since it can stop
 * the CPU.
 */
static void gen_waiti(DisasContext *dc, uint32_t imm4)
{
    TCGv_i32 pc = tcg_const_i32(dc->base.pc_next);
    TCGv_i32 intlevel = tcg_const_i32(imm4);

    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_waiti(cpu_env, pc, intlevel);
    tcg_temp_free(pc);
    tcg_temp_free(intlevel);
}
#endif
| |
| static bool gen_window_check(DisasContext *dc, uint32_t mask) |
| { |
| unsigned r = 31 - clz32(mask); |
| |
| if (r / 4 > dc->window) { |
| TCGv_i32 pc = tcg_const_i32(dc->pc); |
| TCGv_i32 w = tcg_const_i32(r / 4); |
| |
| gen_helper_window_check(cpu_env, pc, w); |
| dc->base.is_jmp = DISAS_NORETURN; |
| return false; |
| } |
| return true; |
| } |
| |
| static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned) |
| { |
| TCGv_i32 m = tcg_temp_new_i32(); |
| |
| if (hi) { |
| (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16); |
| } else { |
| (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v); |
| } |
| return m; |
| } |
| |
/* Emit a divide-by-zero check on the divisor operand (arg[2]). */
static void gen_zero_check(DisasContext *dc, const OpcodeArg arg[])
{
    TCGLabel *label = gen_new_label();

    tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0, label);
    gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
    gen_set_label(label);
}
| |
/* Instruction length as decoded by libisa from its first byte. */
static inline unsigned xtensa_op0_insn_len(DisasContext *dc, uint8_t op0)
{
    return xtensa_isa_length_from_chars(dc->config->isa, &op0);
}
| |
/*
 * Emit the per-instruction postprocessing requested via op_flags:
 * interrupt recheck (system emulation only, bracketed by gen_io_start/
 * gen_io_end under icount) and register window synchronization.
 * Returns the (possibly downgraded) goto_tb slot: XTENSA_OP_EXIT_TB_M1
 * forces a slow exit.
 */
static int gen_postprocess(DisasContext *dc, int slot)
{
    uint32_t op_flags = dc->op_flags;

#ifndef CONFIG_USER_ONLY
    if (op_flags & XTENSA_OP_CHECK_INTERRUPTS) {
        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_check_interrupts(cpu_env);
        if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
            gen_io_end();
        }
    }
#endif
    if (op_flags & XTENSA_OP_SYNC_REGISTER_WINDOW) {
        gen_helper_sync_windowbase(cpu_env);
    }
    if (op_flags & XTENSA_OP_EXIT_TB_M1) {
        slot = -1;
    }
    return slot;
}
| |
/*
 * Record of an input argument copied into a temporary in order to break
 * an inter-slot dependency (see break_dependency()).
 */
struct opcode_arg_copy {
    uint32_t resource;  /* encoded resource id (see encode_resource()) */
    void *temp;         /* TCG temp holding the copied value */
    OpcodeArg *arg;     /* argument whose ->in gets redirected to temp */
};

/* One input or output resource of a slot opcode. */
struct opcode_arg_info {
    uint32_t resource;  /* encoded resource id */
    int index;          /* visible-operand index, or -1 when not visible */
};

/* Decoded properties of one slot of a (possibly multi-slot) instruction. */
struct slot_prop {
    XtensaOpcodeOps *ops;
    OpcodeArg arg[MAX_OPCODE_ARGS];
    struct opcode_arg_info in[MAX_OPCODE_ARGS];   /* sorted by resource */
    struct opcode_arg_info out[MAX_OPCODE_ARGS];  /* sorted by resource */
    unsigned n_in;
    unsigned n_out;
    uint32_t op_flags;  /* LOAD_STORE / CONTROL_FLOW scheduling flags */
};
| |
/* Kinds of resources a slot opcode may read or write. */
enum resource_type {
    RES_REGFILE,
    RES_STATE,
    RES_MAX,
};

/*
 * Pack a resource id: type in bits 31..24, group in bits 23..16, index
 * in bits 15..0.  Ids of the same kind sort adjacently, which lets the
 * dependency scans merge two sorted resource lists pairwise.
 */
static uint32_t encode_resource(enum resource_type r, unsigned g, unsigned n)
{
    assert(r < RES_MAX && g < 256 && n < 65536);
    return ((uint32_t)r << 24) | ((uint32_t)g << 16) | n;
}

/* Recover the resource_type packed by encode_resource(). */
static enum resource_type get_resource_type(uint32_t resource)
{
    return (enum resource_type)(resource >> 24);
}
| |
/*
 * a depends on b if b must be executed before a,
 * because a's side effects will destroy b's inputs.
 */
static bool op_depends_on(const struct slot_prop *a,
                          const struct slot_prop *b)
{
    unsigned i = 0;
    unsigned j = 0;

    /* control flow must come last */
    if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
        return true;
    }
    /* a load/store must not be reordered before a non-load/store */
    if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
        (b->op_flags & XTENSA_OP_LOAD_STORE)) {
        return true;
    }
    /*
     * Merge-scan the sorted resource lists: any resource both written
     * by a and read by b creates a dependency.
     */
    while (i < a->n_out && j < b->n_in) {
        if (a->out[i].resource < b->in[j].resource) {
            ++i;
        } else if (a->out[i].resource > b->in[j].resource) {
            ++j;
        } else {
            return true;
        }
    }
    return false;
}
| |
/*
 * Try to break a dependency on b, append temporary register copy records
 * to the end of copy and update n_copy in case of success.
 * This is not always possible: e.g. control flow must always be the last,
 * load/store must be first and state dependencies are not supported yet.
 */
static bool break_dependency(struct slot_prop *a,
                             struct slot_prop *b,
                             struct opcode_arg_copy *copy,
                             unsigned *n_copy)
{
    unsigned i = 0;
    unsigned j = 0;
    unsigned n = *n_copy;
    bool rv = false;

    if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
        return false;
    }
    if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
        (b->op_flags & XTENSA_OP_LOAD_STORE)) {
        return false;
    }
    /* Same merge-scan as op_depends_on(), but record a copy per conflict. */
    while (i < a->n_out && j < b->n_in) {
        if (a->out[i].resource < b->in[j].resource) {
            ++i;
        } else if (a->out[i].resource > b->in[j].resource) {
            ++j;
        } else {
            int index = b->in[j].index;

            /* only visible register-file inputs can be snapshotted */
            if (get_resource_type(a->out[i].resource) != RES_REGFILE ||
                index < 0) {
                return false;
            }
            copy[n].resource = b->in[j].resource;
            copy[n].arg = b->arg + index;
            ++n;
            ++j;
            rv = true;
        }
    }
    *n_copy = n;
    return rv;
}
| |
/*
 * Calculate evaluation order for slot opcodes.
 * Build opcode order graph and output its nodes in topological sort order.
 * An edge a -> b in the graph means that opcode a must be followed by
 * opcode b.
 */
static bool tsort(struct slot_prop *slot,
                  struct slot_prop *sorted[],
                  unsigned n,
                  struct opcode_arg_copy *copy,
                  unsigned *n_copy)
{
    struct tsnode {
        unsigned n_in_edge;                 /* remaining unmet dependencies */
        unsigned n_out_edge;                /* number of dependents */
        unsigned out_edge[MAX_INSN_SLOTS];  /* dependent node indices */
    } node[MAX_INSN_SLOTS];

    unsigned in[MAX_INSN_SLOTS];  /* work queue of ready (in-degree 0) nodes */
    unsigned i, j;
    unsigned n_in = 0;
    unsigned n_out = 0;
    unsigned n_edge = 0;
    unsigned in_idx = 0;
    unsigned node_idx = 0;

    for (i = 0; i < n; ++i) {
        node[i].n_in_edge = 0;
        node[i].n_out_edge = 0;
    }

    /* Build the dependency graph from pairwise op_depends_on() checks. */
    for (i = 0; i < n; ++i) {
        unsigned n_out_edge = 0;

        for (j = 0; j < n; ++j) {
            if (i != j && op_depends_on(slot + j, slot + i)) {
                node[i].out_edge[n_out_edge] = j;
                ++node[j].n_in_edge;
                ++n_out_edge;
                ++n_edge;
            }
        }
        node[i].n_out_edge = n_out_edge;
    }

    /* Seed the queue with nodes that have no dependencies. */
    for (i = 0; i < n; ++i) {
        if (!node[i].n_in_edge) {
            in[n_in] = i;
            ++n_in;
        }
    }

again:
    /* Kahn's algorithm: emit ready nodes, releasing their dependents. */
    for (; in_idx < n_in; ++in_idx) {
        i = in[in_idx];
        sorted[n_out] = slot + i;
        ++n_out;
        for (j = 0; j < node[i].n_out_edge; ++j) {
            --n_edge;
            if (--node[node[i].out_edge[j]].n_in_edge == 0) {
                in[n_in] = node[i].out_edge[j];
                ++n_in;
            }
        }
    }
    /*
     * Edges remain -> there is a cycle.  Try to break one edge by
     * snapshotting a conflicting input into a temp, then resume the
     * sort from where it stopped.
     */
    if (n_edge) {
        for (; node_idx < n; ++node_idx) {
            struct tsnode *cnode = node + node_idx;

            if (cnode->n_in_edge) {
                for (j = 0; j < cnode->n_out_edge; ++j) {
                    unsigned k = cnode->out_edge[j];

                    if (break_dependency(slot + k, slot + node_idx,
                                         copy, n_copy) &&
                        --node[k].n_in_edge == 0) {
                        in[n_in] = k;
                        ++n_in;
                        --n_edge;
                        /* drop the broken edge (swap with last) */
                        cnode->out_edge[j] =
                            cnode->out_edge[cnode->n_out_edge - 1];
                        --cnode->n_out_edge;
                        goto again;
                    }
                }
            }
        }
    }
    /* success iff every edge was consumed (no unbreakable cycle left) */
    return n_edge == 0;
}
| |
| static void opcode_add_resource(struct slot_prop *op, |
| uint32_t resource, char direction, |
| int index) |
| { |
| switch (direction) { |
| case 'm': |
| case 'i': |
| assert(op->n_in < ARRAY_SIZE(op->in)); |
| op->in[op->n_in].resource = resource; |
| op->in[op->n_in].index = index; |
| ++op->n_in; |
| /* fall through */ |
| case 'o': |
| if (direction == 'm' || direction == 'o') { |
| assert(op->n_out < ARRAY_SIZE(op->out)); |
| op->out[op->n_out].resource = resource; |
| op->out[op->n_out].index = index; |
| ++op->n_out; |
| } |
| break; |
| default: |
| g_assert_not_reached(); |
| } |
| } |
| |
| static int resource_compare(const void *a, const void *b) |
| { |
| const struct opcode_arg_info *pa = a; |
| const struct opcode_arg_info *pb = b; |
| |
| return pa->resource < pb->resource ? |
| -1 : (pa->resource > pb->resource ? 1 : 0); |
| } |
| |
| static int arg_copy_compare(const void *a, const void *b) |
| { |
| const struct opcode_arg_copy *pa = a; |
| const struct opcode_arg_copy *pb = b; |
| |
| return pa->resource < pb->resource ? |
| -1 : (pa->resource > pb->resource ? 1 : 0); |
| } |
| |
/*
 * Translate one (possibly multi-slot) instruction at dc->pc:
 * fetch and decode with libisa, collect per-slot resource usage,
 * topologically order the slots, emit the common prologue checks
 * (privilege, syscall, debug, window, coprocessor, divide-by-zero),
 * then run each slot's translate callback.
 */
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
{
    xtensa_isa isa = dc->config->isa;
    /* first byte determines the instruction length */
    unsigned char b[MAX_INSN_LENGTH] = {translator_ldub(env, dc->pc)};
    unsigned len = xtensa_op0_insn_len(dc, b[0]);
    xtensa_format fmt;
    int slot, slots;
    unsigned i;
    uint32_t op_flags = 0;
    struct slot_prop slot_prop[MAX_INSN_SLOTS];
    struct slot_prop *ordered[MAX_INSN_SLOTS];
    struct opcode_arg_copy arg_copy[MAX_INSN_SLOTS * MAX_OPCODE_ARGS];
    unsigned n_arg_copy = 0;
    uint32_t debug_cause = 0;
    uint32_t windowed_register = 0;  /* bit mask of referenced AR registers */
    uint32_t coprocessor = 0;        /* union of required coprocessor masks */

    if (len == XTENSA_UNDEFINED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unknown instruction length (pc = %08x)\n",
                      dc->pc);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return;
    }

    /* fetch the remaining bytes and decode the format */
    dc->base.pc_next = dc->pc + len;
    for (i = 1; i < len; ++i) {
        b[i] = translator_ldub(env, dc->pc + i);
    }
    xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
    fmt = xtensa_format_decode(isa, dc->insnbuf);
    if (fmt == XTENSA_UNDEFINED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unrecognized instruction format (pc = %08x)\n",
                      dc->pc);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return;
    }
    slots = xtensa_format_num_slots(isa, fmt);

    /* Pass 1: decode each slot's opcode and operands. */
    for (slot = 0; slot < slots; ++slot) {
        xtensa_opcode opc;
        int opnd, vopnd, opnds;
        OpcodeArg *arg = slot_prop[slot].arg;
        XtensaOpcodeOps *ops;

        xtensa_format_get_slot(isa, fmt, slot, dc->insnbuf, dc->slotbuf);
        opc = xtensa_opcode_decode(isa, fmt, slot, dc->slotbuf);
        if (opc == XTENSA_UNDEFINED) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "unrecognized opcode in slot %d (pc = %08x)\n",
                          slot, dc->pc);
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
        opnds = xtensa_opcode_num_operands(isa, opc);

        /* decode operands; vopnd counts only the visible ones */
        for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
            void **register_file = NULL;

            if (xtensa_operand_is_register(isa, opc, opnd)) {
                xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);

                register_file = dc->config->regfile[rf];

                /* note every AR register touched, for the window check */
                if (rf == dc->config->a_regfile) {
                    uint32_t v;

                    xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                             dc->slotbuf, &v);
                    xtensa_operand_decode(isa, opc, opnd, &v);
                    windowed_register |= 1u << v;
                }
            }
            if (xtensa_operand_is_visible(isa, opc, opnd)) {
                uint32_t v;

                xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                         dc->slotbuf, &v);
                xtensa_operand_decode(isa, opc, opnd, &v);
                arg[vopnd].raw_imm = v;
                if (xtensa_operand_is_PCrelative(isa, opc, opnd)) {
                    xtensa_operand_undo_reloc(isa, opc, opnd, &v, dc->pc);
                }
                arg[vopnd].imm = v;
                if (register_file) {
                    arg[vopnd].in = register_file[v];
                    arg[vopnd].out = register_file[v];
                }
                ++vopnd;
            }
        }
        ops = dc->config->opcode_ops[opc];
        slot_prop[slot].ops = ops;

        if (ops) {
            op_flags |= ops->op_flags;
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented opcode '%s' in slot %d (pc = %08x)\n",
                          xtensa_opcode_name(isa, opc), slot, dc->pc);
            op_flags |= XTENSA_OP_ILL;
        }
        /* NULL ops implies XTENSA_OP_ILL, so we return before using ops */
        if ((op_flags & XTENSA_OP_ILL) ||
            (ops && ops->test_ill && ops->test_ill(dc, arg, ops->par))) {
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
        if (ops->op_flags & XTENSA_OP_DEBUG_BREAK) {
            debug_cause |= ops->par[0];
        }
        if (ops->test_overflow) {
            windowed_register |= ops->test_overflow(dc, arg, ops->par);
        }
        coprocessor |= ops->coprocessor;

        /* For multi-slot formats, collect resources for scheduling. */
        if (slots > 1) {
            slot_prop[slot].n_in = 0;
            slot_prop[slot].n_out = 0;
            slot_prop[slot].op_flags = ops->op_flags & XTENSA_OP_LOAD_STORE;

            opnds = xtensa_opcode_num_operands(isa, opc);

            for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
                bool visible = xtensa_operand_is_visible(isa, opc, opnd);

                if (xtensa_operand_is_register(isa, opc, opnd)) {
                    xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);
                    uint32_t v = 0;

                    xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                             dc->slotbuf, &v);
                    xtensa_operand_decode(isa, opc, opnd, &v);
                    opcode_add_resource(slot_prop + slot,
                                        encode_resource(RES_REGFILE, rf, v),
                                        xtensa_operand_inout(isa, opc, opnd),
                                        visible ? vopnd : -1);
                }
                if (visible) {
                    ++vopnd;
                }
            }

            opnds = xtensa_opcode_num_stateOperands(isa, opc);

            for (opnd = 0; opnd < opnds; ++opnd) {
                xtensa_state state = xtensa_stateOperand_state(isa, opc, opnd);

                opcode_add_resource(slot_prop + slot,
                                    encode_resource(RES_STATE, 0, state),
                                    xtensa_stateOperand_inout(isa, opc, opnd),
                                    -1);
            }
            if (xtensa_opcode_is_branch(isa, opc) ||
                xtensa_opcode_is_jump(isa, opc) ||
                xtensa_opcode_is_loop(isa, opc) ||
                xtensa_opcode_is_call(isa, opc)) {
                slot_prop[slot].op_flags |= XTENSA_OP_CONTROL_FLOW;
            }

            /* sorted lists enable the merge-scan in op_depends_on() */
            qsort(slot_prop[slot].in, slot_prop[slot].n_in,
                  sizeof(slot_prop[slot].in[0]), resource_compare);
            qsort(slot_prop[slot].out, slot_prop[slot].n_out,
                  sizeof(slot_prop[slot].out[0]), resource_compare);
        }
    }

    /* Pass 2: order the slots so dependencies are respected. */
    if (slots > 1) {
        if (!tsort(slot_prop, ordered, slots, arg_copy, &n_arg_copy)) {
            qemu_log_mask(LOG_UNIMP,
                          "Circular resource dependencies (pc = %08x)\n",
                          dc->pc);
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
    } else {
        ordered[0] = slot_prop + 0;
    }

    /* Common prologue checks, in architectural priority order. */
    if ((op_flags & XTENSA_OP_PRIVILEGED) &&
        !gen_check_privilege(dc)) {
        return;
    }

    if (op_flags & XTENSA_OP_SYSCALL) {
        gen_exception_cause(dc, SYSCALL_CAUSE);
        return;
    }

    if ((op_flags & XTENSA_OP_DEBUG_BREAK) && dc->debug) {
        gen_debug_exception(dc, debug_cause);
        return;
    }

    if (windowed_register && !gen_window_check(dc, windowed_register)) {
        return;
    }

    if (op_flags & XTENSA_OP_UNDERFLOW) {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_test_underflow_retw(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    if (op_flags & XTENSA_OP_ALLOCA) {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_movsp(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    if (coprocessor && !gen_check_cpenable(dc, coprocessor)) {
        return;
    }

    /*
     * Materialize the input copies recorded by break_dependency():
     * one temp per distinct resource, shared by all args reading it.
     */
    if (n_arg_copy) {
        uint32_t resource;
        void *temp;
        unsigned j;

        qsort(arg_copy, n_arg_copy, sizeof(*arg_copy), arg_copy_compare);
        for (i = j = 0; i < n_arg_copy; ++i) {
            if (i == 0 || arg_copy[i].resource != resource) {
                resource = arg_copy[i].resource;
                temp = tcg_temp_local_new();
                tcg_gen_mov_i32(temp, arg_copy[i].arg->in);
                arg_copy[i].temp = temp;

                /* compact: keep only one record per resource */
                if (i != j) {
                    arg_copy[j] = arg_copy[i];
                }
                ++j;
            }
            arg_copy[i].arg->in = temp;
        }
        n_arg_copy = j;
    }

    if (op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
        for (slot = 0; slot < slots; ++slot) {
            if (slot_prop[slot].ops->op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
                gen_zero_check(dc, slot_prop[slot].arg);
            }
        }
    }

    dc->op_flags = op_flags;

    /* Pass 3: emit each slot in dependency order. */
    for (slot = 0; slot < slots; ++slot) {
        struct slot_prop *pslot = ordered[slot];
        XtensaOpcodeOps *ops = pslot->ops;

        ops->translate(dc, pslot->arg, ops->par);
    }

    for (i = 0; i < n_arg_copy; ++i) {
        tcg_temp_free(arg_copy[i].temp);
    }

    /* Epilogue: postprocessing and zero-overhead loop handling. */
    if (dc->base.is_jmp == DISAS_NEXT) {
        gen_postprocess(dc, 0);
        dc->op_flags = 0;
        if (op_flags & XTENSA_OP_EXIT_TB_M1) {
            /* Change in mmu index, memory mapping or tb->flags; exit tb */
            gen_jumpi_check_loop_end(dc, -1);
        } else if (op_flags & XTENSA_OP_EXIT_TB_0) {
            gen_jumpi_check_loop_end(dc, 0);
        } else {
            gen_check_loop_end(dc, 0);
        }
    }
    dc->pc = dc->base.pc_next;
}
| |
| static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc) |
| { |
| uint8_t b0 = cpu_ldub_code(env, dc->pc); |
| return xtensa_op0_insn_len(dc, b0); |
| } |
| |
/*
 * Raise a DEBUGCAUSE_IB debug exception if any enabled instruction
 * breakpoint matches the current PC (checked at translation time, so a
 * changed IBREAKA forces a new TB via tb->flags elsewhere).
 */
static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
{
    unsigned i;

    for (i = 0; i < dc->config->nibreak; ++i) {
        if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
            env->sregs[IBREAKA + i] == dc->pc) {
            gen_debug_exception(dc, DEBUGCAUSE_IB);
            break;
        }
    }
}
| |
/*
 * TranslatorOps init hook: unpack per-TB state from tb->flags and
 * tb->cs_base into the DisasContext and allocate decode buffers.
 */
static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
                                         CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUXtensaState *env = cpu->env_ptr;
    uint32_t tb_flags = dc->base.tb->flags;

    dc->config = env->config;
    dc->pc = dc->base.pc_first;
    dc->ring = tb_flags & XTENSA_TBFLAG_RING_MASK;
    /* PS.EXCM makes execution effectively ring 0 */
    dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM) ? 0 : dc->ring;
    /* zero-overhead loop bounds are encoded in cs_base */
    dc->lbeg_off = (dc->base.tb->cs_base & XTENSA_CSBASE_LBEG_OFF_MASK) >>
        XTENSA_CSBASE_LBEG_OFF_SHIFT;
    dc->lend = (dc->base.tb->cs_base & XTENSA_CSBASE_LEND_MASK) +
        (dc->base.pc_first & TARGET_PAGE_MASK);
    dc->debug = tb_flags & XTENSA_TBFLAG_DEBUG;
    dc->icount = tb_flags & XTENSA_TBFLAG_ICOUNT;
    dc->cpenable = (tb_flags & XTENSA_TBFLAG_CPENABLE_MASK) >>
        XTENSA_TBFLAG_CPENABLE_SHIFT;
    dc->window = ((tb_flags & XTENSA_TBFLAG_WINDOW_MASK) >>
                 XTENSA_TBFLAG_WINDOW_SHIFT);
    dc->cwoe = tb_flags & XTENSA_TBFLAG_CWOE;
    dc->callinc = ((tb_flags & XTENSA_TBFLAG_CALLINC_MASK) >>
                   XTENSA_TBFLAG_CALLINC_SHIFT);

    /*
     * FIXME: This will leak when a failed instruction load or similar
     * event causes us to longjump out of the translation loop and
     * hence not clean-up in xtensa_tr_tb_stop
     */
    if (dc->config->isa) {
        dc->insnbuf = xtensa_insnbuf_alloc(dc->config->isa);
        dc->slotbuf = xtensa_insnbuf_alloc(dc->config->isa);
    }
    init_sar_tracker(dc);
}
| |
/*
 * Translator hook: per-TB setup.  Allocate the ICOUNT shadow temp
 * (local, as it must survive across brcond-generated basic blocks).
 */
static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (dc->icount) {
        dc->next_icount = tcg_temp_local_new_i32();
    }
}

/* Translator hook: record the guest PC for this insn in the TCG stream. */
static void xtensa_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    tcg_gen_insn_start(dcbase->pc_next);
}
| |
/*
 * Translator hook: a GDB breakpoint matched this PC; emit a debug
 * exception instead of translating the instruction.
 */
static bool xtensa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                       const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_i32(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing. */
    dc->base.pc_next += 2;
    return true;
}
| |
/*
 * Translator hook: translate one instruction, emitting YIELD/debug
 * exits, ICOUNT maintenance and IBREAK checks around it, and end the
 * TB before an instruction that would cross a page boundary.
 */
static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUXtensaState *env = cpu->env_ptr;
    target_ulong page_start;

    /* These two conditions only apply to the first insn in the TB,
       but this is the first TranslateOps hook that allows exiting.  */
    if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT)
        && (dc->base.tb->flags & XTENSA_TBFLAG_YIELD)) {
        gen_exception(dc, EXCP_YIELD);
        dc->base.is_jmp = DISAS_NORETURN;
        return;
    }
    if (dc->base.tb->flags & XTENSA_TBFLAG_EXCEPTION) {
        gen_exception(dc, EXCP_DEBUG);
        dc->base.is_jmp = DISAS_NORETURN;
        return;
    }

    if (dc->icount) {
        TCGLabel *label = gen_new_label();

        /*
         * next_icount = ICOUNT + 1; on wrap to 0 keep the old value
         * and (when DEBUG is on) raise the ICOUNT debug exception.
         */
        tcg_gen_addi_i32(dc->next_icount, cpu_SR[ICOUNT], 1);
        tcg_gen_brcondi_i32(TCG_COND_NE, dc->next_icount, 0, label);
        tcg_gen_mov_i32(dc->next_icount, cpu_SR[ICOUNT]);
        if (dc->debug) {
            gen_debug_exception(dc, DEBUGCAUSE_IC);
        }
        gen_set_label(label);
    }

    if (dc->debug) {
        gen_ibreak_check(env, dc);
    }

    disas_xtensa_insn(env, dc);

    /* Commit the incremented ICOUNT only after the insn translated OK */
    if (dc->icount) {
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }

    /* End the TB if the next insn will cross into the next page.  */
    page_start = dc->base.pc_first & TARGET_PAGE_MASK;
    if (dc->base.is_jmp == DISAS_NEXT &&
        (dc->pc - page_start >= TARGET_PAGE_SIZE ||
         dc->pc - page_start + xtensa_insn_len(env, dc) > TARGET_PAGE_SIZE)) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}
| |
/*
 * Translator hook: per-TB teardown.  Free per-TB resources and emit
 * the TB epilogue (jump or single-step debug exception).
 */
static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    reset_sar_tracker(dc);
    if (dc->config->isa) {
        xtensa_insnbuf_free(dc->config->isa, dc->insnbuf);
        xtensa_insnbuf_free(dc->config->isa, dc->slotbuf);
    }
    if (dc->icount) {
        tcg_temp_free(dc->next_icount);
    }

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* Exit already generated (exception or jump) */
        break;
    case DISAS_TOO_MANY:
        if (dc->base.singlestep_enabled) {
            tcg_gen_movi_i32(cpu_pc, dc->pc);
            gen_exception(dc, EXCP_DEBUG);
        } else {
            gen_jumpi(dc, dc->pc, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
| |
/* Translator hook: log the guest disassembly of the translated TB. */
static void xtensa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}

/* Hook table wiring xtensa into the generic translator loop. */
static const TranslatorOps xtensa_translator_ops = {
    .init_disas_context = xtensa_tr_init_disas_context,
    .tb_start           = xtensa_tr_tb_start,
    .insn_start         = xtensa_tr_insn_start,
    .breakpoint_check   = xtensa_tr_breakpoint_check,
    .translate_insn     = xtensa_tr_translate_insn,
    .tb_stop            = xtensa_tr_tb_stop,
    .disas_log          = xtensa_tr_disas_log,
};

/* Entry point: translate one TB using the generic translator loop. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = {};
    translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb, max_insns);
}
| |
/*
 * Dump CPU state for 'info registers': PC, all configured special and
 * user registers, the visible A registers, the full physical AR file
 * (annotated with window-start '<' and current-window '=' markers),
 * and optionally the FP registers.
 */
void xtensa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    xtensa_isa isa = env->config->isa;
    int i, j;

    qemu_fprintf(f, "PC=%08x\n\n", env->pc);

    for (i = j = 0; i < xtensa_isa_num_sysregs(isa); ++i) {
        /* User registers live in uregs[], special registers in sregs[] */
        const uint32_t *reg =
            xtensa_sysreg_is_user(isa, i) ? env->uregs : env->sregs;
        int regno = xtensa_sysreg_number(isa, i);

        if (regno >= 0) {
            qemu_fprintf(f, "%12s=%08x%c",
                         xtensa_sysreg_name(isa, i),
                         reg[regno],
                         (j++ % 4) == 3 ? '\n' : ' ');
        }
    }

    qemu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");

    for (i = 0; i < 16; ++i) {
        qemu_fprintf(f, " A%02d=%08x%c",
                     i, env->regs[i], (i % 4) == 3 ? '\n' : ' ');
    }

    /* Copy the current window back to phys_regs before dumping it */
    xtensa_sync_phys_from_window(env);
    qemu_fprintf(f, "\n");

    for (i = 0; i < env->config->nareg; ++i) {
        qemu_fprintf(f, "AR%02d=%08x ", i, env->phys_regs[i]);
        if (i % 4 == 3) {
            bool ws = (env->sregs[WINDOW_START] & (1 << (i / 4))) != 0;
            bool cw = env->sregs[WINDOW_BASE] == i / 4;

            qemu_fprintf(f, "%c%c\n", ws ? '<' : ' ', cw ? '=' : ' ');
        }
    }

    if ((flags & CPU_DUMP_FPU) &&
        xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
        qemu_fprintf(f, "\n");

        for (i = 0; i < 16; ++i) {
            qemu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i,
                         float32_val(env->fregs[i].f32[FP_F32_LOW]),
                         *(float *)(env->fregs[i].f32 + FP_F32_LOW),
                         (i % 2) == 1 ? '\n' : ' ');
        }
    }
}
| |
/*
 * Restore CPU state from the insn_start data recorded during
 * translation (only the PC is recorded for xtensa).
 */
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}
| |
/* ABS: arg0 = |arg1|. */
static void translate_abs(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_abs_i32(arg[0].out, arg[1].in);
}

/* ADD: arg0 = arg1 + arg2. */
static void translate_add(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_add_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* ADDI/ADDMI: arg0 = arg1 + immediate. */
static void translate_addi(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_addi_i32(arg[0].out, arg[1].in, arg[2].imm);
}

/* ADDX2/ADDX4/ADDX8: arg0 = (arg1 << par[0]) + arg2. */
static void translate_addx(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
    tcg_gen_add_i32(arg[0].out, tmp, arg[2].in);
    tcg_temp_free(tmp);
}
| |
/*
 * ALL4/ALL8/ANY4/ANY8: reduce a group of par[1] boolean-register bits
 * starting at bit arg[1].imm into the single boolean bit arg[0].imm.
 * The reduction is done with a carry trick: adding 1 << arg[1].imm
 * (par[0] != 0, ALL) carries into bit imm+shift only when all group
 * bits are set; adding the mask itself (par[0] == 0, ANY) carries
 * when at least one bit is set.
 */
static void translate_all(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    uint32_t shift = par[1];
    TCGv_i32 mask = tcg_const_i32(((1 << shift) - 1) << arg[1].imm);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_and_i32(tmp, arg[1].in, mask);
    if (par[0]) {
        tcg_gen_addi_i32(tmp, tmp, 1 << arg[1].imm);
    } else {
        tcg_gen_add_i32(tmp, tmp, mask);
    }
    tcg_gen_shri_i32(tmp, tmp, arg[1].imm + shift);
    tcg_gen_deposit_i32(arg[0].out, arg[0].out,
                        tmp, arg[0].imm, 1);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
}
| |
/* AND: arg0 = arg1 & arg2. */
static void translate_and(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_and_i32(arg[0].out, arg[1].in, arg[2].in);
}

/*
 * BALL/BNALL: branch when all (or, per par[0] condition, not all)
 * mask bits of arg[1] are set in arg[0]: compare arg0 & arg1 to arg1.
 */
static void translate_ball(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
    gen_brcond(dc, par[0], tmp, arg[1].in, arg[2].imm);
    tcg_temp_free(tmp);
}

/*
 * BANY/BNONE: branch on (arg0 & arg1) compared against zero with
 * the condition in par[0].
 */
static void translate_bany(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp);
}

/* BEQ/BNE/BLT/BGE/...: branch comparing arg0 with arg1 (cond in par[0]). */
static void translate_b(DisasContext *dc, const OpcodeArg arg[],
                        const uint32_t par[])
{
    gen_brcond(dc, par[0], arg[0].in, arg[1].in, arg[2].imm);
}
| |
/*
 * BBC/BBS: branch on a single bit of arg[0] selected by arg[1].
 * Bit numbering follows the target endianness: on big-endian targets
 * bit 0 is the MSB, so the probe mask starts at 0x80000000 and is
 * shifted right; little-endian starts at bit 0 and shifts left.
 * Only the low 5 bits of the bit-number register are used.
 */
static void translate_bb(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
#ifdef TARGET_WORDS_BIGENDIAN
    TCGv_i32 bit = tcg_const_i32(0x80000000u);
#else
    TCGv_i32 bit = tcg_const_i32(0x00000001u);
#endif
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, arg[1].in, 0x1f);
#ifdef TARGET_WORDS_BIGENDIAN
    tcg_gen_shr_i32(bit, bit, tmp);
#else
    tcg_gen_shl_i32(bit, bit, tmp);
#endif
    tcg_gen_and_i32(tmp, arg[0].in, bit);
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp);
    tcg_temp_free(bit);
}

/* BBCI/BBSI: branch on an immediate-numbered bit of arg[0]
 * (same endian-dependent bit numbering as translate_bb). */
static void translate_bbi(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
#ifdef TARGET_WORDS_BIGENDIAN
    tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm);
#else
    tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm);
#endif
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp);
}

/* BEQI/BNEI/BLTI/...: branch comparing arg0 with an immediate. */
static void translate_bi(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    gen_brcondi(dc, par[0], arg[0].in, arg[1].imm, arg[2].imm);
}

/* BEQZ/BNEZ/BLTZ/BGEZ: branch comparing arg0 with zero. */
static void translate_bz(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    gen_brcondi(dc, par[0], arg[0].in, 0, arg[1].imm);
}
| |
/* Operation selector (par[0]) for translate_boolean. */
enum {
    BOOLEAN_AND,
    BOOLEAN_ANDC,
    BOOLEAN_OR,
    BOOLEAN_ORC,
    BOOLEAN_XOR,
};

/*
 * ANDB/ANDBC/ORB/ORBC/XORB: combine two single bits of the boolean
 * register file (bit positions in arg[1].imm / arg[2].imm) and
 * deposit the result into bit arg[0].imm of the destination.
 */
static void translate_boolean(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    static void (* const op[])(TCGv_i32, TCGv_i32, TCGv_i32) = {
        [BOOLEAN_AND] = tcg_gen_and_i32,
        [BOOLEAN_ANDC] = tcg_gen_andc_i32,
        [BOOLEAN_OR] = tcg_gen_or_i32,
        [BOOLEAN_ORC] = tcg_gen_orc_i32,
        [BOOLEAN_XOR] = tcg_gen_xor_i32,
    };

    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp1, arg[1].in, arg[1].imm);
    tcg_gen_shri_i32(tmp2, arg[2].in, arg[2].imm);
    op[par[0]](tmp1, tmp1, tmp2);
    tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp1, arg[0].imm, 1);
    tcg_temp_free(tmp1);
    tcg_temp_free(tmp2);
}
| |
/* BF/BT: branch on boolean-register bit arg[0].imm (cond in par[0]). */
static void translate_bp(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, arg[0].in, 1 << arg[0].imm);
    gen_brcondi(dc, par[0], tmp, 0, arg[1].imm);
    tcg_temp_free(tmp);
}
| |
/* CALL0: save return address in a0 and jump to the immediate target. */
static void translate_call0(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
    gen_jumpi(dc, arg[0].imm, 0);
}

/* CALL4/CALL8/CALL12: windowed call to an immediate target
 * (window increment in par[0]). */
static void translate_callw(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
    gen_callw_slot(dc, par[0], tmp, adjust_jump_slot(dc, arg[0].imm, 0));
    tcg_temp_free(tmp);
}

/* CALLX0: save return address in a0 and jump to the register target.
 * The target is copied first in case arg[0] is a0 itself. */
static void translate_callx0(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, arg[0].in);
    tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
    gen_jump(dc, tmp);
    tcg_temp_free(tmp);
}

/* CALLX4/CALLX8/CALLX12: windowed call to a register target. */
static void translate_callxw(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_mov_i32(tmp, arg[0].in);
    gen_callw_slot(dc, par[0], tmp, -1);
    tcg_temp_free(tmp);
}
| |
/*
 * CLAMPS: clamp arg1 to the signed range representable in
 * arg[2].imm + 1 bits, i.e. [-2^imm, 2^imm - 1].
 */
static void translate_clamps(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2].imm);
    TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2].imm) - 1);

    tcg_gen_smax_i32(tmp1, tmp1, arg[1].in);
    tcg_gen_smin_i32(arg[0].out, tmp1, tmp2);
    tcg_temp_free(tmp1);
    tcg_temp_free(tmp2);
}
| |
/* CLRB_EXPSTATE: clear bit arg[0].imm of the EXPSTATE user register. */
static void translate_clrb_expstate(DisasContext *dc, const OpcodeArg arg[],
                                    const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_andi_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], ~(1u << arg[0].imm));
}

/* CLREX: drop any outstanding exclusive-access monitor. */
static void translate_clrex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
}

/* CONST16: arg0 = (previous arg0 << 16) | 16-bit immediate;
 * two CONST16 in sequence build a full 32-bit constant. */
static void translate_const16(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    TCGv_i32 c = tcg_const_i32(arg[1].imm);

    tcg_gen_deposit_i32(arg[0].out, c, arg[0].in, 16, 16);
    tcg_temp_free(c);
}
| |
/*
 * Data-cache ops that touch memory: QEMU models no cache, so just
 * perform a byte load at the effective address so that any TLB or
 * permission exceptions are raised; the loaded value is discarded.
 */
static void translate_dcache(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 res = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
    tcg_gen_qemu_ld8u(res, addr, dc->cring);
    tcg_temp_free(addr);
    tcg_temp_free(res);
}

/* DEPBITS: deposit arg[3].imm bits of arg[0] into arg[1]
 * at bit position arg[2].imm. */
static void translate_depbits(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_gen_deposit_i32(arg[1].out, arg[1].in, arg[0].in,
                        arg[2].imm, arg[3].imm);
}

/* DIWBUI.P: cache op is a no-op here; just advance the address
 * register by one data-cache line. */
static void translate_diwbuip(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_gen_addi_i32(arg[0].out, arg[0].in, dc->config->dcache_line_bytes);
}
| |
| static bool test_ill_entry(DisasContext *dc, const OpcodeArg arg[], |
| const uint32_t par[]) |
| { |
| if (arg[0].imm > 3 || !dc->cwoe) { |
| qemu_log_mask(LOG_GUEST_ERROR, |
| "Illegal entry instruction(pc = %08x)\n", dc->pc); |
| return true; |
| } else { |
| return false; |
| } |
| } |
| |
/* Window-overflow check mask for ENTRY: the register group selected
 * by the caller's CALLINC (4 registers per increment). */
static uint32_t test_overflow_entry(DisasContext *dc, const OpcodeArg arg[],
                                    const uint32_t par[])
{
    return 1 << (dc->callinc * 4);
}
| |
/* ENTRY: delegate to the helper (window rotate + stack adjust);
 * pc is passed for exception reporting. */
static void translate_entry(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 pc = tcg_const_i32(dc->pc);
    TCGv_i32 s = tcg_const_i32(arg[0].imm);
    TCGv_i32 imm = tcg_const_i32(arg[1].imm);
    gen_helper_entry(cpu_env, pc, s, imm);
    tcg_temp_free(imm);
    tcg_temp_free(s);
    tcg_temp_free(pc);
}

/*
 * EXTUI: extract arg[3].imm bits of arg1 starting at bit arg[2].imm.
 * shri+andi is used rather than tcg extract because shift + field
 * width may run past bit 31; the excess bits read as zero.
 */
static void translate_extui(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    int maskimm = (1 << arg[3].imm) - 1;

    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, arg[1].in, arg[2].imm);
    tcg_gen_andi_i32(arg[0].out, tmp, maskimm);
    tcg_temp_free(tmp);
}
| |
/*
 * GETEX: exchange the saved exclusive-store result (ATOMCTL bit 8)
 * with bit 0 of arg[0]: the old flag is returned in arg[0].out and
 * arg[0].in's low bit becomes the new flag.
 */
static void translate_getex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_extract_i32(tmp, cpu_SR[ATOMCTL], 8, 1);
    tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], arg[0].in, 8, 1);
    tcg_gen_mov_i32(arg[0].out, tmp);
    tcg_temp_free(tmp);
}

/*
 * Instruction-cache ops with a memory operand: no cache is modelled;
 * probe the ITLB via helper so the architectural exceptions are
 * raised (system emulation only).
 */
static void translate_icache(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_movi_i32(cpu_pc, dc->pc);
    tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
    gen_helper_itlb_hit_test(cpu_env, addr);
    tcg_temp_free(addr);
#endif
}
| |
/* IITLB/IDTLB: invalidate a TLB entry; par[0] selects DTLB vs ITLB. */
static void translate_itlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    gen_helper_itlb(cpu_env, arg[0].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* J: unconditional jump to an immediate target. */
static void translate_j(DisasContext *dc, const OpcodeArg arg[],
                        const uint32_t par[])
{
    gen_jumpi(dc, arg[0].imm, 0);
}

/* JX: unconditional jump to a register target. */
static void translate_jx(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    gen_jump(dc, arg[0].in);
}
| |
/* L32E: 32-bit load using the previous ring (dc->ring) rather than
 * the current one; used by window overflow/underflow handlers. */
static void translate_l32e(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, false);
    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
    tcg_temp_free(addr);
}

#ifdef CONFIG_USER_ONLY
/* No exclusive-access memory attribute checks in user emulation. */
static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
{
}
#else
/*
 * Verify that an exclusive access at addr is architecturally allowed.
 * With the MPU option the check is presumably covered by the MPU
 * attributes themselves, so the helper is only called without it
 * -- NOTE(review): confirm against the helper's implementation.
 */
static void gen_check_exclusive(DisasContext *dc, TCGv_i32 addr, bool is_write)
{
    if (!option_enabled(dc, XTENSA_OPTION_MPU)) {
        TCGv_i32 tpc = tcg_const_i32(dc->pc);
        TCGv_i32 write = tcg_const_i32(is_write);

        gen_helper_check_exclusive(cpu_env, tpc, addr, write);
        tcg_temp_free(tpc);
        tcg_temp_free(write);
    }
}
#endif
| |
/* L32EX: load-exclusive; record address and loaded value in the
 * exclusive monitor for a later S32EX to match against. */
static void translate_l32ex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_mov_i32(addr, arg[1].in);
    gen_load_store_alignment(dc, 2, addr, true);
    gen_check_exclusive(dc, addr, false);
    tcg_gen_qemu_ld_i32(arg[0].out, addr, dc->ring, MO_TEUL);
    tcg_gen_mov_i32(cpu_exclusive_addr, addr);
    tcg_gen_mov_i32(cpu_exclusive_val, arg[0].out);
    tcg_temp_free(addr);
}

/*
 * Generic load/store: par[0] = MemOp, par[1] = acquire/release
 * barrier needed, par[2] = store (non-zero) vs load.  The barrier is
 * emitted before a store and after a load.
 */
static void translate_ldst(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    if (par[0] & MO_SIZE) {
        gen_load_store_alignment(dc, par[0] & MO_SIZE, addr, par[1]);
    }
    if (par[2]) {
        if (par[1]) {
            tcg_gen_mb(TCG_BAR_STRL | TCG_MO_ALL);
        }
        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, par[0]);
    } else {
        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, par[0]);
        if (par[1]) {
            tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL);
        }
    }
    tcg_temp_free(addr);
}
| |
/*
 * L32R: PC-relative (or LITBASE-relative) literal load.  With LITBASE
 * enabled the raw offset is added to LITBASE; the -1 presumably
 * cancels LITBASE's low enable bit -- NOTE(review): confirm LITBASE
 * bit 0 semantics.
 */
static void translate_l32r(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp;

    if (dc->base.tb->flags & XTENSA_TBFLAG_LITBASE) {
        tmp = tcg_const_i32(arg[1].raw_imm - 1);
        tcg_gen_add_i32(tmp, cpu_SR[LITBASE], tmp);
    } else {
        tmp = tcg_const_i32(arg[1].imm);
    }
    tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
    tcg_temp_free(tmp);
}

/*
 * LOOP/LOOPGT/LOOPNEZ: set up the zero-overhead loop registers
 * (LCOUNT = count - 1, LBEG = next pc, LEND = target) and, for the
 * conditional variants (par[0] != TCG_COND_NEVER), skip the loop body
 * entirely when the count fails the condition against zero.
 */
static void translate_loop(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    uint32_t lend = arg[1].imm;

    tcg_gen_subi_i32(cpu_SR[LCOUNT], arg[0].in, 1);
    tcg_gen_movi_i32(cpu_SR[LBEG], dc->base.pc_next);
    tcg_gen_movi_i32(cpu_SR[LEND], lend);

    if (par[0] != TCG_COND_NEVER) {
        TCGLabel *label = gen_new_label();
        tcg_gen_brcondi_i32(par[0], arg[0].in, 0, label);
        gen_jumpi(dc, lend, 1);
        gen_set_label(label);
    }

    gen_jumpi(dc, dc->base.pc_next, 0);
}
| |
/* MAC16 operation kind (par[0]). */
enum {
    MAC16_UMUL,
    MAC16_MUL,
    MAC16_MULA,
    MAC16_MULS,
    MAC16_NONE,
};

/* MAC16 half-word selectors (par[1]): which 16-bit half of each
 * operand participates; HX/XH are the per-operand high-half flags. */
enum {
    MAC16_LL,
    MAC16_HL,
    MAC16_LH,
    MAC16_HH,

    MAC16_HX = 0x1,
    MAC16_XH = 0x2,
};

/*
 * MAC16 family: optional 32-bit operand load with address update
 * (par[2] = load offset, 0 = no load) combined with an optional
 * 16x16 multiply / multiply-accumulate into the 40-bit ACC
 * (ACCHI:ACCLO, ACCHI sign-extended from 8 bits).  When both load
 * and MAC are present the MAC operands start at arg[2].
 */
static void translate_mac16(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    int op = par[0];
    unsigned half = par[1];
    uint32_t ld_offset = par[2];
    unsigned off = ld_offset ? 2 : 0;
    TCGv_i32 vaddr = tcg_temp_new_i32();
    TCGv_i32 mem32 = tcg_temp_new_i32();

    if (ld_offset) {
        tcg_gen_addi_i32(vaddr, arg[1].in, ld_offset);
        gen_load_store_alignment(dc, 2, vaddr, false);
        tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
    }
    if (op != MAC16_NONE) {
        /* Extract the selected 16-bit halves (signed unless UMUL) */
        TCGv_i32 m1 = gen_mac16_m(arg[off].in,
                                  half & MAC16_HX, op == MAC16_UMUL);
        TCGv_i32 m2 = gen_mac16_m(arg[off + 1].in,
                                  half & MAC16_XH, op == MAC16_UMUL);

        if (op == MAC16_MUL || op == MAC16_UMUL) {
            tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
            if (op == MAC16_UMUL) {
                tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
            } else {
                tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
            }
        } else {
            /* Accumulate: 64-bit add/sub of the sign-extended product */
            TCGv_i32 lo = tcg_temp_new_i32();
            TCGv_i32 hi = tcg_temp_new_i32();

            tcg_gen_mul_i32(lo, m1, m2);
            tcg_gen_sari_i32(hi, lo, 31);
            if (op == MAC16_MULA) {
                tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 lo, hi);
            } else {
                tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 cpu_SR[ACCLO], cpu_SR[ACCHI],
                                 lo, hi);
            }
            /* ACC is 40 bits: keep ACCHI sign-extended from bit 7 */
            tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);

            tcg_temp_free_i32(lo);
            tcg_temp_free_i32(hi);
        }
        tcg_temp_free(m1);
        tcg_temp_free(m2);
    }
    if (ld_offset) {
        /* Write back updated address and the loaded MR register last,
         * so exceptions during the load leave state unchanged */
        tcg_gen_mov_i32(arg[1].out, vaddr);
        tcg_gen_mov_i32(cpu_SR[MR + arg[0].imm], mem32);
    }
    tcg_temp_free(vaddr);
    tcg_temp_free(mem32);
}
| |
/* MEMW/EXTW: full memory barrier. */
static void translate_memw(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
}

/* MIN: arg0 = signed min(arg1, arg2). */
static void translate_smin(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_smin_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MINU: arg0 = unsigned min(arg1, arg2). */
static void translate_umin(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_umin_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MAX: arg0 = signed max(arg1, arg2). */
static void translate_smax(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_smax_i32(arg[0].out, arg[1].in, arg[2].in);
}

/* MAXU: arg0 = unsigned max(arg1, arg2). */
static void translate_umax(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_umax_i32(arg[0].out, arg[1].in, arg[2].in);
}
| |
/* MOV: arg0 = arg1. */
static void translate_mov(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, arg[1].in);
}

/* MOVEQZ/MOVNEZ/MOVLTZ/MOVGEZ: arg0 = arg1 if (arg2 cond 0),
 * otherwise keep arg0 unchanged. */
static void translate_movcond(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(par[0], arg[0].out,
                        arg[2].in, zero, arg[1].in, arg[0].in);
    tcg_temp_free(zero);
}

/* MOVI: arg0 = immediate. */
static void translate_movi(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_movi_i32(arg[0].out, arg[1].imm);
}

/* MOVF/MOVT: conditional move on boolean-register bit arg[2].imm. */
static void translate_movp(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm);
    tcg_gen_movcond_i32(par[0],
                        arg[0].out, tmp, zero,
                        arg[1].in, arg[0].in);
    tcg_temp_free(tmp);
    tcg_temp_free(zero);
}

/* MOVSP: plain move here; the window-alloca check is presumably
 * handled elsewhere in the opcode's op_flags -- NOTE(review): confirm. */
static void translate_movsp(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, arg[1].in);
}
| |
/* MUL16S/MUL16U: 16x16 -> 32 multiply of the low halves;
 * par[0] selects signed (non-zero) vs unsigned extension. */
static void translate_mul16(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 v1 = tcg_temp_new_i32();
    TCGv_i32 v2 = tcg_temp_new_i32();

    if (par[0]) {
        tcg_gen_ext16s_i32(v1, arg[1].in);
        tcg_gen_ext16s_i32(v2, arg[2].in);
    } else {
        tcg_gen_ext16u_i32(v1, arg[1].in);
        tcg_gen_ext16u_i32(v2, arg[2].in);
    }
    tcg_gen_mul_i32(arg[0].out, v1, v2);
    tcg_temp_free(v2);
    tcg_temp_free(v1);
}

/* MULL: arg0 = low 32 bits of arg1 * arg2. */
static void translate_mull(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_mul_i32(arg[0].out, arg[1].in, arg[2].in);
}
| |
| static void translate_mulh(DisasContext *dc, const OpcodeArg arg[], |
| const uint32_t par[]) |
| { |
| TCGv_i32 lo = tcg_temp_new(); |
| |
| if (par[0]) { |
| tcg_gen_muls2_i32(lo, arg[0].out, arg[1].in, arg[2].in); |
| } else { |
| tcg_gen_mulu2_i32(lo, arg[0].out, arg[1].in, arg[2].in); |
| } |
| tcg_temp_free(lo); |
| } |
| |
/* NEG: arg0 = -arg1. */
static void translate_neg(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_neg_i32(arg[0].out, arg[1].in);
}

/* NOP: no operation. */
static void translate_nop(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
}

/* NSA: normalization shift amount = count of redundant sign bits. */
static void translate_nsa(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_clrsb_i32(arg[0].out, arg[1].in);
}

/* NSAU: unsigned normalization shift amount = count of leading
 * zeros (32 for a zero input). */
static void translate_nsau(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_clzi_i32(arg[0].out, arg[1].in, 32);
}

/* OR: arg0 = arg1 | arg2. */
static void translate_or(DisasContext *dc, const OpcodeArg arg[],
                         const uint32_t par[])
{
    tcg_gen_or_i32(arg[0].out, arg[1].in, arg[2].in);
}
| |
/* PITLB/PDTLB: probe the region-protection/MMU TLB for an address;
 * par[0] selects DTLB vs ITLB (system emulation only). */
static void translate_ptlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    tcg_gen_movi_i32(cpu_pc, dc->pc);
    gen_helper_ptlb(arg[0].out, cpu_env, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* PPTLB: probe the MPU for an address (system emulation only). */
static void translate_pptlb(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_movi_i32(cpu_pc, dc->pc);
    gen_helper_pptlb(arg[0].out, cpu_env, arg[1].in);
#endif
}
| |
/*
 * QUOS/REMS: signed division/remainder (par[0] != 0 for QUOS).
 * The INT_MIN / -1 case would overflow the host div op, so it is
 * special-cased to the architectural result (0x80000000 quotient,
 * 0 remainder).  Division by zero is checked separately via the
 * XTENSA_OP_DIVIDE_BY_ZERO op flag.
 */
static void translate_quos(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGLabel *label1 = gen_new_label();
    TCGLabel *label2 = gen_new_label();

    tcg_gen_brcondi_i32(TCG_COND_NE, arg[1].in, 0x80000000,
                        label1);
    tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0xffffffff,
                        label1);
    tcg_gen_movi_i32(arg[0].out,
                     par[0] ? 0x80000000 : 0);
    tcg_gen_br(label2);
    gen_set_label(label1);
    if (par[0]) {
        tcg_gen_div_i32(arg[0].out,
                        arg[1].in, arg[2].in);
    } else {
        tcg_gen_rem_i32(arg[0].out,
                        arg[1].in, arg[2].in);
    }
    gen_set_label(label2);
}

/* QUOU: unsigned division (zero divisor handled via op flag). */
static void translate_quou(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_divu_i32(arg[0].out,
                     arg[1].in, arg[2].in);
}
| |
/* READ_IMPWIRE: import wire is not connected; always reads 0. */
static void translate_read_impwire(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_movi_i32(arg[0].out, 0);
}

/* REMU: unsigned remainder (zero divisor handled via op flag). */
static void translate_remu(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_remu_i32(arg[0].out,
                     arg[1].in, arg[2].in);
}

/* RER: read external register via helper. */
static void translate_rer(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_helper_rer(arg[0].out, cpu_env, arg[1].in);
}

/* RET: non-windowed return; jump to a0. */
static void translate_ret(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_jump(dc, cpu_R[0]);
}
| |
/*
 * Validate a RETW instruction: illegal when windowed call/entry is
 * disabled (CWOE); otherwise the runtime window state is verified by
 * the helper (which raises the exception itself if invalid).
 * Returns true when the instruction is statically illegal.
 */
static bool test_ill_retw(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (!dc->cwoe) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Illegal retw instruction(pc = %08x)\n", dc->pc);
        return true;
    } else {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_test_ill_retw(cpu_env, tmp);
        tcg_temp_free(tmp);
        return false;
    }
}

/*
 * RETW: windowed return.  Clear the current frame's WINDOW_START bit,
 * build the return address from the current PC's top 2 bits plus the
 * low 30 bits of a0, let the helper rotate the window back, and jump.
 */
static void translate_retw(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
    tcg_gen_andc_i32(cpu_SR[WINDOW_START],
                     cpu_SR[WINDOW_START], tmp);
    tcg_gen_movi_i32(tmp, dc->pc);
    tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30);
    gen_helper_retw(cpu_env, cpu_R[0]);
    gen_jump(dc, tmp);
    tcg_temp_free(tmp);
}
| |
/* RFDE: return from double exception; jump to DEPC (or EPC1 when the
 * configuration has no DEPC register). */
static void translate_rfde(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    gen_jump(dc, cpu_SR[dc->config->ndepc ? DEPC : EPC1]);
}

/* RFE: return from level-1 exception; clear PS.EXCM, jump to EPC1. */
static void translate_rfe(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
    gen_jump(dc, cpu_SR[EPC1]);
}

/* RFI: return from high-priority interrupt level arg[0].imm;
 * restore PS from EPS[level] and jump to EPC[level]. */
static void translate_rfi(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + arg[0].imm - 2]);
    gen_jump(dc, cpu_SR[EPC1 + arg[0].imm - 1]);
}

/*
 * RFWO/RFWU: return from window overflow/underflow exception.
 * par[0] != 0 (overflow) clears the caller frame's WINDOW_START bit;
 * underflow sets it.  The helper restores WINDOW_BASE from PS.OWB.
 */
static void translate_rfw(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(1);

    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
    tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);

    if (par[0]) {
        tcg_gen_andc_i32(cpu_SR[WINDOW_START],
                         cpu_SR[WINDOW_START], tmp);
    } else {
        tcg_gen_or_i32(cpu_SR[WINDOW_START],
                       cpu_SR[WINDOW_START], tmp);
    }

    tcg_temp_free(tmp);
    gen_helper_restore_owb(cpu_env);
    gen_jump(dc, cpu_SR[EPC1]);
}

/* ROTW: schedule a window rotation by arg[0].imm frames; the actual
 * WINDOW_BASE update is committed via cpu_windowbase_next. */
static void translate_rotw(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_addi_i32(cpu_windowbase_next, cpu_SR[WINDOW_BASE], arg[0].imm);
}
| |
/* RSIL: return the old PS in arg0 and set PS.INTLEVEL to the
 * immediate interrupt level. */
static void translate_rsil(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, cpu_SR[PS]);
    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
    tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], arg[1].imm);
}

/* RSR: read special register par[0]. */
static void translate_rsr(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
}

/* RSR.CCOUNT: sync the cycle counter via helper before reading;
 * under icount the read must be bracketed by gen_io_start. */
static void translate_rsr_ccount(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_update_ccount(cpu_env);
    tcg_gen_mov_i32(arg[0].out, cpu_SR[par[0]]);
#endif
}

/* RSR.PTEVADDR: compose the page-table entry address from
 * PTEVADDR and the faulting address EXCVADDR. */
static void translate_rsr_ptevaddr(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_shri_i32(tmp, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(tmp, tmp, cpu_SR[PTEVADDR]);
    tcg_gen_andi_i32(arg[0].out, tmp, 0xfffffffc);
    tcg_temp_free(tmp);
#endif
}
| |
/* RITLB0/RITLB1/RDTLB0/RDTLB1: read TLB entry fields; par[0] selects
 * DTLB vs ITLB, par[1] selects the field-0 vs field-1 helper. */
static void translate_rtlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    static void (* const helper[])(TCGv_i32 r, TCGv_env env, TCGv_i32 a1,
                                   TCGv_i32 a2) = {
        gen_helper_rtlb0,
        gen_helper_rtlb1,
    };
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    helper[par[1]](arg[0].out, cpu_env, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* RPTLB0: read MPU entry field 0 (system emulation only). */
static void translate_rptlb0(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_rptlb0(arg[0].out, cpu_env, arg[1].in);
#endif
}

/* RPTLB1: read MPU entry field 1 (system emulation only). */
static void translate_rptlb1(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_rptlb1(arg[0].out, cpu_env, arg[1].in);
#endif
}

/* RUR: read user register par[0]. */
static void translate_rur(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, cpu_UR[par[0]]);
}

/* SETB_EXPSTATE: set bit arg[0].imm of the EXPSTATE user register. */
static void translate_setb_expstate(DisasContext *dc, const OpcodeArg arg[],
                                    const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_ori_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], 1u << arg[0].imm);
}
| |
| #ifdef CONFIG_USER_ONLY |
| static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr) |
| { |
| } |
| #else |
| static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr) |
| { |
| TCGv_i32 tpc = tcg_const_i32(dc->pc); |
| |
| gen_helper_check_atomctl(cpu_env, tpc, addr); |
| tcg_temp_free(tpc); |
| } |
| #endif |
| |
/*
 * S32C1I: atomic compare-and-swap.  Compares the word at
 * arg[1].in + arg[2].imm against SCOMPARE1; on match stores the old
 * arg[0] value, and in all cases returns the memory's previous value
 * in arg[0].out.  Local temps are used because gen_check_atomctl may
 * emit a helper call (potential exception) in the middle.
 */
static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_local_new_i32();
    TCGv_i32 addr = tcg_temp_local_new_i32();

    /* Copy the swap value first: arg[0].out may alias arg[0].in. */
    tcg_gen_mov_i32(tmp, arg[0].in);
    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, true);
    gen_check_atomctl(dc, addr);
    tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1],
                               tmp, dc->cring, MO_TEUL);
    tcg_temp_free(addr);
    tcg_temp_free(tmp);
}
| |
/*
 * S32E: 32-bit store using dc->ring (the effective store ring) rather
 * than the current ring, at address arg[1].in + arg[2].imm.
 */
static void translate_s32e(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, false);
    tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, MO_TEUL);
    tcg_temp_free(addr);
}
| |
/*
 * S32EX: store-exclusive.  Succeeds only if the address matches the
 * pending exclusive reservation AND the cmpxchg sees the value read by
 * the matching load-exclusive.  The success flag (res) is deposited in
 * ATOMCTL bit 8, whose previous value is returned in arg[0].out; the
 * reservation is cleared on the attempt.
 */
static void translate_s32ex(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 prev = tcg_temp_new_i32();
    TCGv_i32 addr = tcg_temp_local_new_i32();
    TCGv_i32 res = tcg_temp_local_new_i32();
    TCGLabel *label = gen_new_label();

    tcg_gen_movi_i32(res, 0);
    tcg_gen_mov_i32(addr, arg[1].in);
    gen_load_store_alignment(dc, 2, addr, true);
    /* No reservation for this address: fail without touching memory. */
    tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, label);
    gen_check_exclusive(dc, addr, true);
    tcg_gen_atomic_cmpxchg_i32(prev, cpu_exclusive_addr, cpu_exclusive_val,
                               arg[0].in, dc->cring, MO_TEUL);
    tcg_gen_setcond_i32(TCG_COND_EQ, res, prev, cpu_exclusive_val);
    /*
     * NOTE(review): both movcond outcomes leave cpu_exclusive_val with
     * a value equal to its current one — looks like a no-op on values;
     * confirm whether this is intentional (e.g. for TCG liveness).
     */
    tcg_gen_movcond_i32(TCG_COND_EQ, cpu_exclusive_val,
                        prev, cpu_exclusive_val, prev, cpu_exclusive_val);
    tcg_gen_movi_i32(cpu_exclusive_addr, -1);
    gen_set_label(label);
    tcg_gen_extract_i32(arg[0].out, cpu_SR[ATOMCTL], 8, 1);
    tcg_gen_deposit_i32(cpu_SR[ATOMCTL], cpu_SR[ATOMCTL], res, 8, 1);
    tcg_temp_free(prev);
    tcg_temp_free(addr);
    tcg_temp_free(res);
}
| |
/*
 * SALT/SALTU: set arg[0].out to 1 if arg[1].in <cond> arg[2].in else 0,
 * with the TCG comparison condition supplied in par[0].
 */
static void translate_salt(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_setcond_i32(par[0],
                        arg[0].out,
                        arg[1].in, arg[2].in);
}
| |
| static void translate_sext(DisasContext *dc, const OpcodeArg arg[], |
| const uint32_t par[]) |
| { |
| int shift = 31 - arg[2].imm; |
| |
| if (shift == 24) { |
| tcg_gen_ext8s_i32(arg[0].out, arg[1].in); |
| } else if (shift == 16) { |
| tcg_gen_ext16s_i32(arg[0].out, arg[1].in); |
| } else { |
| TCGv_i32 tmp = tcg_temp_new_i32(); |
| tcg_gen_shli_i32(tmp, arg[1].in, shift); |
| tcg_gen_sari_i32(arg[0].out, tmp, shift); |
| tcg_temp_free(tmp); |
| } |
| } |
| |
| static bool test_ill_simcall(DisasContext *dc, const OpcodeArg arg[], |
| const uint32_t par[]) |
| { |
| #ifdef CONFIG_USER_ONLY |
| bool ill = true; |
| #else |
| bool ill = !semihosting_enabled(); |
| #endif |
| if (ill) { |
| qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n"); |
| } |
| return ill; |
| } |
| |
/*
 * SIMCALL: invoke the semihosting helper (system emulation only;
 * test_ill_simcall has already rejected it when semihosting is off).
 */
static void translate_simcall(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_simcall(cpu_env);
#endif
}
| |
| /* |
| * Note: 64 bit ops are used here solely because SAR values |
| * have range 0..63 |
| */ |
| #define gen_shift_reg(cmd, reg) do { \ |
| TCGv_i64 tmp = tcg_temp_new_i64(); \ |
| tcg_gen_extu_i32_i64(tmp, reg); \ |
| tcg_gen_##cmd##_i64(v, v, tmp); \ |
| tcg_gen_extrl_i64_i32(arg[0].out, v); \ |
| tcg_temp_free_i64(v); \ |
| tcg_temp_free_i64(tmp); \ |
| } while (0) |
| |
| #define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR]) |
| |
/*
 * SLL: shift left by (32 - SAR).  When the translator knows SAR holds
 * a 5-bit value, dc->sar_m32 caches the precomputed 32 - SAR and a
 * plain 32-bit shift suffices; otherwise fall back to a 64-bit shift
 * by (32 - SAR) & 0x3f via gen_shift_reg (which frees 'v').
 */
static void translate_sll(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (dc->sar_m32_5bit) {
        tcg_gen_shl_i32(arg[0].out, arg[1].in, dc->sar_m32);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        TCGv_i32 s = tcg_const_i32(32);
        tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
        tcg_gen_andi_i32(s, s, 0x3f);
        tcg_gen_extu_i32_i64(v, arg[1].in);
        gen_shift_reg(shl, s);
        tcg_temp_free(s);
    }
}
| |
/*
 * SLLI: shift left by the immediate.  An encoded shift of 32 is
 * architecturally undefined; log it and shift by imm & 0x1f (i.e. 0),
 * avoiding an undefined-width TCG shift.
 */
static void translate_slli(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    if (arg[2].imm == 32) {
        qemu_log_mask(LOG_GUEST_ERROR, "slli a%d, a%d, 32 is undefined\n",
                      arg[0].imm, arg[1].imm);
    }
    tcg_gen_shli_i32(arg[0].out, arg[1].in, arg[2].imm & 0x1f);
}
| |
/*
 * SRA: arithmetic shift right by SAR.  Fast 32-bit path when SAR is
 * known to be 5 bits; otherwise sign-extend to 64 bits and use the
 * 64-bit shift macro ('v' is freed inside gen_shift).
 */
static void translate_sra(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (dc->sar_m32_5bit) {
        tcg_gen_sar_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(v, arg[1].in);
        gen_shift(sar);
    }
}
| |
/* SRAI: arithmetic shift right by the immediate amount. */
static void translate_srai(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_sari_i32(arg[0].out, arg[1].in, arg[2].imm);
}
| |
/*
 * SRC: funnel shift.  Concatenate arg[1].in:arg[2].in into a 64-bit
 * value (arg[2] in the low half), shift right by SAR, and keep the low
 * 32 bits ('v' is freed inside gen_shift).
 */
static void translate_src(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    TCGv_i64 v = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(v, arg[2].in, arg[1].in);
    gen_shift(shr);
}
| |
/*
 * SRL: logical shift right by SAR.  Fast 32-bit path when SAR is
 * known 5-bit; otherwise zero-extend and shift in 64 bits, so SAR
 * values up to 63 yield zero ('v' is freed inside gen_shift).
 */
static void translate_srl(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    if (dc->sar_m32_5bit) {
        tcg_gen_shr_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(v, arg[1].in);
        gen_shift(shr);
    }
}

/* The shift macros are only for the SLL/SRA/SRC/SRL group above. */
#undef gen_shift
#undef gen_shift_reg
| |
/* SRLI: logical shift right by the immediate amount. */
static void translate_srli(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    tcg_gen_shri_i32(arg[0].out, arg[1].in, arg[2].imm);
}
| |
/*
 * SSA8B: set SAR for a left funnel shift from a byte offset —
 * multiply the register value by 8 (bits per byte) first.
 */
static void translate_ssa8b(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[0].in, 3);
    gen_left_shift_sar(dc, tmp);
    tcg_temp_free(tmp);
}
| |
/*
 * SSA8L: set SAR for a right funnel shift from a byte offset —
 * multiply the register value by 8 (bits per byte) first.
 */
static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[0].in, 3);
    gen_right_shift_sar(dc, tmp);
    tcg_temp_free(tmp);
}
| |
/* SSAI: set SAR for right shifts from an immediate amount. */
static void translate_ssai(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
    gen_right_shift_sar(dc, tmp);
    tcg_temp_free(tmp);
}
| |
/* SSL: set SAR for left shifts from a register value. */
static void translate_ssl(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_left_shift_sar(dc, arg[0].in);
}
| |
/* SSR: set SAR for right shifts from a register value. */
static void translate_ssr(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_right_shift_sar(dc, arg[0].in);
}
| |
/* SUB: arg[0].out = arg[1].in - arg[2].in. */
static void translate_sub(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_sub_i32(arg[0].out, arg[1].in, arg[2].in);
}
| |
/*
 * SUBX2/SUBX4/SUBX8: arg[0].out = (arg[1].in << par[0]) - arg[2].in,
 * with the shift count (1, 2 or 3) supplied via par[0].
 */
static void translate_subx(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
    tcg_gen_sub_i32(arg[0].out, tmp, arg[2].in);
    tcg_temp_free(tmp);
}
| |
/*
 * WAITI: wait for interrupt with the new interrupt level from the
 * immediate; no-op in user emulation.
 */
static void translate_waiti(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_waiti(dc, arg[0].imm);
#endif
}
| |
/*
 * TLB write: call the wtlb helper with value, entry-way address, and a
 * constant par[0] selecting instruction vs data TLB.  No-op in user
 * emulation.
 */
static void translate_wtlb(DisasContext *dc, const OpcodeArg arg[],
                           const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);

    gen_helper_wtlb(cpu_env, arg[0].in, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}
| |
/* MPU write (WPTLB): delegate to the wptlb helper; no-op in user mode. */
static void translate_wptlb(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wptlb(cpu_env, arg[0].in, arg[1].in);
#endif
}
| |
/* WER: write external register — handled entirely by the wer helper. */
static void translate_wer(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    gen_helper_wer(cpu_env, arg[0].in, arg[1].in);
}
| |
/* WRMSK.EXPSTATE: EXPSTATE = arg[0].in & arg[1].in (masked write). */
static void translate_wrmsk_expstate(DisasContext *dc, const OpcodeArg arg[],
                                     const uint32_t par[])
{
    /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_and_i32(cpu_UR[EXPSTATE], arg[0].in, arg[1].in);
}
| |
/* Generic WSR: copy the AR into the special register selected by par[0]. */
static void translate_wsr(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
}
| |
/*
 * WSR with a write mask: only the bits set in par[2] are writable in
 * SR par[0]; the rest read back as zero.
 */
static void translate_wsr_mask(DisasContext *dc, const OpcodeArg arg[],
                               const uint32_t par[])
{
    tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, par[2]);
}
| |
/*
 * WSR ACCHI: the accumulator-high SR keeps only 8 significant bits;
 * store the sign-extended low byte of the source.
 */
static void translate_wsr_acchi(DisasContext *dc, const OpcodeArg arg[],
                                const uint32_t par[])
{
    tcg_gen_ext8s_i32(cpu_SR[par[0]], arg[0].in);
}
| |
/*
 * WSR CCOMPAREn: update the compare register and let the
 * update_ccompare helper rearm the matching timer.  gen_io_start() is
 * needed under icount because the helper touches timer state.  No-op
 * in user emulation.
 */
static void translate_wsr_ccompare(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    uint32_t id = par[0] - CCOMPARE;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->nccompare);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
    gen_helper_update_ccompare(cpu_env, tmp);
    tcg_temp_free(tmp);
#endif
}
| |
/*
 * WSR CCOUNT: write the cycle counter through a helper (which owns the
 * clock bookkeeping); gen_io_start() is needed under icount.  No-op in
 * user emulation.
 */
static void translate_wsr_ccount(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_wsr_ccount(cpu_env, arg[0].in);
#endif
}
| |
/*
 * WSR DBREAKAn: set data-breakpoint address n (n = par[0] - DBREAKA)
 * via helper so watchpoint state stays in sync.  No-op in user mode.
 */
static void translate_wsr_dbreaka(DisasContext *dc, const OpcodeArg arg[],
                                  const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    unsigned id = par[0] - DBREAKA;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->ndbreak);
    gen_helper_wsr_dbreaka(cpu_env, tmp, arg[0].in);
    tcg_temp_free(tmp);
#endif
}
| |
/*
 * WSR DBREAKCn: set data-breakpoint control n (n = par[0] - DBREAKC)
 * via helper so watchpoint state stays in sync.  No-op in user mode.
 */
static void translate_wsr_dbreakc(DisasContext *dc, const OpcodeArg arg[],
                                  const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    unsigned id = par[0] - DBREAKC;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->ndbreak);
    gen_helper_wsr_dbreakc(cpu_env, tmp, arg[0].in);
    tcg_temp_free(tmp);
#endif
}
| |
/*
 * WSR IBREAKAn: set instruction-breakpoint address n
 * (n = par[0] - IBREAKA) via helper.  No-op in user mode.
 */
static void translate_wsr_ibreaka(DisasContext *dc, const OpcodeArg arg[],
                                  const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    unsigned id = par[0] - IBREAKA;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->nibreak);
    gen_helper_wsr_ibreaka(cpu_env, tmp, arg[0].in);
    tcg_temp_free(tmp);
#endif
}
| |
/* WSR IBREAKENABLE: delegate to helper; no-op in user emulation. */
static void translate_wsr_ibreakenable(DisasContext *dc, const OpcodeArg arg[],
                                       const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_ibreakenable(cpu_env, arg[0].in);
#endif
}
| |
/*
 * WSR ICOUNT: while single-step counting is active (dc->icount) the
 * new value is staged in dc->next_icount so the count for the current
 * instruction is not lost; otherwise write the SR directly.  No-op in
 * user emulation.
 */
static void translate_wsr_icount(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    if (dc->icount) {
        tcg_gen_mov_i32(dc->next_icount, arg[0].in);
    } else {
        tcg_gen_mov_i32(cpu_SR[par[0]], arg[0].in);
    }
#endif
}
| |
/* WSR INTCLEAR: clear pending interrupt bits via helper; user-mode no-op. */
static void translate_wsr_intclear(DisasContext *dc, const OpcodeArg arg[],
                                   const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_intclear(cpu_env, arg[0].in);
#endif
}
| |
/* WSR INTSET: raise software interrupt bits via helper; user-mode no-op. */
static void translate_wsr_intset(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_intset(cpu_env, arg[0].in);
#endif
}
| |
/* WSR MEMCTL: delegate to helper; no-op in user emulation. */
static void translate_wsr_memctl(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_memctl(cpu_env, arg[0].in);
#endif
}
| |
/* WSR MPUENB: delegate to helper (MPU state changes); user-mode no-op. */
static void translate_wsr_mpuenb(DisasContext *dc, const OpcodeArg arg[],
                                 const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_mpuenb(cpu_env, arg[0].in);
#endif
}
| |
/*
 * WSR PS: keep only the architecturally writable PS fields; the RING
 * field exists (and is therefore writable) only with the MMU or MPU
 * option.  No-op in user emulation.
 */
static void translate_wsr_ps(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU) ||
        option_enabled(dc, XTENSA_OPTION_MPU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, mask);
#endif
}
| |
/* WSR RASID: delegate to helper (affects TLB lookups); user-mode no-op. */
static void translate_wsr_rasid(DisasContext *dc, const OpcodeArg arg[],
                                const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    gen_helper_wsr_rasid(cpu_env, arg[0].in);
#endif
}
| |
/*
 * WSR SAR: store the 6-bit shift amount and invalidate the
 * translator's cached knowledge that SAR fits in 5 bits (and the
 * cached 32 - SAR value), since the new value is unknown at
 * translation time.
 */
static void translate_wsr_sar(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}
| |
/*
 * WSR WINDOWBASE: stage the new value in cpu_windowbase_next; the
 * actual rotation is applied elsewhere.  No-op in user emulation.
 */
static void translate_wsr_windowbase(DisasContext *dc, const OpcodeArg arg[],
                                     const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_mov_i32(cpu_windowbase_next, arg[0].in);
#endif
}
| |
/*
 * WSR WINDOWSTART: mask the value to the valid bits — one bit per
 * 4-register window, i.e. nareg / 4 bits.  No-op in user emulation.
 */
static void translate_wsr_windowstart(DisasContext *dc, const OpcodeArg arg[],
                                      const uint32_t par[])
{
#ifndef CONFIG_USER_ONLY
    tcg_gen_andi_i32(cpu_SR[par[0]], arg[0].in,
                     (1 << dc->config->nareg / 4) - 1);
#endif
}
| |
/* Generic WUR: copy the AR into the user register selected by par[0]. */
static void translate_wur(DisasContext *dc, const OpcodeArg arg[],
                          const uint32_t par[])
{
    tcg_gen_mov_i32(cpu_UR[par[0]], arg[0].in);
}
| |
/* WUR FCR: FPU control register write is handled by the wur_fcr helper. */
static void translate_wur_fcr(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    gen_helper_wur_fcr(cpu_env, arg[0].in);
}
| |
/* WUR FSR: only bits 31:7 of the FPU status register are writable. */
static void translate_wur_fsr(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    tcg_gen_andi_i32(cpu_UR[par[0]], arg[0].in, 0xffffff80);
}
| |
| static void translate_xor(DisasContext *dc, const OpcodeArg arg
|