| /* |
| * PowerPC emulation for qemu: main translation routines. |
| * |
| * Copyright (c) 2003-2007 Jocelyn Mayer |
| * Copyright (C) 2011 Freescale Semiconductor, Inc. |
| * |
| * This library is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU Lesser General Public |
| * License as published by the Free Software Foundation; either |
| * version 2.1 of the License, or (at your option) any later version. |
| * |
| * This library is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| * Lesser General Public License for more details. |
| * |
| * You should have received a copy of the GNU Lesser General Public |
| * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
| */ |
| |
| #include "qemu/osdep.h" |
| #include "cpu.h" |
| #include "internal.h" |
| #include "disas/disas.h" |
| #include "exec/exec-all.h" |
| #include "tcg/tcg-op.h" |
| #include "tcg/tcg-op-gvec.h" |
| #include "qemu/host-utils.h" |
| #include "qemu/main-loop.h" |
| #include "exec/cpu_ldst.h" |
| |
| #include "exec/helper-proto.h" |
| #include "exec/helper-gen.h" |
| |
| #include "exec/translator.h" |
| #include "exec/log.h" |
| #include "qemu/atomic128.h" |
| #include "spr_common.h" |
| |
| #include "qemu/qemu-print.h" |
| #include "qapi/error.h" |
| |
| #define CPU_SINGLE_STEP 0x1 |
| #define CPU_BRANCH_STEP 0x2 |
| |
/* Include definitions for instruction classes and implementation flags */
| /* #define PPC_DEBUG_DISAS */ |
| |
| #ifdef PPC_DEBUG_DISAS |
| # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) |
| #else |
| # define LOG_DISAS(...) do { } while (0) |
| #endif |
| /*****************************************************************************/ |
| /* Code translation helpers */ |
| |
| /* global register indexes */ |
| static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */ |
| + 10 * 4 + 22 * 5 /* SPE GPRh */ |
| + 8 * 5 /* CRF */]; |
| static TCGv cpu_gpr[32]; |
| static TCGv cpu_gprh[32]; |
| static TCGv_i32 cpu_crf[8]; |
| static TCGv cpu_nip; |
| static TCGv cpu_msr; |
| static TCGv cpu_ctr; |
| static TCGv cpu_lr; |
| #if defined(TARGET_PPC64) |
| static TCGv cpu_cfar; |
| #endif |
| static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32; |
| static TCGv cpu_reserve; |
| static TCGv cpu_reserve_val; |
| static TCGv cpu_fpscr; |
| static TCGv_i32 cpu_access_type; |
| |
| #include "exec/gen-icount.h" |
| |
| void ppc_translate_init(void) |
| { |
| int i; |
| char *p; |
| size_t cpu_reg_names_size; |
| |
| p = cpu_reg_names; |
| cpu_reg_names_size = sizeof(cpu_reg_names); |
| |
| for (i = 0; i < 8; i++) { |
| snprintf(p, cpu_reg_names_size, "crf%d", i); |
| cpu_crf[i] = tcg_global_mem_new_i32(cpu_env, |
| offsetof(CPUPPCState, crf[i]), p); |
| p += 5; |
| cpu_reg_names_size -= 5; |
| } |
| |
| for (i = 0; i < 32; i++) { |
| snprintf(p, cpu_reg_names_size, "r%d", i); |
| cpu_gpr[i] = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, gpr[i]), p); |
| p += (i < 10) ? 3 : 4; |
| cpu_reg_names_size -= (i < 10) ? 3 : 4; |
| snprintf(p, cpu_reg_names_size, "r%dH", i); |
| cpu_gprh[i] = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, gprh[i]), p); |
| p += (i < 10) ? 4 : 5; |
| cpu_reg_names_size -= (i < 10) ? 4 : 5; |
| } |
| |
| cpu_nip = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, nip), "nip"); |
| |
| cpu_msr = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, msr), "msr"); |
| |
| cpu_ctr = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, ctr), "ctr"); |
| |
| cpu_lr = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, lr), "lr"); |
| |
| #if defined(TARGET_PPC64) |
| cpu_cfar = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, cfar), "cfar"); |
| #endif |
| |
| cpu_xer = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, xer), "xer"); |
| cpu_so = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, so), "SO"); |
| cpu_ov = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, ov), "OV"); |
| cpu_ca = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, ca), "CA"); |
| cpu_ov32 = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, ov32), "OV32"); |
| cpu_ca32 = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, ca32), "CA32"); |
| |
| cpu_reserve = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, reserve_addr), |
| "reserve_addr"); |
| cpu_reserve_val = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, reserve_val), |
| "reserve_val"); |
| |
| cpu_fpscr = tcg_global_mem_new(cpu_env, |
| offsetof(CPUPPCState, fpscr), "fpscr"); |
| |
| cpu_access_type = tcg_global_mem_new_i32(cpu_env, |
| offsetof(CPUPPCState, access_type), |
| "access_type"); |
| } |
| |
| /* internal defines */ |
| struct DisasContext { |
| DisasContextBase base; |
| target_ulong cia; /* current instruction address */ |
| uint32_t opcode; |
    /* Execution mode bits affecting memory access */
| bool pr, hv, dr, le_mode; |
| bool lazy_tlb_flush; |
| bool need_access_type; |
| int mem_idx; |
| int access_type; |
| /* Translation flags */ |
| MemOp default_tcg_memop_mask; |
| #if defined(TARGET_PPC64) |
| bool sf_mode; |
| bool has_cfar; |
| #endif |
| bool fpu_enabled; |
| bool altivec_enabled; |
| bool vsx_enabled; |
| bool spe_enabled; |
| bool tm_enabled; |
| bool gtse; |
| bool hr; |
| bool mmcr0_pmcc0; |
| bool mmcr0_pmcc1; |
| bool pmu_insn_cnt; |
| ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ |
| int singlestep_enabled; |
| uint32_t flags; |
| uint64_t insns_flags; |
| uint64_t insns_flags2; |
| }; |
| |
| #define DISAS_EXIT DISAS_TARGET_0 /* exit to main loop, pc updated */ |
| #define DISAS_EXIT_UPDATE DISAS_TARGET_1 /* exit to main loop, pc stale */ |
| #define DISAS_CHAIN DISAS_TARGET_2 /* lookup next tb, pc updated */ |
| #define DISAS_CHAIN_UPDATE DISAS_TARGET_3 /* lookup next tb, pc stale */ |
| |
| /* Return true iff byteswap is needed in a scalar memop */ |
| static inline bool need_byteswap(const DisasContext *ctx) |
| { |
| #if defined(TARGET_WORDS_BIGENDIAN) |
| return ctx->le_mode; |
| #else |
| return !ctx->le_mode; |
| #endif |
| } |
| |
| /* True when active word size < size of target_long. */ |
| #ifdef TARGET_PPC64 |
| # define NARROW_MODE(C) (!(C)->sf_mode) |
| #else |
| # define NARROW_MODE(C) 0 |
| #endif |
| |
| struct opc_handler_t { |
| /* invalid bits for instruction 1 (Rc(opcode) == 0) */ |
| uint32_t inval1; |
| /* invalid bits for instruction 2 (Rc(opcode) == 1) */ |
| uint32_t inval2; |
| /* instruction type */ |
| uint64_t type; |
| /* extended instruction type */ |
| uint64_t type2; |
| /* handler */ |
| void (*handler)(DisasContext *ctx); |
| }; |
| |
| /* SPR load/store helpers */ |
| static inline void gen_load_spr(TCGv t, int reg) |
| { |
| tcg_gen_ld_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg])); |
| } |
| |
| static inline void gen_store_spr(int reg, TCGv t) |
| { |
| tcg_gen_st_tl(t, cpu_env, offsetof(CPUPPCState, spr[reg])); |
| } |
| |
| static inline void gen_set_access_type(DisasContext *ctx, int access_type) |
| { |
| if (ctx->need_access_type && ctx->access_type != access_type) { |
| tcg_gen_movi_i32(cpu_access_type, access_type); |
| ctx->access_type = access_type; |
| } |
| } |
| |
| static inline void gen_update_nip(DisasContext *ctx, target_ulong nip) |
| { |
| if (NARROW_MODE(ctx)) { |
| nip = (uint32_t)nip; |
| } |
| tcg_gen_movi_tl(cpu_nip, nip); |
| } |
| |
| static void gen_exception_err(DisasContext *ctx, uint32_t excp, uint32_t error) |
| { |
| TCGv_i32 t0, t1; |
| |
| /* |
 * These are all synchronous exceptions, so we set the PC back to the
 * faulting instruction.
| */ |
| gen_update_nip(ctx, ctx->cia); |
| t0 = tcg_const_i32(excp); |
| t1 = tcg_const_i32(error); |
| gen_helper_raise_exception_err(cpu_env, t0, t1); |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| ctx->base.is_jmp = DISAS_NORETURN; |
| } |
| |
| static void gen_exception(DisasContext *ctx, uint32_t excp) |
| { |
| TCGv_i32 t0; |
| |
| /* |
 * These are all synchronous exceptions, so we set the PC back to the
 * faulting instruction.
| */ |
| gen_update_nip(ctx, ctx->cia); |
| t0 = tcg_const_i32(excp); |
| gen_helper_raise_exception(cpu_env, t0); |
| tcg_temp_free_i32(t0); |
| ctx->base.is_jmp = DISAS_NORETURN; |
| } |
| |
| static void gen_exception_nip(DisasContext *ctx, uint32_t excp, |
| target_ulong nip) |
| { |
| TCGv_i32 t0; |
| |
| gen_update_nip(ctx, nip); |
| t0 = tcg_const_i32(excp); |
| gen_helper_raise_exception(cpu_env, t0); |
| tcg_temp_free_i32(t0); |
| ctx->base.is_jmp = DISAS_NORETURN; |
| } |
| |
| static void gen_icount_io_start(DisasContext *ctx) |
| { |
| if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { |
| gen_io_start(); |
| /* |
| * An I/O instruction must be last in the TB. |
| * Chain to the next TB, and let the code from gen_tb_start |
| * decide if we need to return to the main loop. |
| * Doing this first also allows this value to be overridden. |
| */ |
| ctx->base.is_jmp = DISAS_TOO_MANY; |
| } |
| } |
| |
| /* |
 * Tells the caller which exception to generate and prepares the SPR
 * registers for that exception.
| * |
| * The exception can be either POWERPC_EXCP_TRACE (on most PowerPCs) or |
| * POWERPC_EXCP_DEBUG (on BookE). |
| */ |
| static uint32_t gen_prep_dbgex(DisasContext *ctx) |
| { |
| if (ctx->flags & POWERPC_FLAG_DE) { |
| target_ulong dbsr = 0; |
| if (ctx->singlestep_enabled & CPU_SINGLE_STEP) { |
| dbsr = DBCR0_ICMP; |
| } else { |
            /* Must have been a branch */
| dbsr = DBCR0_BRT; |
| } |
| TCGv t0 = tcg_temp_new(); |
| gen_load_spr(t0, SPR_BOOKE_DBSR); |
| tcg_gen_ori_tl(t0, t0, dbsr); |
| gen_store_spr(SPR_BOOKE_DBSR, t0); |
| tcg_temp_free(t0); |
| return POWERPC_EXCP_DEBUG; |
| } else { |
| return POWERPC_EXCP_TRACE; |
| } |
| } |
| |
| static void gen_debug_exception(DisasContext *ctx) |
| { |
| gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx))); |
| ctx->base.is_jmp = DISAS_NORETURN; |
| } |
| |
| static inline void gen_inval_exception(DisasContext *ctx, uint32_t error) |
| { |
| /* Will be converted to program check if needed */ |
| gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_INVAL | error); |
| } |
| |
| static inline void gen_priv_exception(DisasContext *ctx, uint32_t error) |
| { |
| gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_PRIV | error); |
| } |
| |
| static inline void gen_hvpriv_exception(DisasContext *ctx, uint32_t error) |
| { |
| /* Will be converted to program check if needed */ |
| gen_exception_err(ctx, POWERPC_EXCP_HV_EMU, POWERPC_EXCP_PRIV | error); |
| } |
| |
| /*****************************************************************************/ |
| /* SPR READ/WRITE CALLBACKS */ |
| |
| void spr_noaccess(DisasContext *ctx, int gprn, int sprn) |
| { |
| #if 0 |
| sprn = ((sprn >> 5) & 0x1F) | ((sprn & 0x1F) << 5); |
| printf("ERROR: try to access SPR %d !\n", sprn); |
| #endif |
| } |
| |
| /* #define PPC_DUMP_SPR_ACCESSES */ |
| |
| /* |
| * Generic callbacks: |
 * do nothing but store/retrieve the SPR value
| */ |
| static void spr_load_dump_spr(int sprn) |
| { |
| #ifdef PPC_DUMP_SPR_ACCESSES |
| TCGv_i32 t0 = tcg_const_i32(sprn); |
| gen_helper_load_dump_spr(cpu_env, t0); |
| tcg_temp_free_i32(t0); |
| #endif |
| } |
| |
| void spr_read_generic(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_load_spr(cpu_gpr[gprn], sprn); |
| spr_load_dump_spr(sprn); |
| } |
| |
| static void spr_store_dump_spr(int sprn) |
| { |
| #ifdef PPC_DUMP_SPR_ACCESSES |
| TCGv_i32 t0 = tcg_const_i32(sprn); |
| gen_helper_store_dump_spr(cpu_env, t0); |
| tcg_temp_free_i32(t0); |
| #endif |
| } |
| |
| void spr_write_generic(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_store_spr(sprn, cpu_gpr[gprn]); |
| spr_store_dump_spr(sprn); |
| } |
| |
| void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn) |
| { |
| spr_write_generic(ctx, sprn, gprn); |
| |
| /* |
| * SPR_CTRL writes must force a new translation block, |
| * allowing the PMU to calculate the run latch events with |
| * more accuracy. |
| */ |
| ctx->base.is_jmp = DISAS_EXIT_UPDATE; |
| } |
| |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_write_generic32(DisasContext *ctx, int sprn, int gprn) |
| { |
| #ifdef TARGET_PPC64 |
| TCGv t0 = tcg_temp_new(); |
| tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); |
| gen_store_spr(sprn, t0); |
| tcg_temp_free(t0); |
| spr_store_dump_spr(sprn); |
| #else |
| spr_write_generic(ctx, sprn, gprn); |
| #endif |
| } |
| |
| void spr_write_clear(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| TCGv t1 = tcg_temp_new(); |
| gen_load_spr(t0, sprn); |
| tcg_gen_neg_tl(t1, cpu_gpr[gprn]); |
| tcg_gen_and_tl(t0, t0, t1); |
| gen_store_spr(sprn, t0); |
| tcg_temp_free(t0); |
| tcg_temp_free(t1); |
| } |
| |
| void spr_access_nop(DisasContext *ctx, int sprn, int gprn) |
| { |
| } |
| |
| #endif |
| |
| /* SPR common to all PowerPC */ |
| /* XER */ |
| void spr_read_xer(DisasContext *ctx, int gprn, int sprn) |
| { |
| TCGv dst = cpu_gpr[gprn]; |
| TCGv t0 = tcg_temp_new(); |
| TCGv t1 = tcg_temp_new(); |
| TCGv t2 = tcg_temp_new(); |
| tcg_gen_mov_tl(dst, cpu_xer); |
| tcg_gen_shli_tl(t0, cpu_so, XER_SO); |
| tcg_gen_shli_tl(t1, cpu_ov, XER_OV); |
| tcg_gen_shli_tl(t2, cpu_ca, XER_CA); |
| tcg_gen_or_tl(t0, t0, t1); |
| tcg_gen_or_tl(dst, dst, t2); |
| tcg_gen_or_tl(dst, dst, t0); |
| if (is_isa300(ctx)) { |
| tcg_gen_shli_tl(t0, cpu_ov32, XER_OV32); |
| tcg_gen_or_tl(dst, dst, t0); |
| tcg_gen_shli_tl(t0, cpu_ca32, XER_CA32); |
| tcg_gen_or_tl(dst, dst, t0); |
| } |
| tcg_temp_free(t0); |
| tcg_temp_free(t1); |
| tcg_temp_free(t2); |
| } |
| |
| void spr_write_xer(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv src = cpu_gpr[gprn]; |
    /* Write all flag bits unconditionally; only the read back (spr_read_xer) checks for isa300 */
| tcg_gen_andi_tl(cpu_xer, src, |
| ~((1u << XER_SO) | |
| (1u << XER_OV) | (1u << XER_OV32) | |
| (1u << XER_CA) | (1u << XER_CA32))); |
| tcg_gen_extract_tl(cpu_ov32, src, XER_OV32, 1); |
| tcg_gen_extract_tl(cpu_ca32, src, XER_CA32, 1); |
| tcg_gen_extract_tl(cpu_so, src, XER_SO, 1); |
| tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1); |
| tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1); |
| } |
| |
| /* LR */ |
| void spr_read_lr(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_mov_tl(cpu_gpr[gprn], cpu_lr); |
| } |
| |
| void spr_write_lr(DisasContext *ctx, int sprn, int gprn) |
| { |
| tcg_gen_mov_tl(cpu_lr, cpu_gpr[gprn]); |
| } |
| |
| /* CFAR */ |
| #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) |
| void spr_read_cfar(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_mov_tl(cpu_gpr[gprn], cpu_cfar); |
| } |
| |
| void spr_write_cfar(DisasContext *ctx, int sprn, int gprn) |
| { |
| tcg_gen_mov_tl(cpu_cfar, cpu_gpr[gprn]); |
| } |
| #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ |
| |
| /* CTR */ |
| void spr_read_ctr(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_mov_tl(cpu_gpr[gprn], cpu_ctr); |
| } |
| |
| void spr_write_ctr(DisasContext *ctx, int sprn, int gprn) |
| { |
| tcg_gen_mov_tl(cpu_ctr, cpu_gpr[gprn]); |
| } |
| |
| /* User read access to SPR */ |
| /* USPRx */ |
| /* UMMCRx */ |
| /* UPMCx */ |
| /* USIA */ |
| /* UDECR */ |
| void spr_read_ureg(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_load_spr(cpu_gpr[gprn], sprn + 0x10); |
| } |
| |
| #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) |
| void spr_write_ureg(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_store_spr(sprn + 0x10, cpu_gpr[gprn]); |
| } |
| #endif |
| |
| /* SPR common to all non-embedded PowerPC */ |
| /* DECR */ |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_read_decr(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_load_decr(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_write_decr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); |
| } |
| #endif |
| |
| /* SPR common to all non-embedded PowerPC, except 601 */ |
| /* Time base */ |
| void spr_read_tbl(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_load_tbl(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_read_tbu(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_load_tbu(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_read_atbl(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_helper_load_atbl(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_read_atbu(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_helper_load_atbu(cpu_gpr[gprn], cpu_env); |
| } |
| |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_atbl(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_atbu(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| #if defined(TARGET_PPC64) |
| void spr_read_purr(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_load_purr(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_write_purr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_purr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| /* HDECR */ |
| void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_read_vtb(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_load_vtb(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_write_vtb(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_vtb(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_tbu40(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_tbu40(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| #endif |
| #endif |
| |
| #if !defined(CONFIG_USER_ONLY) |
/* IBAT0U...IBAT7U */
| /* IBAT0L...IBAT7L */ |
| void spr_read_ibat(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, |
| offsetof(CPUPPCState, |
| IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); |
| } |
| |
| void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, |
| offsetof(CPUPPCState, |
| IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4])); |
| } |
| |
| void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0U) / 2); |
| gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| void spr_write_ibatu_h(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4U) / 2) + 4); |
| gen_helper_store_ibatu(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| void spr_write_ibatl(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32((sprn - SPR_IBAT0L) / 2); |
| gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| void spr_write_ibatl_h(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_IBAT4L) / 2) + 4); |
| gen_helper_store_ibatl(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| /* DBAT0U...DBAT7U */ |
| /* DBAT0L...DBAT7L */ |
| void spr_read_dbat(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, |
| offsetof(CPUPPCState, |
| DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2])); |
| } |
| |
| void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, |
| offsetof(CPUPPCState, |
| DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4])); |
| } |
| |
| void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0U) / 2); |
| gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| void spr_write_dbatu_h(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4U) / 2) + 4); |
| gen_helper_store_dbatu(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| void spr_write_dbatl(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32((sprn - SPR_DBAT0L) / 2); |
| gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| void spr_write_dbatl_h(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32(((sprn - SPR_DBAT4L) / 2) + 4); |
| gen_helper_store_dbatl(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| |
| /* SDR1 */ |
| void spr_write_sdr1(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_sdr1(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| #if defined(TARGET_PPC64) |
/* 64-bit PowerPC specific SPRs */
| /* PIDR */ |
| void spr_write_pidr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_pidr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_lpidr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_lpidr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_read_hior(DisasContext *ctx, int gprn, int sprn) |
| { |
| tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, excp_prefix)); |
| } |
| |
| void spr_write_hior(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0x3FFFFF00000ULL); |
| tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); |
| tcg_temp_free(t0); |
| } |
| void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_ptcr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_pcr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_pcr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| /* DPDES */ |
| void spr_read_dpdes(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_helper_load_dpdes(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_write_dpdes(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_dpdes(cpu_env, cpu_gpr[gprn]); |
| } |
| #endif |
| #endif |
| |
| /* PowerPC 40x specific registers */ |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_read_40x_pit(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_load_40x_pit(cpu_gpr[gprn], cpu_env); |
| } |
| |
| void spr_write_40x_pit(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_40x_pit(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_40x_dbcr0(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_store_spr(sprn, cpu_gpr[gprn]); |
| gen_helper_store_40x_dbcr0(cpu_env, cpu_gpr[gprn]); |
| /* We must stop translation as we may have rebooted */ |
| ctx->base.is_jmp = DISAS_EXIT_UPDATE; |
| } |
| |
| void spr_write_40x_sler(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_40x_sler(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_40x_tcr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_40x_tcr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_40x_tsr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_40x_tsr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_40x_pid(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xFF); |
| gen_helper_store_40x_pid(cpu_env, t0); |
| tcg_temp_free(t0); |
| } |
| |
| void spr_write_booke_tcr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_booke_tcr(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_booke_tsr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_icount_io_start(ctx); |
| gen_helper_store_booke_tsr(cpu_env, cpu_gpr[gprn]); |
| } |
| #endif |
| |
| /* PIR */ |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_write_pir(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| tcg_gen_andi_tl(t0, cpu_gpr[gprn], 0xF); |
| gen_store_spr(SPR_PIR, t0); |
| tcg_temp_free(t0); |
| } |
| #endif |
| |
| /* SPE specific registers */ |
| void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| tcg_gen_ld_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); |
| tcg_gen_extu_i32_tl(cpu_gpr[gprn], t0); |
| tcg_temp_free_i32(t0); |
| } |
| |
| void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| tcg_gen_trunc_tl_i32(t0, cpu_gpr[gprn]); |
| tcg_gen_st_i32(t0, cpu_env, offsetof(CPUPPCState, spe_fscr)); |
| tcg_temp_free_i32(t0); |
| } |
| |
| #if !defined(CONFIG_USER_ONLY) |
| /* Callback used to write the exception vector base */ |
| void spr_write_excp_prefix(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivpr_mask)); |
| tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); |
| tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_prefix)); |
| gen_store_spr(sprn, t0); |
| tcg_temp_free(t0); |
| } |
| |
| void spr_write_excp_vector(DisasContext *ctx, int sprn, int gprn) |
| { |
| int sprn_offs; |
| |
| if (sprn >= SPR_BOOKE_IVOR0 && sprn <= SPR_BOOKE_IVOR15) { |
| sprn_offs = sprn - SPR_BOOKE_IVOR0; |
| } else if (sprn >= SPR_BOOKE_IVOR32 && sprn <= SPR_BOOKE_IVOR37) { |
| sprn_offs = sprn - SPR_BOOKE_IVOR32 + 32; |
| } else if (sprn >= SPR_BOOKE_IVOR38 && sprn <= SPR_BOOKE_IVOR42) { |
| sprn_offs = sprn - SPR_BOOKE_IVOR38 + 38; |
| } else { |
| printf("Trying to write an unknown exception vector %d %03x\n", |
| sprn, sprn); |
| gen_inval_exception(ctx, POWERPC_EXCP_PRIV_REG); |
| return; |
| } |
| |
| TCGv t0 = tcg_temp_new(); |
| tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUPPCState, ivor_mask)); |
| tcg_gen_and_tl(t0, t0, cpu_gpr[gprn]); |
| tcg_gen_st_tl(t0, cpu_env, offsetof(CPUPPCState, excp_vectors[sprn_offs])); |
| gen_store_spr(sprn, t0); |
| tcg_temp_free(t0); |
| } |
| #endif |
| |
| #ifdef TARGET_PPC64 |
| #ifndef CONFIG_USER_ONLY |
| void spr_write_amr(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| TCGv t1 = tcg_temp_new(); |
| TCGv t2 = tcg_temp_new(); |
| |
| /* |
| * Note, the HV=1 PR=0 case is handled earlier by simply using |
| * spr_write_generic for HV mode in the SPR table |
| */ |
| |
| /* Build insertion mask into t1 based on context */ |
| if (ctx->pr) { |
| gen_load_spr(t1, SPR_UAMOR); |
| } else { |
| gen_load_spr(t1, SPR_AMOR); |
| } |
| |
| /* Mask new bits into t2 */ |
| tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); |
| |
| /* Load AMR and clear new bits in t0 */ |
| gen_load_spr(t0, SPR_AMR); |
| tcg_gen_andc_tl(t0, t0, t1); |
| |
    /* OR in the new bits and write it out */
| tcg_gen_or_tl(t0, t0, t2); |
| gen_store_spr(SPR_AMR, t0); |
| spr_store_dump_spr(SPR_AMR); |
| |
| tcg_temp_free(t0); |
| tcg_temp_free(t1); |
| tcg_temp_free(t2); |
| } |
| |
| void spr_write_uamor(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| TCGv t1 = tcg_temp_new(); |
| TCGv t2 = tcg_temp_new(); |
| |
| /* |
| * Note, the HV=1 case is handled earlier by simply using |
| * spr_write_generic for HV mode in the SPR table |
| */ |
| |
| /* Build insertion mask into t1 based on context */ |
| gen_load_spr(t1, SPR_AMOR); |
| |
| /* Mask new bits into t2 */ |
| tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); |
| |
| /* Load AMR and clear new bits in t0 */ |
| gen_load_spr(t0, SPR_UAMOR); |
| tcg_gen_andc_tl(t0, t0, t1); |
| |
    /* OR in the new bits and write it out */
| tcg_gen_or_tl(t0, t0, t2); |
| gen_store_spr(SPR_UAMOR, t0); |
| spr_store_dump_spr(SPR_UAMOR); |
| |
| tcg_temp_free(t0); |
| tcg_temp_free(t1); |
| tcg_temp_free(t2); |
| } |
| |
| void spr_write_iamr(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| TCGv t1 = tcg_temp_new(); |
| TCGv t2 = tcg_temp_new(); |
| |
| /* |
| * Note, the HV=1 case is handled earlier by simply using |
| * spr_write_generic for HV mode in the SPR table |
| */ |
| |
| /* Build insertion mask into t1 based on context */ |
| gen_load_spr(t1, SPR_AMOR); |
| |
| /* Mask new bits into t2 */ |
| tcg_gen_and_tl(t2, t1, cpu_gpr[gprn]); |
| |
| /* Load AMR and clear new bits in t0 */ |
| gen_load_spr(t0, SPR_IAMR); |
| tcg_gen_andc_tl(t0, t0, t1); |
| |
    /* OR in the new bits and write it out */
| tcg_gen_or_tl(t0, t0, t2); |
| gen_store_spr(SPR_IAMR, t0); |
| spr_store_dump_spr(SPR_IAMR); |
| |
| tcg_temp_free(t0); |
| tcg_temp_free(t1); |
| tcg_temp_free(t2); |
| } |
| #endif |
| #endif |
| |
| #ifndef CONFIG_USER_ONLY |
| void spr_read_thrm(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_helper_fixup_thrm(cpu_env); |
| gen_load_spr(cpu_gpr[gprn], sprn); |
| spr_load_dump_spr(sprn); |
| } |
| #endif /* !CONFIG_USER_ONLY */ |
| |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_write_e500_l1csr0(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| |
| tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR0_DCE | L1CSR0_CPE); |
| gen_store_spr(sprn, t0); |
| tcg_temp_free(t0); |
| } |
| |
| void spr_write_e500_l1csr1(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| |
| tcg_gen_andi_tl(t0, cpu_gpr[gprn], L1CSR1_ICE | L1CSR1_CPE); |
| gen_store_spr(sprn, t0); |
| tcg_temp_free(t0); |
| } |
| |
| void spr_write_e500_l2csr0(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv t0 = tcg_temp_new(); |
| |
| tcg_gen_andi_tl(t0, cpu_gpr[gprn], |
| ~(E500_L2CSR0_L2FI | E500_L2CSR0_L2FL | E500_L2CSR0_L2LFC)); |
| gen_store_spr(sprn, t0); |
| tcg_temp_free(t0); |
| } |
| |
| void spr_write_booke206_mmucsr0(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_booke206_tlbflush(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| void spr_write_booke_pid(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv_i32 t0 = tcg_const_i32(sprn); |
| gen_helper_booke_setpid(cpu_env, t0, cpu_gpr[gprn]); |
| tcg_temp_free_i32(t0); |
| } |
| void spr_write_eplc(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_booke_set_eplc(cpu_env, cpu_gpr[gprn]); |
| } |
| void spr_write_epsc(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_booke_set_epsc(cpu_env, cpu_gpr[gprn]); |
| } |
| |
| #endif |
| |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_write_mas73(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv val = tcg_temp_new(); |
| tcg_gen_ext32u_tl(val, cpu_gpr[gprn]); |
| gen_store_spr(SPR_BOOKE_MAS3, val); |
| tcg_gen_shri_tl(val, cpu_gpr[gprn], 32); |
| gen_store_spr(SPR_BOOKE_MAS7, val); |
| tcg_temp_free(val); |
| } |
| |
| void spr_read_mas73(DisasContext *ctx, int gprn, int sprn) |
| { |
| TCGv mas7 = tcg_temp_new(); |
| TCGv mas3 = tcg_temp_new(); |
| gen_load_spr(mas7, SPR_BOOKE_MAS7); |
| tcg_gen_shli_tl(mas7, mas7, 32); |
| gen_load_spr(mas3, SPR_BOOKE_MAS3); |
| tcg_gen_or_tl(cpu_gpr[gprn], mas3, mas7); |
| tcg_temp_free(mas3); |
| tcg_temp_free(mas7); |
| } |
| |
| #endif |
| |
| #ifdef TARGET_PPC64 |
| static void gen_fscr_facility_check(DisasContext *ctx, int facility_sprn, |
| int bit, int sprn, int cause) |
| { |
| TCGv_i32 t1 = tcg_const_i32(bit); |
| TCGv_i32 t2 = tcg_const_i32(sprn); |
| TCGv_i32 t3 = tcg_const_i32(cause); |
| |
| gen_helper_fscr_facility_check(cpu_env, t1, t2, t3); |
| |
| tcg_temp_free_i32(t3); |
| tcg_temp_free_i32(t2); |
| tcg_temp_free_i32(t1); |
| } |
| |
| static void gen_msr_facility_check(DisasContext *ctx, int facility_sprn, |
| int bit, int sprn, int cause) |
| { |
| TCGv_i32 t1 = tcg_const_i32(bit); |
| TCGv_i32 t2 = tcg_const_i32(sprn); |
| TCGv_i32 t3 = tcg_const_i32(cause); |
| |
| gen_helper_msr_facility_check(cpu_env, t1, t2, t3); |
| |
| tcg_temp_free_i32(t3); |
| tcg_temp_free_i32(t2); |
| tcg_temp_free_i32(t1); |
| } |
| |
| void spr_read_prev_upper32(DisasContext *ctx, int gprn, int sprn) |
| { |
| TCGv spr_up = tcg_temp_new(); |
| TCGv spr = tcg_temp_new(); |
| |
| gen_load_spr(spr, sprn - 1); |
| tcg_gen_shri_tl(spr_up, spr, 32); |
| tcg_gen_ext32u_tl(cpu_gpr[gprn], spr_up); |
| |
| tcg_temp_free(spr); |
| tcg_temp_free(spr_up); |
| } |
| |
| void spr_write_prev_upper32(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv spr = tcg_temp_new(); |
| |
| gen_load_spr(spr, sprn - 1); |
| tcg_gen_deposit_tl(spr, spr, cpu_gpr[gprn], 32, 32); |
| gen_store_spr(sprn - 1, spr); |
| |
| tcg_temp_free(spr); |
| } |
| |
| #if !defined(CONFIG_USER_ONLY) |
| void spr_write_hmer(DisasContext *ctx, int sprn, int gprn) |
| { |
| TCGv hmer = tcg_temp_new(); |
| |
| gen_load_spr(hmer, sprn); |
| tcg_gen_and_tl(hmer, cpu_gpr[gprn], hmer); |
| gen_store_spr(sprn, hmer); |
| spr_store_dump_spr(sprn); |
| tcg_temp_free(hmer); |
| } |
| |
| void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]); |
| } |
| #endif /* !defined(CONFIG_USER_ONLY) */ |
| |
| void spr_read_tar(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); |
| spr_read_generic(ctx, gprn, sprn); |
| } |
| |
| void spr_write_tar(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_TAR, sprn, FSCR_IC_TAR); |
| spr_write_generic(ctx, sprn, gprn); |
| } |
| |
| void spr_read_tm(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); |
| spr_read_generic(ctx, gprn, sprn); |
| } |
| |
| void spr_write_tm(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); |
| spr_write_generic(ctx, sprn, gprn); |
| } |
| |
| void spr_read_tm_upper32(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); |
| spr_read_prev_upper32(ctx, gprn, sprn); |
| } |
| |
| void spr_write_tm_upper32(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_msr_facility_check(ctx, SPR_FSCR, MSR_TM, sprn, FSCR_IC_TM); |
| spr_write_prev_upper32(ctx, sprn, gprn); |
| } |
| |
| void spr_read_ebb(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); |
| spr_read_generic(ctx, gprn, sprn); |
| } |
| |
| void spr_write_ebb(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); |
| spr_write_generic(ctx, sprn, gprn); |
| } |
| |
| void spr_read_ebb_upper32(DisasContext *ctx, int gprn, int sprn) |
| { |
| gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); |
| spr_read_prev_upper32(ctx, gprn, sprn); |
| } |
| |
| void spr_write_ebb_upper32(DisasContext *ctx, int sprn, int gprn) |
| { |
| gen_fscr_facility_check(ctx, SPR_FSCR, FSCR_EBB, sprn, FSCR_IC_EBB); |
| spr_write_prev_upper32(ctx, sprn, gprn); |
| } |
| #endif |
| |
| #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ |
| GEN_OPCODE(name, opc1, opc2, opc3, inval, type, PPC_NONE) |
| |
| #define GEN_HANDLER_E(name, opc1, opc2, opc3, inval, type, type2) \ |
| GEN_OPCODE(name, opc1, opc2, opc3, inval, type, type2) |
| |
| #define GEN_HANDLER2(name, onam, opc1, opc2, opc3, inval, type) \ |
| GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, PPC_NONE) |
| |
| #define GEN_HANDLER2_E(name, onam, opc1, opc2, opc3, inval, type, type2) \ |
| GEN_OPCODE2(name, onam, opc1, opc2, opc3, inval, type, type2) |
| |
| #define GEN_HANDLER_E_2(name, opc1, opc2, opc3, opc4, inval, type, type2) \ |
| GEN_OPCODE3(name, opc1, opc2, opc3, opc4, inval, type, type2) |
| |
| #define GEN_HANDLER2_E_2(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) \ |
| GEN_OPCODE4(name, onam, opc1, opc2, opc3, opc4, inval, typ, typ2) |
| |
| typedef struct opcode_t { |
| unsigned char opc1, opc2, opc3, opc4; |
| #if HOST_LONG_BITS == 64 /* Explicitly align to 64 bits */ |
| unsigned char pad[4]; |
| #endif |
| opc_handler_t handler; |
| const char *oname; |
| } opcode_t; |
| |
| /* Helpers for priv. check */ |
| #define GEN_PRIV \ |
| do { \ |
| gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); return; \ |
| } while (0) |
| |
| #if defined(CONFIG_USER_ONLY) |
| #define CHK_HV GEN_PRIV |
| #define CHK_SV GEN_PRIV |
| #define CHK_HVRM GEN_PRIV |
| #else |
| #define CHK_HV \ |
| do { \ |
| if (unlikely(ctx->pr || !ctx->hv)) { \ |
| GEN_PRIV; \ |
| } \ |
| } while (0) |
| #define CHK_SV \ |
| do { \ |
| if (unlikely(ctx->pr)) { \ |
| GEN_PRIV; \ |
| } \ |
| } while (0) |
| #define CHK_HVRM \ |
| do { \ |
| if (unlikely(ctx->pr || !ctx->hv || ctx->dr)) { \ |
| GEN_PRIV; \ |
| } \ |
| } while (0) |
| #endif |
| |
| #define CHK_NONE |
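
/*
 * For illustration: CHK_HV permits the instruction only in hypervisor
 * state (HV=1 and PR=0), CHK_SV only rejects problem state (PR=1), and
 * CHK_HVRM additionally requires real mode (DR=0).  In user-only builds
 * all three simply raise the privilege exception.
 */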
| |
| /*****************************************************************************/ |
| /* PowerPC instructions table */ |
| |
| #define GEN_OPCODE(name, op1, op2, op3, invl, _typ, _typ2) \ |
| { \ |
| .opc1 = op1, \ |
| .opc2 = op2, \ |
| .opc3 = op3, \ |
| .opc4 = 0xff, \ |
| .handler = { \ |
| .inval1 = invl, \ |
| .type = _typ, \ |
| .type2 = _typ2, \ |
| .handler = &gen_##name, \ |
| }, \ |
| .oname = stringify(name), \ |
| } |
| #define GEN_OPCODE_DUAL(name, op1, op2, op3, invl1, invl2, _typ, _typ2) \ |
| { \ |
| .opc1 = op1, \ |
| .opc2 = op2, \ |
| .opc3 = op3, \ |
| .opc4 = 0xff, \ |
| .handler = { \ |
| .inval1 = invl1, \ |
| .inval2 = invl2, \ |
| .type = _typ, \ |
| .type2 = _typ2, \ |
| .handler = &gen_##name, \ |
| }, \ |
| .oname = stringify(name), \ |
| } |
| #define GEN_OPCODE2(name, onam, op1, op2, op3, invl, _typ, _typ2) \ |
| { \ |
| .opc1 = op1, \ |
| .opc2 = op2, \ |
| .opc3 = op3, \ |
| .opc4 = 0xff, \ |
| .handler = { \ |
| .inval1 = invl, \ |
| .type = _typ, \ |
| .type2 = _typ2, \ |
| .handler = &gen_##name, \ |
| }, \ |
| .oname = onam, \ |
| } |
| #define GEN_OPCODE3(name, op1, op2, op3, op4, invl, _typ, _typ2) \ |
| { \ |
| .opc1 = op1, \ |
| .opc2 = op2, \ |
| .opc3 = op3, \ |
| .opc4 = op4, \ |
| .handler = { \ |
| .inval1 = invl, \ |
| .type = _typ, \ |
| .type2 = _typ2, \ |
| .handler = &gen_##name, \ |
| }, \ |
| .oname = stringify(name), \ |
| } |
| #define GEN_OPCODE4(name, onam, op1, op2, op3, op4, invl, _typ, _typ2) \ |
| { \ |
| .opc1 = op1, \ |
| .opc2 = op2, \ |
| .opc3 = op3, \ |
| .opc4 = op4, \ |
| .handler = { \ |
| .inval1 = invl, \ |
| .type = _typ, \ |
| .type2 = _typ2, \ |
| .handler = &gen_##name, \ |
| }, \ |
| .oname = onam, \ |
| } |
| |
| /* Invalid instruction */ |
| static void gen_invalid(DisasContext *ctx) |
| { |
| gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); |
| } |
| |
| static opc_handler_t invalid_handler = { |
| .inval1 = 0xFFFFFFFF, |
| .inval2 = 0xFFFFFFFF, |
| .type = PPC_NONE, |
| .type2 = PPC_NONE, |
| .handler = gen_invalid, |
| }; |
| |
| /*** Integer comparison ***/ |
| |
| static inline void gen_op_cmp(TCGv arg0, TCGv arg1, int s, int crf) |
| { |
| TCGv t0 = tcg_temp_new(); |
| TCGv t1 = tcg_temp_new(); |
| TCGv_i32 t = tcg_temp_new_i32(); |
| |
| tcg_gen_movi_tl(t0, CRF_EQ); |
| tcg_gen_movi_tl(t1, CRF_LT); |
| tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), |
| t0, arg0, arg1, t1, t0); |
| tcg_gen_movi_tl(t1, CRF_GT); |
| tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), |
| t0, arg0, arg1, t1, t0); |
| |
| tcg_gen_trunc_tl_i32(t, t0); |
| tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); |
| tcg_gen_or_i32(cpu_crf[crf], cpu_crf[crf], t); |
| |
| tcg_temp_free(t0); |
| tcg_temp_free(t1); |
| tcg_temp_free_i32(t); |
| } |
| |
| static inline void gen_op_cmpi(TCGv arg0, target_ulong arg1, int s, int crf) |
| { |
| TCGv t0 = tcg_const_tl(arg1); |
| gen_op_cmp(arg0, t0, s, crf); |
| tcg_temp_free(t0); |
| } |
| |
| static inline void gen_op_cmp32(TCGv arg0, TCGv arg1, int s, int crf) |
| { |
| TCGv t0, t1; |
| t0 = tcg_temp_new(); |
| t1 = tcg_temp_new(); |
| if (s) { |
| tcg_gen_ext32s_tl(t0, arg0); |
| tcg_gen_ext32s_tl(t1, arg1); |
| } else { |
| tcg_gen_ext32u_tl(t0, arg0); |
| tcg_gen_ext32u_tl(t1, arg1); |
| } |
| gen_op_cmp(t0, t1, s, crf); |
| tcg_temp_free(t1); |
| tcg_temp_free(t0); |
| } |
| |
| static inline void gen_op_cmpi32(TCGv arg0, target_ulong arg1, int s, int crf) |
| { |
| TCGv t0 = tcg_const_tl(arg1); |
| gen_op_cmp32(arg0, t0, s, crf); |
| tcg_temp_free(t0); |
| } |
| |
| static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) |
| { |
| if (NARROW_MODE(ctx)) { |
| gen_op_cmpi32(reg, 0, 1, 0); |
| } else { |
| gen_op_cmpi(reg, 0, 1, 0); |
| } |
| } |
| |
/* cmprb - range comparison: isupper, isalpha, islower */
| static void gen_cmprb(DisasContext *ctx) |
| { |
| TCGv_i32 src1 = tcg_temp_new_i32(); |
| TCGv_i32 src2 = tcg_temp_new_i32(); |
| TCGv_i32 src2lo = tcg_temp_new_i32(); |
| TCGv_i32 src2hi = tcg_temp_new_i32(); |
| TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)]; |
| |
| tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]); |
| tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]); |
| |
| tcg_gen_andi_i32(src1, src1, 0xFF); |
| tcg_gen_ext8u_i32(src2lo, src2); |
| tcg_gen_shri_i32(src2, src2, 8); |
| tcg_gen_ext8u_i32(src2hi, src2); |
| |
| tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1); |
| tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi); |
| tcg_gen_and_i32(crf, src2lo, src2hi); |
| |
| if (ctx->opcode & 0x00200000) { |
| tcg_gen_shri_i32(src2, src2, 8); |
| tcg_gen_ext8u_i32(src2lo, src2); |
| tcg_gen_shri_i32(src2, src2, 8); |
| tcg_gen_ext8u_i32(src2hi, src2); |
| tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1); |
| tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi); |
| tcg_gen_and_i32(src2lo, src2lo, src2hi); |
| tcg_gen_or_i32(crf, crf, src2lo); |
| } |
| tcg_gen_shli_i32(crf, crf, CRF_GT_BIT); |
| tcg_temp_free_i32(src1); |
| tcg_temp_free_i32(src2); |
| tcg_temp_free_i32(src2lo); |
| tcg_temp_free_i32(src2hi); |
| } |
| |
| #if defined(TARGET_PPC64) |
| /* cmpeqb */ |
| static void gen_cmpeqb(DisasContext *ctx) |
| { |
| gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| cpu_gpr[rB(ctx->opcode)]); |
| } |
| #endif |
| |
| /* isel (PowerPC 2.03 specification) */ |
| static void gen_isel(DisasContext *ctx) |
| { |
| uint32_t bi = rC(ctx->opcode); |
| uint32_t mask = 0x08 >> (bi & 0x03); |
| TCGv t0 = tcg_temp_new(); |
| TCGv zr; |
| |
| tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]); |
| tcg_gen_andi_tl(t0, t0, mask); |
| |
| zr = tcg_const_tl(0); |
| tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr, |
| rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr, |
| cpu_gpr[rB(ctx->opcode)]); |
| tcg_temp_free(zr); |
| tcg_temp_free(t0); |
| } |
| |
| /* cmpb: PowerPC 2.05 specification */ |
| static void gen_cmpb(DisasContext *ctx) |
| { |
| gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], |
| cpu_gpr[rB(ctx->opcode)]); |
| } |
| |
| /*** Integer arithmetic ***/ |
| |
| static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, |
| TCGv arg1, TCGv arg2, int sub) |
| { |
| TCGv t0 = tcg_temp_new(); |
| |
| tcg_gen_xor_tl(cpu_ov, arg0, arg2); |
| tcg_gen_xor_tl(t0, arg1, arg2); |
| if (sub) { |
| tcg_gen_and_tl(cpu_ov, cpu_ov, t0); |
| } else { |
| tcg_gen_andc_tl(cpu_ov, cpu_ov, t0); |
| } |
| tcg_temp_free(t0); |
| if (NARROW_MODE(ctx)) { |
| tcg_gen_extract_tl(cpu_ov, cpu_ov, 31, 1); |
| if (is_isa300(ctx)) { |
| tcg_gen_mov_tl(cpu_ov32, cpu_ov); |
| } |
| } else { |
| if (is_isa300(ctx)) { |
| tcg_gen_extract_tl(cpu_ov32, cpu_ov, 31, 1); |
| } |
| tcg_gen_extract_tl(cpu_ov, cpu_ov, TARGET_LONG_BITS - 1, 1); |
| } |
| tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
| } |
| |
| static inline void gen_op_arith_compute_ca32(DisasContext *ctx, |
| TCGv res, TCGv arg0, TCGv arg1, |
| TCGv ca32, int sub) |
| { |
| TCGv t0; |
| |
| if (!is_isa300(ctx)) { |
| return; |
| } |
| |
| t0 = tcg_temp_new(); |
| if (sub) { |
| tcg_gen_eqv_tl(t0, arg0, arg1); |
| } else { |
| tcg_gen_xor_tl(t0, arg0, arg1); |
| } |
| tcg_gen_xor_tl(t0, t0, res); |
| tcg_gen_extract_tl(ca32, t0, 32, 1); |
| tcg_temp_free(t0); |
| } |
| |
| /* Common add function */ |
| static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, |
| TCGv arg2, TCGv ca, TCGv ca32, |
| bool add_ca, bool compute_ca, |
| bool compute_ov, bool compute_rc0) |
| { |
| TCGv t0 = ret; |
| |
| if (compute_ca || compute_ov) { |
| t0 = tcg_temp_new(); |
| } |
| |
| if (compute_ca) { |
| if (NARROW_MODE(ctx)) { |
| /* |
| * Caution: a non-obvious corner case of the spec is that |
| * we must produce the *entire* 64-bit addition, but |
| * produce the carry into bit 32. |
| */ |
| TCGv t1 = tcg_temp_new(); |
| tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */ |
| tcg_gen_add_tl(t0, arg1, arg2); |
| if (add_ca) { |
| tcg_gen_add_tl(t0, t0, ca); |
| } |
| tcg_gen_xor_tl(ca, t0, t1); /* bits changed w/ carry */ |
| tcg_temp_free(t1); |
| tcg_gen_extract_tl(ca, ca, 32, 1); |
| if (is_isa300(ctx)) { |
| tcg_gen_mov_tl(ca32, ca); |
| } |
| } else { |
| TCGv zero = tcg_const_tl(0); |
| if (add_ca) { |
| tcg_gen_add2_tl(t0, ca, arg1, zero, ca, zero); |
| tcg_gen_add2_tl(t0, ca, t0, ca, arg2, zero); |
| } else { |
| tcg_gen_add2_tl(t0, ca, arg1, zero, arg2, zero); |
| } |
| gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, ca32, 0); |
| tcg_temp_free(zero); |
| } |
| } else { |
| tcg_gen_add_tl(t0, arg1, arg2); |
| if (add_ca) { |
| tcg_gen_add_tl(t0, t0, ca); |
| } |
| } |
| |
| if (compute_ov) { |
| gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 0); |
| } |
| if (unlikely(compute_rc0)) { |
| gen_set_Rc0(ctx, t0); |
| } |
| |
| if (t0 != ret) { |
| tcg_gen_mov_tl(ret, t0); |
| tcg_temp_free(t0); |
| } |
| } |
| /* Add functions with two operands */ |
| #define GEN_INT_ARITH_ADD(name, opc3, ca, add_ca, compute_ca, compute_ov) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
| ca, glue(ca, 32), \ |
| add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
| } |
| /* Add functions with one operand and one immediate */ |
| #define GEN_INT_ARITH_ADD_CONST(name, opc3, const_val, ca, \ |
| add_ca, compute_ca, compute_ov) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| TCGv t0 = tcg_const_tl(const_val); \ |
| gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], t0, \ |
| ca, glue(ca, 32), \ |
| add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
| tcg_temp_free(t0); \ |
| } |
| |
| /* add add. addo addo. */ |
| GEN_INT_ARITH_ADD(add, 0x08, cpu_ca, 0, 0, 0) |
| GEN_INT_ARITH_ADD(addo, 0x18, cpu_ca, 0, 0, 1) |
| /* addc addc. addco addco. */ |
| GEN_INT_ARITH_ADD(addc, 0x00, cpu_ca, 0, 1, 0) |
| GEN_INT_ARITH_ADD(addco, 0x10, cpu_ca, 0, 1, 1) |
| /* adde adde. addeo addeo. */ |
| GEN_INT_ARITH_ADD(adde, 0x04, cpu_ca, 1, 1, 0) |
| GEN_INT_ARITH_ADD(addeo, 0x14, cpu_ca, 1, 1, 1) |
| /* addme addme. addmeo addmeo. */ |
| GEN_INT_ARITH_ADD_CONST(addme, 0x07, -1LL, cpu_ca, 1, 1, 0) |
| GEN_INT_ARITH_ADD_CONST(addmeo, 0x17, -1LL, cpu_ca, 1, 1, 1) |
| /* addex */ |
| GEN_INT_ARITH_ADD(addex, 0x05, cpu_ov, 1, 1, 0); |
/* addze addze. addzeo addzeo. */
| GEN_INT_ARITH_ADD_CONST(addze, 0x06, 0, cpu_ca, 1, 1, 0) |
| GEN_INT_ARITH_ADD_CONST(addzeo, 0x16, 0, cpu_ca, 1, 1, 1) |
/* addic addic. */
| static inline void gen_op_addic(DisasContext *ctx, bool compute_rc0) |
| { |
| TCGv c = tcg_const_tl(SIMM(ctx->opcode)); |
| gen_op_arith_add(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| c, cpu_ca, cpu_ca32, 0, 1, 0, compute_rc0); |
| tcg_temp_free(c); |
| } |
| |
| static void gen_addic(DisasContext *ctx) |
| { |
| gen_op_addic(ctx, 0); |
| } |
| |
| static void gen_addic_(DisasContext *ctx) |
| { |
| gen_op_addic(ctx, 1); |
| } |
| |
| static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, |
| TCGv arg2, int sign, int compute_ov) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| TCGv_i32 t2 = tcg_temp_new_i32(); |
| TCGv_i32 t3 = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t0, arg1); |
| tcg_gen_trunc_tl_i32(t1, arg2); |
| if (sign) { |
| tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN); |
| tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1); |
| tcg_gen_and_i32(t2, t2, t3); |
| tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0); |
| tcg_gen_or_i32(t2, t2, t3); |
| tcg_gen_movi_i32(t3, 0); |
| tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1); |
| tcg_gen_div_i32(t3, t0, t1); |
| tcg_gen_extu_i32_tl(ret, t3); |
| } else { |
| tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t1, 0); |
| tcg_gen_movi_i32(t3, 0); |
| tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1); |
| tcg_gen_divu_i32(t3, t0, t1); |
| tcg_gen_extu_i32_tl(ret, t3); |
| } |
| if (compute_ov) { |
| tcg_gen_extu_i32_tl(cpu_ov, t2); |
| if (is_isa300(ctx)) { |
| tcg_gen_extu_i32_tl(cpu_ov32, t2); |
| } |
| tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
| } |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| tcg_temp_free_i32(t2); |
| tcg_temp_free_i32(t3); |
| |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, ret); |
| } |
| } |
| /* Div functions */ |
| #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
| sign, compute_ov); \ |
| } |
| /* divwu divwu. divwuo divwuo. */ |
| GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0); |
| GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1); |
| /* divw divw. divwo divwo. */ |
| GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0); |
| GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); |
| |
| /* div[wd]eu[o][.] */ |
| #define GEN_DIVE(name, hlpr, compute_ov) \ |
| static void gen_##name(DisasContext *ctx) \ |
| { \ |
| TCGv_i32 t0 = tcg_const_i32(compute_ov); \ |
| gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], cpu_env, \ |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \ |
| tcg_temp_free_i32(t0); \ |
| if (unlikely(Rc(ctx->opcode) != 0)) { \ |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \ |
| } \ |
| } |
| |
| GEN_DIVE(divweu, divweu, 0); |
| GEN_DIVE(divweuo, divweu, 1); |
| GEN_DIVE(divwe, divwe, 0); |
| GEN_DIVE(divweo, divwe, 1); |
| |
| #if defined(TARGET_PPC64) |
| static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, |
| TCGv arg2, int sign, int compute_ov) |
| { |
| TCGv_i64 t0 = tcg_temp_new_i64(); |
| TCGv_i64 t1 = tcg_temp_new_i64(); |
| TCGv_i64 t2 = tcg_temp_new_i64(); |
| TCGv_i64 t3 = tcg_temp_new_i64(); |
| |
| tcg_gen_mov_i64(t0, arg1); |
| tcg_gen_mov_i64(t1, arg2); |
| if (sign) { |
| tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN); |
| tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1); |
| tcg_gen_and_i64(t2, t2, t3); |
| tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0); |
| tcg_gen_or_i64(t2, t2, t3); |
| tcg_gen_movi_i64(t3, 0); |
| tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1); |
| tcg_gen_div_i64(ret, t0, t1); |
| } else { |
| tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t1, 0); |
| tcg_gen_movi_i64(t3, 0); |
| tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1); |
| tcg_gen_divu_i64(ret, t0, t1); |
| } |
| if (compute_ov) { |
| tcg_gen_mov_tl(cpu_ov, t2); |
| if (is_isa300(ctx)) { |
| tcg_gen_mov_tl(cpu_ov32, t2); |
| } |
| tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
| } |
| tcg_temp_free_i64(t0); |
| tcg_temp_free_i64(t1); |
| tcg_temp_free_i64(t2); |
| tcg_temp_free_i64(t3); |
| |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, ret); |
| } |
| } |
| |
| #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
| sign, compute_ov); \ |
| } |
| /* divdu divdu. divduo divduo. */ |
| GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0); |
| GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1); |
| /* divd divd. divdo divdo. */ |
| GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0); |
| GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1); |
| |
| GEN_DIVE(divdeu, divdeu, 0); |
| GEN_DIVE(divdeuo, divdeu, 1); |
| GEN_DIVE(divde, divde, 0); |
| GEN_DIVE(divdeo, divde, 1); |
| #endif |
| |
| static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, |
| TCGv arg2, int sign) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t0, arg1); |
| tcg_gen_trunc_tl_i32(t1, arg2); |
| if (sign) { |
| TCGv_i32 t2 = tcg_temp_new_i32(); |
| TCGv_i32 t3 = tcg_temp_new_i32(); |
| tcg_gen_setcondi_i32(TCG_COND_EQ, t2, t0, INT_MIN); |
| tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, -1); |
| tcg_gen_and_i32(t2, t2, t3); |
| tcg_gen_setcondi_i32(TCG_COND_EQ, t3, t1, 0); |
| tcg_gen_or_i32(t2, t2, t3); |
| tcg_gen_movi_i32(t3, 0); |
| tcg_gen_movcond_i32(TCG_COND_NE, t1, t2, t3, t2, t1); |
| tcg_gen_rem_i32(t3, t0, t1); |
| tcg_gen_ext_i32_tl(ret, t3); |
| tcg_temp_free_i32(t2); |
| tcg_temp_free_i32(t3); |
| } else { |
| TCGv_i32 t2 = tcg_const_i32(1); |
| TCGv_i32 t3 = tcg_const_i32(0); |
| tcg_gen_movcond_i32(TCG_COND_EQ, t1, t1, t3, t2, t1); |
| tcg_gen_remu_i32(t3, t0, t1); |
| tcg_gen_extu_i32_tl(ret, t3); |
| tcg_temp_free_i32(t2); |
| tcg_temp_free_i32(t3); |
| } |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| } |
| |
| #define GEN_INT_ARITH_MODW(name, opc3, sign) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
| sign); \ |
| } |
| |
| GEN_INT_ARITH_MODW(moduw, 0x08, 0); |
| GEN_INT_ARITH_MODW(modsw, 0x18, 1); |
| |
| #if defined(TARGET_PPC64) |
| static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, |
| TCGv arg2, int sign) |
| { |
| TCGv_i64 t0 = tcg_temp_new_i64(); |
| TCGv_i64 t1 = tcg_temp_new_i64(); |
| |
| tcg_gen_mov_i64(t0, arg1); |
| tcg_gen_mov_i64(t1, arg2); |
| if (sign) { |
| TCGv_i64 t2 = tcg_temp_new_i64(); |
| TCGv_i64 t3 = tcg_temp_new_i64(); |
| tcg_gen_setcondi_i64(TCG_COND_EQ, t2, t0, INT64_MIN); |
| tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, -1); |
| tcg_gen_and_i64(t2, t2, t3); |
| tcg_gen_setcondi_i64(TCG_COND_EQ, t3, t1, 0); |
| tcg_gen_or_i64(t2, t2, t3); |
| tcg_gen_movi_i64(t3, 0); |
| tcg_gen_movcond_i64(TCG_COND_NE, t1, t2, t3, t2, t1); |
| tcg_gen_rem_i64(ret, t0, t1); |
| tcg_temp_free_i64(t2); |
| tcg_temp_free_i64(t3); |
| } else { |
| TCGv_i64 t2 = tcg_const_i64(1); |
| TCGv_i64 t3 = tcg_const_i64(0); |
| tcg_gen_movcond_i64(TCG_COND_EQ, t1, t1, t3, t2, t1); |
| tcg_gen_remu_i64(ret, t0, t1); |
| tcg_temp_free_i64(t2); |
| tcg_temp_free_i64(t3); |
| } |
| tcg_temp_free_i64(t0); |
| tcg_temp_free_i64(t1); |
| } |
| |
| #define GEN_INT_ARITH_MODD(name, opc3, sign) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
| sign); \ |
| } |
| |
| GEN_INT_ARITH_MODD(modud, 0x08, 0); |
| GEN_INT_ARITH_MODD(modsd, 0x18, 1); |
| #endif |
| |
| /* mulhw mulhw. */ |
| static void gen_mulhw(DisasContext *ctx) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); |
| tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); |
| tcg_gen_muls2_i32(t0, t1, t0, t1); |
| tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mulhwu mulhwu. */ |
| static void gen_mulhwu(DisasContext *ctx) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); |
| tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); |
| tcg_gen_mulu2_i32(t0, t1, t0, t1); |
| tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mullw mullw. */ |
| static void gen_mullw(DisasContext *ctx) |
| { |
| #if defined(TARGET_PPC64) |
| TCGv_i64 t0, t1; |
| t0 = tcg_temp_new_i64(); |
| t1 = tcg_temp_new_i64(); |
| tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]); |
| tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]); |
| tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); |
| tcg_temp_free(t0); |
| tcg_temp_free(t1); |
| #else |
| tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| cpu_gpr[rB(ctx->opcode)]); |
| #endif |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mullwo mullwo. */ |
| static void gen_mullwo(DisasContext *ctx) |
| { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); |
| tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); |
| tcg_gen_muls2_i32(t0, t1, t0, t1); |
| #if defined(TARGET_PPC64) |
| tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); |
| #else |
| tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0); |
| #endif |
| |
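| /* Overflow iff the high 32 bits differ from the sign extension of the low 32 bits. */ |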
| tcg_gen_sari_i32(t0, t0, 31); |
| tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1); |
| tcg_gen_extu_i32_tl(cpu_ov, t0); |
| if (is_isa300(ctx)) { |
| tcg_gen_mov_tl(cpu_ov32, cpu_ov); |
| } |
| tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
| |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mulli */ |
| static void gen_mulli(DisasContext *ctx) |
| { |
| tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| SIMM(ctx->opcode)); |
| } |
| |
| #if defined(TARGET_PPC64) |
| /* mulhd mulhd. */ |
| static void gen_mulhd(DisasContext *ctx) |
| { |
| TCGv lo = tcg_temp_new(); |
| tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)], |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
| tcg_temp_free(lo); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mulhdu mulhdu. */ |
| static void gen_mulhdu(DisasContext *ctx) |
| { |
| TCGv lo = tcg_temp_new(); |
| tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)], |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
| tcg_temp_free(lo); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mulld mulld. */ |
| static void gen_mulld(DisasContext *ctx) |
| { |
| tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| cpu_gpr[rB(ctx->opcode)]); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| /* mulldo mulldo. */ |
| static void gen_mulldo(DisasContext *ctx) |
| { |
| TCGv_i64 t0 = tcg_temp_new_i64(); |
| TCGv_i64 t1 = tcg_temp_new_i64(); |
| |
| tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)], |
| cpu_gpr[rB(ctx->opcode)]); |
| tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0); |
| |
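| /* Overflow iff the upper half of the 128-bit product is not the sign extension of the lower half. */ |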
| tcg_gen_sari_i64(t0, t0, 63); |
| tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1); |
| if (is_isa300(ctx)) { |
| tcg_gen_mov_tl(cpu_ov32, cpu_ov); |
| } |
| tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); |
| |
| tcg_temp_free_i64(t0); |
| tcg_temp_free_i64(t1); |
| |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| #endif |
| |
| /* Common subf function */ |
| static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, |
| TCGv arg2, bool add_ca, bool compute_ca, |
| bool compute_ov, bool compute_rc0) |
| { |
| TCGv t0 = ret; |
| |
| if (compute_ca || compute_ov) { |
| t0 = tcg_temp_new(); |
| } |
| |
| if (compute_ca) { |
| /* dest = ~arg1 + arg2 [+ ca]. */ |
| if (NARROW_MODE(ctx)) { |
| /* |
| * Caution: a non-obvious corner case of the spec is that |
| * we must compute the *entire* 64-bit addition, but take |
| * the carry out of the low 32-bit word (i.e. into bit 32). |
| */ |
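| /* |
| * arg2 ^ inv1 is the carry-free sum of the two addends; xor-ing it |
| * with the real sum marks every bit that was changed by a carry, |
| * so bit 32 of that value is the carry out of the low 32-bit word. |
| */ |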
| TCGv inv1 = tcg_temp_new(); |
| TCGv t1 = tcg_temp_new(); |
| tcg_gen_not_tl(inv1, arg1); |
| if (add_ca) { |
| tcg_gen_add_tl(t0, arg2, cpu_ca); |
| } else { |
| tcg_gen_addi_tl(t0, arg2, 1); |
| } |
| tcg_gen_xor_tl(t1, arg2, inv1); /* add without carry */ |
| tcg_gen_add_tl(t0, t0, inv1); |
| tcg_temp_free(inv1); |
| tcg_gen_xor_tl(cpu_ca, t0, t1); /* bits changed w/ carry */ |
| tcg_temp_free(t1); |
| tcg_gen_extract_tl(cpu_ca, cpu_ca, 32, 1); |
| if (is_isa300(ctx)) { |
| tcg_gen_mov_tl(cpu_ca32, cpu_ca); |
| } |
| } else if (add_ca) { |
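| /* Full-width ~arg1 + arg2 + ca, accumulating the carry-out across the two add2 steps. */ |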
| TCGv zero, inv1 = tcg_temp_new(); |
| tcg_gen_not_tl(inv1, arg1); |
| zero = tcg_const_tl(0); |
| tcg_gen_add2_tl(t0, cpu_ca, arg2, zero, cpu_ca, zero); |
| tcg_gen_add2_tl(t0, cpu_ca, t0, cpu_ca, inv1, zero); |
| gen_op_arith_compute_ca32(ctx, t0, inv1, arg2, cpu_ca32, 0); |
| tcg_temp_free(zero); |
| tcg_temp_free(inv1); |
| } else { |
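| /* ~arg1 + arg2 + 1 == arg2 - arg1; the carry out is set iff arg2 >= arg1 (unsigned). */ |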
| tcg_gen_setcond_tl(TCG_COND_GEU, cpu_ca, arg2, arg1); |
| tcg_gen_sub_tl(t0, arg2, arg1); |
| gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1); |
| } |
| } else if (add_ca) { |
| /* |
| * Since we're ignoring carry-out, we can simplify the |
| * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. |
| */ |
| tcg_gen_sub_tl(t0, arg2, arg1); |
| tcg_gen_add_tl(t0, t0, cpu_ca); |
| tcg_gen_subi_tl(t0, t0, 1); |
| } else { |
| tcg_gen_sub_tl(t0, arg2, arg1); |
| } |
| |
| if (compute_ov) { |
| gen_op_arith_compute_ov(ctx, t0, arg1, arg2, 1); |
| } |
| if (unlikely(compute_rc0)) { |
| gen_set_Rc0(ctx, t0); |
| } |
| |
| if (t0 != ret) { |
| tcg_gen_mov_tl(ret, t0); |
| tcg_temp_free(t0); |
| } |
| } |
| /* Subtract functions with two operands */ |
| #define GEN_INT_ARITH_SUBF(name, opc3, add_ca, compute_ca, compute_ov) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ |
| add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
| } |
| /* Subtract functions with one operand and one immediate */ |
| #define GEN_INT_ARITH_SUBF_CONST(name, opc3, const_val, \ |
| add_ca, compute_ca, compute_ov) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| TCGv t0 = tcg_const_tl(const_val); \ |
| gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], \ |
| cpu_gpr[rA(ctx->opcode)], t0, \ |
| add_ca, compute_ca, compute_ov, Rc(ctx->opcode)); \ |
| tcg_temp_free(t0); \ |
| } |
| /* subf subf. subfo subfo. */ |
| GEN_INT_ARITH_SUBF(subf, 0x01, 0, 0, 0) |
| GEN_INT_ARITH_SUBF(subfo, 0x11, 0, 0, 1) |
| /* subfc subfc. subfco subfco. */ |
| GEN_INT_ARITH_SUBF(subfc, 0x00, 0, 1, 0) |
| GEN_INT_ARITH_SUBF(subfco, 0x10, 0, 1, 1) |
| /* subfe subfe. subfeo subfeo. */ |
| GEN_INT_ARITH_SUBF(subfe, 0x04, 1, 1, 0) |
| GEN_INT_ARITH_SUBF(subfeo, 0x14, 1, 1, 1) |
| /* subfme subfme. subfmeo subfmeo. */ |
| GEN_INT_ARITH_SUBF_CONST(subfme, 0x07, -1LL, 1, 1, 0) |
| GEN_INT_ARITH_SUBF_CONST(subfmeo, 0x17, -1LL, 1, 1, 1) |
| /* subfze subfze. subfzeo subfzeo. */ |
| GEN_INT_ARITH_SUBF_CONST(subfze, 0x06, 0, 1, 1, 0) |
| GEN_INT_ARITH_SUBF_CONST(subfzeo, 0x16, 0, 1, 1, 1) |
| |
| /* subfic */ |
| static void gen_subfic(DisasContext *ctx) |
| { |
| TCGv c = tcg_const_tl(SIMM(ctx->opcode)); |
| gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| c, 0, 1, 0, 0); |
| tcg_temp_free(c); |
| } |
| |
| /* neg neg. nego nego. */ |
| static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) |
| { |
| TCGv zero = tcg_const_tl(0); |
| gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], |
| zero, 0, 0, compute_ov, Rc(ctx->opcode)); |
| tcg_temp_free(zero); |
| } |
| |
| static void gen_neg(DisasContext *ctx) |
| { |
| tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); |
| if (unlikely(Rc(ctx->opcode))) { |
| gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| |
| static void gen_nego(DisasContext *ctx) |
| { |
| gen_op_arith_neg(ctx, 1); |
| } |
| |
| /*** Integer logical ***/ |
| #define GEN_LOGICAL2(name, tcg_op, opc, type) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \ |
| cpu_gpr[rB(ctx->opcode)]); \ |
| if (unlikely(Rc(ctx->opcode) != 0)) \ |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ |
| } |
| |
| #define GEN_LOGICAL1(name, tcg_op, opc, type) \ |
| static void glue(gen_, name)(DisasContext *ctx) \ |
| { \ |
| tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \ |
| if (unlikely(Rc(ctx->opcode) != 0)) \ |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ |
| } |
| |
| /* and & and. */ |
| GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER); |
| /* andc & andc. */ |
| GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER); |
| |
| /* andi. */ |
| static void gen_andi_(DisasContext *ctx) |
| { |
| tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], |
| UIMM(ctx->opcode)); |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| |
| /* andis. */ |
| static void gen_andis_(DisasContext *ctx) |
| { |
| tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], |
| UIMM(ctx->opcode) << 16); |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| |
| /* cntlzw */ |
| static void gen_cntlzw(DisasContext *ctx) |
| { |
| TCGv_i32 t = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); |
| tcg_gen_clzi_i32(t, t, 32); |
| tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); |
| tcg_temp_free_i32(t); |
| |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| } |
| |
| /* cnttzw */ |
| static void gen_cnttzw(DisasContext *ctx) |
| { |
| TCGv_i32 t = tcg_temp_new_i32(); |
| |
| tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); |
| tcg_gen_ctzi_i32(t, t, 32); |
| tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); |
| tcg_temp_free_i32(t); |
| |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| } |
| |
| /* eqv & eqv. */ |
| GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER); |
| /* extsb & extsb. */ |
| GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER); |
| /* extsh & extsh. */ |
| GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER); |
| /* nand & nand. */ |
| GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER); |
| /* nor & nor. */ |
| GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER); |
| |
| #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) |
| static void gen_pause(DisasContext *ctx) |
| { |
| TCGv_i32 t0 = tcg_const_i32(0); |
| tcg_gen_st_i32(t0, cpu_env, |
| -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); |
| tcg_temp_free_i32(t0); |
| |
| /* Stop translation; this gives other CPUs a chance to run */ |
| gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); |
| } |
| #endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ |
| |
| /* or & or. */ |
| static void gen_or(DisasContext *ctx) |
| { |
| int rs, ra, rb; |
| |
| rs = rS(ctx->opcode); |
| ra = rA(ctx->opcode); |
| rb = rB(ctx->opcode); |
| /* Optimisation for the mr / mr. case */ |
| if (rs != ra || rs != rb) { |
| if (rs != rb) { |
| tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]); |
| } else { |
| tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]); |
| } |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[ra]); |
| } |
| } else if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rs]); |
| #if defined(TARGET_PPC64) |
| } else if (rs != 0) { /* 0 is nop */ |
| int prio = 0; |
| |
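| /* |
| * or rx,rx,rx with rx != 0 encodes a thread-priority hint; map the |
| * register number to the PPR priority value written below. |
| */ |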
| switch (rs) { |
| case 1: |
| /* Set process priority to low */ |
| prio = 2; |
| break; |
| case 6: |
| /* Set process priority to medium-low */ |
| prio = 3; |
| break; |
| case 2: |
| /* Set process priority to normal */ |
| prio = 4; |
| break; |
| #if !defined(CONFIG_USER_ONLY) |
| case 31: |
| if (!ctx->pr) { |
| /* Set process priority to very low */ |
| prio = 1; |
| } |
| break; |
| case 5: |
| if (!ctx->pr) { |
| /* Set process priority to medium-high */ |
| prio = 5; |
| } |
| break; |
| case 3: |
| if (!ctx->pr) { |
| /* Set process priority to high */ |
| prio = 6; |
| } |
| break; |
| case 7: |
| if (ctx->hv && !ctx->pr) { |
| /* Set process priority to very high */ |
| prio = 7; |
| } |
| break; |
| #endif |
| default: |
| break; |
| } |
| if (prio) { |
| TCGv t0 = tcg_temp_new(); |
| gen_load_spr(t0, SPR_PPR); |
| tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL); |
| tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50); |
| gen_store_spr(SPR_PPR, t0); |
| tcg_temp_free(t0); |
| } |
| #if !defined(CONFIG_USER_ONLY) |
| /* |
| * Pause out of TCG; otherwise spin loops with smt_low eat too |
| * much CPU and the kernel hangs. This applies to all |
| * encodings other than no-op, e.g., miso(rs=26), yield(27), |
| * mdoio(29), mdoom(30), and all currently undefined. |
| */ |
| gen_pause(ctx); |
| #endif |
| #endif |
| } |
| } |
| /* orc & orc. */ |
| GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER); |
| |
| /* xor & xor. */ |
| static void gen_xor(DisasContext *ctx) |
| { |
| /* Optimisation for "set to zero" case */ |
| if (rS(ctx->opcode) != rB(ctx->opcode)) { |
| tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], |
| cpu_gpr[rB(ctx->opcode)]); |
| } else { |
| tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0); |
| } |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| } |
| |
| /* ori */ |
| static void gen_ori(DisasContext *ctx) |
| { |
| target_ulong uimm = UIMM(ctx->opcode); |
| |
| if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
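| /* NOP */ |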
| return; |
| } |
| tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm); |
| } |
| |
| /* oris */ |
| static void gen_oris(DisasContext *ctx) |
| { |
| target_ulong uimm = UIMM(ctx->opcode); |
| |
| if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
| /* NOP */ |
| return; |
| } |
| tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], |
| uimm << 16); |
| } |
| |
| /* xori */ |
| static void gen_xori(DisasContext *ctx) |
| { |
| target_ulong uimm = UIMM(ctx->opcode); |
| |
| if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
| /* NOP */ |
| return; |
| } |
| tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm); |
| } |
| |
| /* xoris */ |
| static void gen_xoris(DisasContext *ctx) |
| { |
| target_ulong uimm = UIMM(ctx->opcode); |
| |
| if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { |
| /* NOP */ |
| return; |
| } |
| tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], |
| uimm << 16); |
| } |
| |
| /* popcntb: PowerPC 2.03 specification */ |
| static void gen_popcntb(DisasContext *ctx) |
| { |
| gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
| } |
| |
| static void gen_popcntw(DisasContext *ctx) |
| { |
| #if defined(TARGET_PPC64) |
| gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
| #else |
| tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
| #endif |
| } |
| |
| #if defined(TARGET_PPC64) |
| /* popcntd: PowerPC 2.06 specification */ |
| static void gen_popcntd(DisasContext *ctx) |
| { |
| tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); |
| } |
| #endif |
| |
| /* prtyw: PowerPC 2.05 specification */ |
| static void gen_prtyw(DisasContext *ctx) |
| { |
| TCGv ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv rs = cpu_gpr[rS(ctx->opcode)]; |
| TCGv t0 = tcg_temp_new(); |
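| /* XOR-fold the low bit of every byte of each word into bit 0 of that word, then keep only those bits. */ |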
| tcg_gen_shri_tl(t0, rs, 16); |
| tcg_gen_xor_tl(ra, rs, t0); |
| tcg_gen_shri_tl(t0, ra, 8); |
| tcg_gen_xor_tl(ra, ra, t0); |
| tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL); |
| tcg_temp_free(t0); |
| } |
| |
| #if defined(TARGET_PPC64) |
| /* prtyd: PowerPC 2.05 specification */ |
| static void gen_prtyd(DisasContext *ctx) |
| { |
| TCGv ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv rs = cpu_gpr[rS(ctx->opcode)]; |
| TCGv t0 = tcg_temp_new(); |
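| /* XOR-fold the low bit of every byte into bit 0, then keep only bit 0. */ |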
| tcg_gen_shri_tl(t0, rs, 32); |
| tcg_gen_xor_tl(ra, rs, t0); |
| tcg_gen_shri_tl(t0, ra, 16); |
| tcg_gen_xor_tl(ra, ra, t0); |
| tcg_gen_shri_tl(t0, ra, 8); |
| tcg_gen_xor_tl(ra, ra, t0); |
| tcg_gen_andi_tl(ra, ra, 1); |
| tcg_temp_free(t0); |
| } |
| #endif |
| |
| #if defined(TARGET_PPC64) |
| /* bpermd */ |
| static void gen_bpermd(DisasContext *ctx) |
| { |
| gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)], |
| cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); |
| } |
| #endif |
| |
| #if defined(TARGET_PPC64) |
| /* extsw & extsw. */ |
| GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B); |
| |
| /* cntlzd */ |
| static void gen_cntlzd(DisasContext *ctx) |
| { |
| tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| } |
| |
| /* cnttzd */ |
| static void gen_cnttzd(DisasContext *ctx) |
| { |
| tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); |
| } |
| } |
| |
| /* darn */ |
| static void gen_darn(DisasContext *ctx) |
| { |
| int l = L(ctx->opcode); |
| |
| if (l > 2) { |
| tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1); |
| } else { |
| gen_icount_io_start(ctx); |
| if (l == 0) { |
| gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]); |
| } else { |
| /* Return 64-bit random for both CRN and RRN */ |
| gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]); |
| } |
| } |
| } |
| #endif |
| |
| /*** Integer rotate ***/ |
| |
| /* rlwimi & rlwimi. */ |
| static void gen_rlwimi(DisasContext *ctx) |
| { |
| TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; |
| uint32_t sh = SH(ctx->opcode); |
| uint32_t mb = MB(ctx->opcode); |
| uint32_t me = ME(ctx->opcode); |
| |
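| /* |
| * sh == 31 - me means the rotated source field starts at bit sh, and |
| * mb <= me means the mask is contiguous, so the insert is a single |
| * deposit of the low me - mb + 1 bits of rS. |
| */ |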
| if (sh == (31 - me) && mb <= me) { |
| tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1); |
| } else { |
| target_ulong mask; |
| bool mask_in_32b = true; |
| TCGv t1; |
| |
| #if defined(TARGET_PPC64) |
| mb += 32; |
| me += 32; |
| #endif |
| mask = MASK(mb, me); |
| |
| #if defined(TARGET_PPC64) |
| if (mask > 0xffffffffu) { |
| mask_in_32b = false; |
| } |
| #endif |
| t1 = tcg_temp_new(); |
| if (mask_in_32b) { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| tcg_gen_trunc_tl_i32(t0, t_rs); |
| tcg_gen_rotli_i32(t0, t0, sh); |
| tcg_gen_extu_i32_tl(t1, t0); |
| tcg_temp_free_i32(t0); |
| } else { |
| #if defined(TARGET_PPC64) |
| tcg_gen_deposit_i64(t1, t_rs, t_rs, 32, 32); |
| tcg_gen_rotli_i64(t1, t1, sh); |
| #else |
| g_assert_not_reached(); |
| #endif |
| } |
| |
| tcg_gen_andi_tl(t1, t1, mask); |
| tcg_gen_andi_tl(t_ra, t_ra, ~mask); |
| tcg_gen_or_tl(t_ra, t_ra, t1); |
| tcg_temp_free(t1); |
| } |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, t_ra); |
| } |
| } |
| |
| /* rlwinm & rlwinm. */ |
| static void gen_rlwinm(DisasContext *ctx) |
| { |
| TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; |
| int sh = SH(ctx->opcode); |
| int mb = MB(ctx->opcode); |
| int me = ME(ctx->opcode); |
| int len = me - mb + 1; |
| int rsh = (32 - sh) & 31; |
| |
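| /* |
| * Fast paths: shift-left-and-clear maps to deposit_z and |
| * shift-right-and-mask maps to extract; anything else falls back |
| * to rotate + mask. |
| */ |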
| if (sh != 0 && len > 0 && me == (31 - sh)) { |
| tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len); |
| } else if (me == 31 && rsh + len <= 32) { |
| tcg_gen_extract_tl(t_ra, t_rs, rsh, len); |
| } else { |
| target_ulong mask; |
| bool mask_in_32b = true; |
| #if defined(TARGET_PPC64) |
| mb += 32; |
| me += 32; |
| #endif |
| mask = MASK(mb, me); |
| #if defined(TARGET_PPC64) |
| if (mask > 0xffffffffu) { |
| mask_in_32b = false; |
| } |
| #endif |
| if (mask_in_32b) { |
| if (sh == 0) { |
| tcg_gen_andi_tl(t_ra, t_rs, mask); |
| } else { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| tcg_gen_trunc_tl_i32(t0, t_rs); |
| tcg_gen_rotli_i32(t0, t0, sh); |
| tcg_gen_andi_i32(t0, t0, mask); |
| tcg_gen_extu_i32_tl(t_ra, t0); |
| tcg_temp_free_i32(t0); |
| } |
| } else { |
| #if defined(TARGET_PPC64) |
| tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32); |
| tcg_gen_rotli_i64(t_ra, t_ra, sh); |
| tcg_gen_andi_i64(t_ra, t_ra, mask); |
| #else |
| g_assert_not_reached(); |
| #endif |
| } |
| } |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, t_ra); |
| } |
| } |
| |
| /* rlwnm & rlwnm. */ |
| static void gen_rlwnm(DisasContext *ctx) |
| { |
| TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; |
| TCGv t_rb = cpu_gpr[rB(ctx->opcode)]; |
| uint32_t mb = MB(ctx->opcode); |
| uint32_t me = ME(ctx->opcode); |
| target_ulong mask; |
| bool mask_in_32b = true; |
| |
| #if defined(TARGET_PPC64) |
| mb += 32; |
| me += 32; |
| #endif |
| mask = MASK(mb, me); |
| |
| #if defined(TARGET_PPC64) |
| if (mask > 0xffffffffu) { |
| mask_in_32b = false; |
| } |
| #endif |
| if (mask_in_32b) { |
| TCGv_i32 t0 = tcg_temp_new_i32(); |
| TCGv_i32 t1 = tcg_temp_new_i32(); |
| tcg_gen_trunc_tl_i32(t0, t_rb); |
| tcg_gen_trunc_tl_i32(t1, t_rs); |
| tcg_gen_andi_i32(t0, t0, 0x1f); |
| tcg_gen_rotl_i32(t1, t1, t0); |
| tcg_gen_extu_i32_tl(t_ra, t1); |
| tcg_temp_free_i32(t0); |
| tcg_temp_free_i32(t1); |
| } else { |
| #if defined(TARGET_PPC64) |
| TCGv_i64 t0 = tcg_temp_new_i64(); |
| tcg_gen_andi_i64(t0, t_rb, 0x1f); |
| tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32); |
| tcg_gen_rotl_i64(t_ra, t_ra, t0); |
| tcg_temp_free_i64(t0); |
| #else |
| g_assert_not_reached(); |
| #endif |
| } |
| |
| tcg_gen_andi_tl(t_ra, t_ra, mask); |
| |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, t_ra); |
| } |
| } |
| |
| #if defined(TARGET_PPC64) |
| #define GEN_PPC64_R2(name, opc1, opc2) \ |
| static void glue(gen_, name##0)(DisasContext *ctx) \ |
| { \ |
| gen_##name(ctx, 0); \ |
| } \ |
| \ |
| static void glue(gen_, name##1)(DisasContext *ctx) \ |
| { \ |
| gen_##name(ctx, 1); \ |
| } |
| #define GEN_PPC64_R4(name, opc1, opc2) \ |
| static void glue(gen_, name##0)(DisasContext *ctx) \ |
| { \ |
| gen_##name(ctx, 0, 0); \ |
| } \ |
| \ |
| static void glue(gen_, name##1)(DisasContext *ctx) \ |
| { \ |
| gen_##name(ctx, 0, 1); \ |
| } \ |
| \ |
| static void glue(gen_, name##2)(DisasContext *ctx) \ |
| { \ |
| gen_##name(ctx, 1, 0); \ |
| } \ |
| \ |
| static void glue(gen_, name##3)(DisasContext *ctx) \ |
| { \ |
| gen_##name(ctx, 1, 1); \ |
| } |
| |
| static void gen_rldinm(DisasContext *ctx, int mb, int me, int sh) |
| { |
| TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; |
| int len = me - mb + 1; |
| int rsh = (64 - sh) & 63; |
| |
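| /* Same fast paths as rlwinm: deposit_z for shift-left-and-clear, extract for shift-right-and-mask. */ |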
| if (sh != 0 && len > 0 && me == (63 - sh)) { |
| tcg_gen_deposit_z_tl(t_ra, t_rs, sh, len); |
| } else if (me == 63 && rsh + len <= 64) { |
| tcg_gen_extract_tl(t_ra, t_rs, rsh, len); |
| } else { |
| tcg_gen_rotli_tl(t_ra, t_rs, sh); |
| tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me)); |
| } |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, t_ra); |
| } |
| } |
| |
| /* rldicl - rldicl. */ |
| static inline void gen_rldicl(DisasContext *ctx, int mbn, int shn) |
| { |
| uint32_t sh, mb; |
| |
| sh = SH(ctx->opcode) | (shn << 5); |
| mb = MB(ctx->opcode) | (mbn << 5); |
| gen_rldinm(ctx, mb, 63, sh); |
| } |
| GEN_PPC64_R4(rldicl, 0x1E, 0x00); |
| |
| /* rldicr - rldicr. */ |
| static inline void gen_rldicr(DisasContext *ctx, int men, int shn) |
| { |
| uint32_t sh, me; |
| |
| sh = SH(ctx->opcode) | (shn << 5); |
| me = MB(ctx->opcode) | (men << 5); |
| gen_rldinm(ctx, 0, me, sh); |
| } |
| GEN_PPC64_R4(rldicr, 0x1E, 0x02); |
| |
| /* rldic - rldic. */ |
| static inline void gen_rldic(DisasContext *ctx, int mbn, int shn) |
| { |
| uint32_t sh, mb; |
| |
| sh = SH(ctx->opcode) | (shn << 5); |
| mb = MB(ctx->opcode) | (mbn << 5); |
| gen_rldinm(ctx, mb, 63 - sh, sh); |
| } |
| GEN_PPC64_R4(rldic, 0x1E, 0x04); |
| |
| static void gen_rldnm(DisasContext *ctx, int mb, int me) |
| { |
| TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; |
| TCGv t_rb = cpu_gpr[rB(ctx->opcode)]; |
| TCGv t0; |
| |
| t0 = tcg_temp_new(); |
| tcg_gen_andi_tl(t0, t_rb, 0x3f); |
| tcg_gen_rotl_tl(t_ra, t_rs, t0); |
| tcg_temp_free(t0); |
| |
| tcg_gen_andi_tl(t_ra, t_ra, MASK(mb, me)); |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, t_ra); |
| } |
| } |
| |
| /* rldcl - rldcl. */ |
| static inline void gen_rldcl(DisasContext *ctx, int mbn) |
| { |
| uint32_t mb; |
| |
| mb = MB(ctx->opcode) | (mbn << 5); |
| gen_rldnm(ctx, mb, 63); |
| } |
| GEN_PPC64_R2(rldcl, 0x1E, 0x08); |
| |
| /* rldcr - rldcr. */ |
| static inline void gen_rldcr(DisasContext *ctx, int men) |
| { |
| uint32_t me; |
| |
| me = MB(ctx->opcode) | (men << 5); |
| gen_rldnm(ctx, 0, me); |
| } |
| GEN_PPC64_R2(rldcr, 0x1E, 0x09); |
| |
| /* rldimi - rldimi. */ |
| static void gen_rldimi(DisasContext *ctx, int mbn, int shn) |
| { |
| TCGv t_ra = cpu_gpr[rA(ctx->opcode)]; |
| TCGv t_rs = cpu_gpr[rS(ctx->opcode)]; |
| uint32_t sh = SH(ctx->opcode) | (shn << 5); |
| uint32_t mb = MB(ctx->opcode) | (mbn << 5); |
| uint32_t me = 63 - sh; |
| |
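| /* With a contiguous mask (mb <= me) the insert is a single deposit; otherwise rotate, mask and merge. */ |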
| if (mb <= me) { |
| tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1); |
| } else { |
| target_ulong mask = MASK(mb, me); |
| TCGv t1 = tcg_temp_new(); |
| |
| tcg_gen_rotli_tl(t1, t_rs, sh); |
| tcg_gen_andi_tl(t1, t1, mask); |
| tcg_gen_andi_tl(t_ra, t_ra, ~mask); |
| tcg_gen_or_tl(t_ra, t_ra, t1); |
| tcg_temp_free(t1); |
| } |
| if (unlikely(Rc(ctx->opcode) != 0)) { |
| gen_set_Rc0(ctx, t_ra); |
| |