qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

target/ppc: convert to DisasContextBase

A couple of notes:

- removed ctx->nip in favour of base->pc_next. Yes, it is annoying,
but didn't want to waste its 4 bytes.

- ctx->singlestep_enabled does a lot more than
base.singlestep_enabled; this confused me at first.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

Authored by Emilio G. Cota and committed by David Gibson (commits: b6bac4bc, 5d0fb150).

Total: +91 −86

target/ppc/translate.c: +67 −62
··· 31 31 #include "exec/helper-gen.h" 32 32 33 33 #include "trace-tcg.h" 34 + #include "exec/translator.h" 34 35 #include "exec/log.h" 35 36 36 37 ··· 187 188 188 189 /* internal defines */ 189 190 struct DisasContext { 190 - struct TranslationBlock *tb; 191 - target_ulong nip; 191 + DisasContextBase base; 192 192 uint32_t opcode; 193 193 uint32_t exception; 194 194 /* Routine used to access memory */ ··· 275 275 * the faulting instruction 276 276 */ 277 277 if (ctx->exception == POWERPC_EXCP_NONE) { 278 - gen_update_nip(ctx, ctx->nip - 4); 278 + gen_update_nip(ctx, ctx->base.pc_next - 4); 279 279 } 280 280 t0 = tcg_const_i32(excp); 281 281 t1 = tcg_const_i32(error); ··· 293 293 * the faulting instruction 294 294 */ 295 295 if (ctx->exception == POWERPC_EXCP_NONE) { 296 - gen_update_nip(ctx, ctx->nip - 4); 296 + gen_update_nip(ctx, ctx->base.pc_next - 4); 297 297 } 298 298 t0 = tcg_const_i32(excp); 299 299 gen_helper_raise_exception(cpu_env, t0); ··· 322 322 */ 323 323 if ((ctx->exception != POWERPC_EXCP_BRANCH) && 324 324 (ctx->exception != POWERPC_EXCP_SYNC)) { 325 - gen_update_nip(ctx, ctx->nip); 325 + gen_update_nip(ctx, ctx->base.pc_next); 326 326 } 327 327 t0 = tcg_const_i32(EXCP_DEBUG); 328 328 gen_helper_raise_exception(cpu_env, t0); ··· 349 349 /* Stop translation */ 350 350 static inline void gen_stop_exception(DisasContext *ctx) 351 351 { 352 - gen_update_nip(ctx, ctx->nip); 352 + gen_update_nip(ctx, ctx->base.pc_next); 353 353 ctx->exception = POWERPC_EXCP_STOP; 354 354 } 355 355 ··· 978 978 { 979 979 target_long d = DX(ctx->opcode); 980 980 981 - tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->nip + (d << 16)); 981 + tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], ctx->base.pc_next + (d << 16)); 982 982 } 983 983 984 984 static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, ··· 1580 1580 tcg_temp_free_i32(t0); 1581 1581 1582 1582 /* Stop translation, this gives other CPUs a chance to run */ 1583 - gen_exception_nip(ctx, EXCP_HLT, 
ctx->nip); 1583 + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 1584 1584 } 1585 1585 #endif /* defined(TARGET_PPC64) */ 1586 1586 ··· 2397 2397 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, l1); 2398 2398 t1 = tcg_const_i32(POWERPC_EXCP_ALIGN); 2399 2399 t2 = tcg_const_i32(ctx->opcode & 0x03FF0000); 2400 - gen_update_nip(ctx, ctx->nip - 4); 2400 + gen_update_nip(ctx, ctx->base.pc_next - 4); 2401 2401 gen_helper_raise_exception_err(cpu_env, t1, t2); 2402 2402 tcg_temp_free_i32(t1); 2403 2403 tcg_temp_free_i32(t2); ··· 3322 3322 -offsetof(PowerPCCPU, env) + offsetof(CPUState, halted)); 3323 3323 tcg_temp_free_i32(t0); 3324 3324 /* Stop translation, as the CPU is supposed to sleep from now */ 3325 - gen_exception_nip(ctx, EXCP_HLT, ctx->nip); 3325 + gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); 3326 3326 } 3327 3327 3328 3328 #if defined(TARGET_PPC64) ··· 3407 3407 } 3408 3408 3409 3409 #ifndef CONFIG_USER_ONLY 3410 - return (ctx->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); 3410 + return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); 3411 3411 #else 3412 3412 return true; 3413 3413 #endif ··· 3422 3422 if (use_goto_tb(ctx, dest)) { 3423 3423 tcg_gen_goto_tb(n); 3424 3424 tcg_gen_movi_tl(cpu_nip, dest & ~3); 3425 - tcg_gen_exit_tb((uintptr_t)ctx->tb + n); 3425 + tcg_gen_exit_tb((uintptr_t)ctx->base.tb + n); 3426 3426 } else { 3427 3427 tcg_gen_movi_tl(cpu_nip, dest & ~3); 3428 3428 if (unlikely(ctx->singlestep_enabled)) { ··· 3458 3458 li = LI(ctx->opcode); 3459 3459 li = (li ^ 0x02000000) - 0x02000000; 3460 3460 if (likely(AA(ctx->opcode) == 0)) { 3461 - target = ctx->nip + li - 4; 3461 + target = ctx->base.pc_next + li - 4; 3462 3462 } else { 3463 3463 target = li; 3464 3464 } 3465 3465 if (LK(ctx->opcode)) { 3466 - gen_setlr(ctx, ctx->nip); 3466 + gen_setlr(ctx, ctx->base.pc_next); 3467 3467 } 3468 - gen_update_cfar(ctx, ctx->nip - 4); 3468 + gen_update_cfar(ctx, ctx->base.pc_next - 4); 3469 3469 gen_goto_tb(ctx, 0, target); 
3470 3470 } 3471 3471 ··· 3493 3493 target = NULL; 3494 3494 } 3495 3495 if (LK(ctx->opcode)) 3496 - gen_setlr(ctx, ctx->nip); 3496 + gen_setlr(ctx, ctx->base.pc_next); 3497 3497 l1 = gen_new_label(); 3498 3498 if ((bo & 0x4) == 0) { 3499 3499 /* Decrement and test CTR */ ··· 3530 3530 } 3531 3531 tcg_temp_free_i32(temp); 3532 3532 } 3533 - gen_update_cfar(ctx, ctx->nip - 4); 3533 + gen_update_cfar(ctx, ctx->base.pc_next - 4); 3534 3534 if (type == BCOND_IM) { 3535 3535 target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); 3536 3536 if (likely(AA(ctx->opcode) == 0)) { 3537 - gen_goto_tb(ctx, 0, ctx->nip + li - 4); 3537 + gen_goto_tb(ctx, 0, ctx->base.pc_next + li - 4); 3538 3538 } else { 3539 3539 gen_goto_tb(ctx, 0, li); 3540 3540 } ··· 3549 3549 } 3550 3550 if ((bo & 0x14) != 0x14) { 3551 3551 gen_set_label(l1); 3552 - gen_goto_tb(ctx, 1, ctx->nip); 3552 + gen_goto_tb(ctx, 1, ctx->base.pc_next); 3553 3553 } 3554 3554 } 3555 3555 ··· 3645 3645 } 3646 3646 /* Restore CPU state */ 3647 3647 CHK_SV; 3648 - gen_update_cfar(ctx, ctx->nip - 4); 3648 + gen_update_cfar(ctx, ctx->base.pc_next - 4); 3649 3649 gen_helper_rfi(cpu_env); 3650 3650 gen_sync_exception(ctx); 3651 3651 #endif ··· 3659 3659 #else 3660 3660 /* Restore CPU state */ 3661 3661 CHK_SV; 3662 - gen_update_cfar(ctx, ctx->nip - 4); 3662 + gen_update_cfar(ctx, ctx->base.pc_next - 4); 3663 3663 gen_helper_rfid(cpu_env); 3664 3664 gen_sync_exception(ctx); 3665 3665 #endif ··· 3934 3934 */ 3935 3935 if (sprn != SPR_PVR) { 3936 3936 fprintf(stderr, "Trying to read privileged spr %d (0x%03x) at " 3937 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 3937 + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 3938 3938 if (qemu_log_separate()) { 3939 3939 qemu_log("Trying to read privileged spr %d (0x%03x) at " 3940 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 3940 + TARGET_FMT_lx "\n", sprn, sprn, 3941 + ctx->base.pc_next - 4); 3941 3942 } 3942 3943 } 3943 3944 gen_priv_exception(ctx, 
POWERPC_EXCP_PRIV_REG); ··· 3951 3952 } 3952 3953 /* Not defined */ 3953 3954 fprintf(stderr, "Trying to read invalid spr %d (0x%03x) at " 3954 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 3955 + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 3955 3956 if (qemu_log_separate()) { 3956 3957 qemu_log("Trying to read invalid spr %d (0x%03x) at " 3957 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 3958 + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 3958 3959 } 3959 3960 3960 3961 /* The behaviour depends on MSR:PR and SPR# bit 0x10, ··· 4030 4031 * if we enter power saving mode, we will exit the loop 4031 4032 * directly from ppc_store_msr 4032 4033 */ 4033 - gen_update_nip(ctx, ctx->nip); 4034 + gen_update_nip(ctx, ctx->base.pc_next); 4034 4035 gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]); 4035 4036 /* Must stop the translation as machine state (may have) changed */ 4036 4037 /* Note that mtmsr is not always defined as context-synchronizing */ ··· 4059 4060 * if we enter power saving mode, we will exit the loop 4060 4061 * directly from ppc_store_msr 4061 4062 */ 4062 - gen_update_nip(ctx, ctx->nip); 4063 + gen_update_nip(ctx, ctx->base.pc_next); 4063 4064 #if defined(TARGET_PPC64) 4064 4065 tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32); 4065 4066 #else ··· 4097 4098 } else { 4098 4099 /* Privilege exception */ 4099 4100 fprintf(stderr, "Trying to write privileged spr %d (0x%03x) at " 4100 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 4101 + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 4101 4102 if (qemu_log_separate()) { 4102 4103 qemu_log("Trying to write privileged spr %d (0x%03x) at " 4103 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 4104 + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 4104 4105 } 4105 4106 gen_priv_exception(ctx, POWERPC_EXCP_PRIV_REG); 4106 4107 } ··· 4115 4116 /* Not defined */ 4116 4117 if (qemu_log_separate()) { 4117 4118 qemu_log("Trying to write invalid spr 
%d (0x%03x) at " 4118 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 4119 + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 4119 4120 } 4120 4121 fprintf(stderr, "Trying to write invalid spr %d (0x%03x) at " 4121 - TARGET_FMT_lx "\n", sprn, sprn, ctx->nip - 4); 4122 + TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 4122 4123 4123 4124 4124 4125 /* The behaviour depends on MSR:PR and SPR# bit 0x10, ··· 7212 7213 CPUPPCState *env = cs->env_ptr; 7213 7214 DisasContext ctx, *ctxp = &ctx; 7214 7215 opc_handler_t **table, *handler; 7215 - target_ulong pc_start; 7216 - int num_insns; 7217 7216 int max_insns; 7218 7217 7219 - pc_start = tb->pc; 7220 - ctx.nip = pc_start; 7221 - ctx.tb = tb; 7218 + ctx.base.singlestep_enabled = cs->singlestep_enabled; 7219 + ctx.base.tb = tb; 7220 + ctx.base.pc_first = tb->pc; 7221 + ctx.base.pc_next = tb->pc; /* nip */ 7222 + ctx.base.num_insns = 0; 7223 + 7222 7224 ctx.exception = POWERPC_EXCP_NONE; 7223 7225 ctx.spr_cb = env->spr_cb; 7224 7226 ctx.pr = msr_pr; ··· 7270 7272 ctx.singlestep_enabled = 0; 7271 7273 if ((env->flags & POWERPC_FLAG_BE) && msr_be) 7272 7274 ctx.singlestep_enabled |= CPU_BRANCH_STEP; 7273 - if (unlikely(cs->singlestep_enabled)) { 7275 + if (unlikely(ctx.base.singlestep_enabled)) { 7274 7276 ctx.singlestep_enabled |= GDBSTUB_SINGLE_STEP; 7275 7277 } 7276 7278 #if defined (DO_SINGLE_STEP) && 0 7277 7279 /* Single step trace mode */ 7278 7280 msr_se = 1; 7279 7281 #endif 7280 - num_insns = 0; 7282 + ctx.base.num_insns = 0; 7281 7283 max_insns = tb_cflags(tb) & CF_COUNT_MASK; 7282 7284 if (max_insns == 0) { 7283 7285 max_insns = CF_COUNT_MASK; ··· 7290 7292 tcg_clear_temp_count(); 7291 7293 /* Set env in case of segfault during code fetch */ 7292 7294 while (ctx.exception == POWERPC_EXCP_NONE && !tcg_op_buf_full()) { 7293 - tcg_gen_insn_start(ctx.nip); 7294 - num_insns++; 7295 + tcg_gen_insn_start(ctx.base.pc_next); 7296 + ctx.base.num_insns++; 7295 7297 7296 - if (unlikely(cpu_breakpoint_test(cs, 
ctx.nip, BP_ANY))) { 7298 + if (unlikely(cpu_breakpoint_test(cs, ctx.base.pc_next, BP_ANY))) { 7297 7299 gen_debug_exception(ctxp); 7298 7300 /* The address covered by the breakpoint must be included in 7299 7301 [tb->pc, tb->pc + tb->size) in order to for it to be 7300 7302 properly cleared -- thus we increment the PC here so that 7301 7303 the logic setting tb->size below does the right thing. */ 7302 - ctx.nip += 4; 7304 + ctx.base.pc_next += 4; 7303 7305 break; 7304 7306 } 7305 7307 7306 7308 LOG_DISAS("----------------\n"); 7307 7309 LOG_DISAS("nip=" TARGET_FMT_lx " super=%d ir=%d\n", 7308 - ctx.nip, ctx.mem_idx, (int)msr_ir); 7309 - if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) 7310 + ctx.base.pc_next, ctx.mem_idx, (int)msr_ir); 7311 + if (ctx.base.num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) { 7310 7312 gen_io_start(); 7313 + } 7311 7314 if (unlikely(need_byteswap(&ctx))) { 7312 - ctx.opcode = bswap32(cpu_ldl_code(env, ctx.nip)); 7315 + ctx.opcode = bswap32(cpu_ldl_code(env, ctx.base.pc_next)); 7313 7316 } else { 7314 - ctx.opcode = cpu_ldl_code(env, ctx.nip); 7317 + ctx.opcode = cpu_ldl_code(env, ctx.base.pc_next); 7315 7318 } 7316 7319 LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n", 7317 7320 ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode), 7318 7321 opc3(ctx.opcode), opc4(ctx.opcode), 7319 7322 ctx.le_mode ? 
"little" : "big"); 7320 - ctx.nip += 4; 7323 + ctx.base.pc_next += 4; 7321 7324 table = env->opcodes; 7322 7325 handler = table[opc1(ctx.opcode)]; 7323 7326 if (is_indirect_opcode(handler)) { ··· 7339 7342 TARGET_FMT_lx " %d\n", 7340 7343 opc1(ctx.opcode), opc2(ctx.opcode), 7341 7344 opc3(ctx.opcode), opc4(ctx.opcode), 7342 - ctx.opcode, ctx.nip - 4, (int)msr_ir); 7345 + ctx.opcode, ctx.base.pc_next - 4, (int)msr_ir); 7343 7346 } else { 7344 7347 uint32_t inval; 7345 7348 ··· 7355 7358 TARGET_FMT_lx "\n", ctx.opcode & inval, 7356 7359 opc1(ctx.opcode), opc2(ctx.opcode), 7357 7360 opc3(ctx.opcode), opc4(ctx.opcode), 7358 - ctx.opcode, ctx.nip - 4); 7361 + ctx.opcode, ctx.base.pc_next - 4); 7359 7362 gen_inval_exception(ctxp, POWERPC_EXCP_INVAL_INVAL); 7360 7363 break; 7361 7364 } ··· 7366 7369 #endif 7367 7370 /* Check trace mode exceptions */ 7368 7371 if (unlikely(ctx.singlestep_enabled & CPU_SINGLE_STEP && 7369 - (ctx.nip <= 0x100 || ctx.nip > 0xF00) && 7372 + (ctx.base.pc_next <= 0x100 || ctx.base.pc_next > 0xF00) && 7370 7373 ctx.exception != POWERPC_SYSCALL && 7371 7374 ctx.exception != POWERPC_EXCP_TRAP && 7372 7375 ctx.exception != POWERPC_EXCP_BRANCH)) { 7373 - gen_exception_nip(ctxp, POWERPC_EXCP_TRACE, ctx.nip); 7374 - } else if (unlikely(((ctx.nip & (TARGET_PAGE_SIZE - 1)) == 0) || 7375 - (cs->singlestep_enabled) || 7376 + gen_exception_nip(ctxp, POWERPC_EXCP_TRACE, ctx.base.pc_next); 7377 + } else if (unlikely(((ctx.base.pc_next & (TARGET_PAGE_SIZE - 1)) 7378 + == 0) || 7379 + (ctx.base.singlestep_enabled) || 7376 7380 singlestep || 7377 - num_insns >= max_insns)) { 7381 + ctx.base.num_insns >= max_insns)) { 7378 7382 /* if we reach a page boundary or are single stepping, stop 7379 7383 * generation 7380 7384 */ ··· 7390 7394 if (tb_cflags(tb) & CF_LAST_IO) 7391 7395 gen_io_end(); 7392 7396 if (ctx.exception == POWERPC_EXCP_NONE) { 7393 - gen_goto_tb(&ctx, 0, ctx.nip); 7397 + gen_goto_tb(&ctx, 0, ctx.base.pc_next); 7394 7398 } else if (ctx.exception != 
POWERPC_EXCP_BRANCH) { 7395 - if (unlikely(cs->singlestep_enabled)) { 7399 + if (unlikely(ctx.base.singlestep_enabled)) { 7396 7400 gen_debug_exception(ctxp); 7397 7401 } 7398 7402 /* Generate the return instruction */ 7399 7403 tcg_gen_exit_tb(0); 7400 7404 } 7401 - gen_tb_end(tb, num_insns); 7405 + gen_tb_end(tb, ctx.base.num_insns); 7402 7406 7403 - tb->size = ctx.nip - pc_start; 7404 - tb->icount = num_insns; 7407 + tb->size = ctx.base.pc_next - ctx.base.pc_first; 7408 + tb->icount = ctx.base.num_insns; 7405 7409 7406 7410 #if defined(DEBUG_DISAS) 7407 7411 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM) 7408 - && qemu_log_in_addr_range(pc_start)) { 7412 + && qemu_log_in_addr_range(ctx.base.pc_first)) { 7409 7413 qemu_log_lock(); 7410 - qemu_log("IN: %s\n", lookup_symbol(pc_start)); 7411 - log_target_disas(cs, pc_start, ctx.nip - pc_start); 7414 + qemu_log("IN: %s\n", lookup_symbol(ctx.base.pc_first)); 7415 + log_target_disas(cs, ctx.base.pc_first, 7416 + ctx.base.pc_next - ctx.base.pc_first); 7412 7417 qemu_log("\n"); 7413 7418 qemu_log_unlock(); 7414 7419 }
+8 -8
target/ppc/translate/dfp-impl.inc.c
··· 15 15 gen_exception(ctx, POWERPC_EXCP_FPU); \ 16 16 return; \ 17 17 } \ 18 - gen_update_nip(ctx, ctx->nip - 4); \ 18 + gen_update_nip(ctx, ctx->base.pc_next - 4); \ 19 19 rd = gen_fprp_ptr(rD(ctx->opcode)); \ 20 20 ra = gen_fprp_ptr(rA(ctx->opcode)); \ 21 21 rb = gen_fprp_ptr(rB(ctx->opcode)); \ ··· 36 36 gen_exception(ctx, POWERPC_EXCP_FPU); \ 37 37 return; \ 38 38 } \ 39 - gen_update_nip(ctx, ctx->nip - 4); \ 39 + gen_update_nip(ctx, ctx->base.pc_next - 4); \ 40 40 ra = gen_fprp_ptr(rA(ctx->opcode)); \ 41 41 rb = gen_fprp_ptr(rB(ctx->opcode)); \ 42 42 gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ ··· 54 54 gen_exception(ctx, POWERPC_EXCP_FPU); \ 55 55 return; \ 56 56 } \ 57 - gen_update_nip(ctx, ctx->nip - 4); \ 57 + gen_update_nip(ctx, ctx->base.pc_next - 4); \ 58 58 uim = tcg_const_i32(UIMM5(ctx->opcode)); \ 59 59 rb = gen_fprp_ptr(rB(ctx->opcode)); \ 60 60 gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ ··· 72 72 gen_exception(ctx, POWERPC_EXCP_FPU); \ 73 73 return; \ 74 74 } \ 75 - gen_update_nip(ctx, ctx->nip - 4); \ 75 + gen_update_nip(ctx, ctx->base.pc_next - 4); \ 76 76 ra = gen_fprp_ptr(rA(ctx->opcode)); \ 77 77 dcm = tcg_const_i32(DCM(ctx->opcode)); \ 78 78 gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ ··· 90 90 gen_exception(ctx, POWERPC_EXCP_FPU); \ 91 91 return; \ 92 92 } \ 93 - gen_update_nip(ctx, ctx->nip - 4); \ 93 + gen_update_nip(ctx, ctx->base.pc_next - 4); \ 94 94 rt = gen_fprp_ptr(rD(ctx->opcode)); \ 95 95 rb = gen_fprp_ptr(rB(ctx->opcode)); \ 96 96 u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \ ··· 114 114 gen_exception(ctx, POWERPC_EXCP_FPU); \ 115 115 return; \ 116 116 } \ 117 - gen_update_nip(ctx, ctx->nip - 4); \ 117 + gen_update_nip(ctx, ctx->base.pc_next - 4); \ 118 118 rt = gen_fprp_ptr(rD(ctx->opcode)); \ 119 119 ra = gen_fprp_ptr(rA(ctx->opcode)); \ 120 120 rb = gen_fprp_ptr(rB(ctx->opcode)); \ ··· 137 137 gen_exception(ctx, POWERPC_EXCP_FPU); \ 138 138 return; \ 139 139 } \ 140 - gen_update_nip(ctx, ctx->nip - 4); \ 140 + 
gen_update_nip(ctx, ctx->base.pc_next - 4); \ 141 141 rt = gen_fprp_ptr(rD(ctx->opcode)); \ 142 142 rb = gen_fprp_ptr(rB(ctx->opcode)); \ 143 143 gen_helper_##name(cpu_env, rt, rb); \ ··· 157 157 gen_exception(ctx, POWERPC_EXCP_FPU); \ 158 158 return; \ 159 159 } \ 160 - gen_update_nip(ctx, ctx->nip - 4); \ 160 + gen_update_nip(ctx, ctx->base.pc_next - 4); \ 161 161 rt = gen_fprp_ptr(rD(ctx->opcode)); \ 162 162 rs = gen_fprp_ptr(fprfld(ctx->opcode)); \ 163 163 i32 = tcg_const_i32(i32fld(ctx->opcode)); \
+16 -16
target/ppc/translate_init.c
··· 179 179 #if !defined(CONFIG_USER_ONLY) 180 180 static void spr_read_decr(DisasContext *ctx, int gprn, int sprn) 181 181 { 182 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 182 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 183 183 gen_io_start(); 184 184 } 185 185 gen_helper_load_decr(cpu_gpr[gprn], cpu_env); 186 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 186 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 187 187 gen_io_end(); 188 188 gen_stop_exception(ctx); 189 189 } ··· 191 191 192 192 static void spr_write_decr(DisasContext *ctx, int sprn, int gprn) 193 193 { 194 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 194 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 195 195 gen_io_start(); 196 196 } 197 197 gen_helper_store_decr(cpu_env, cpu_gpr[gprn]); 198 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 198 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 199 199 gen_io_end(); 200 200 gen_stop_exception(ctx); 201 201 } ··· 206 206 /* Time base */ 207 207 static void spr_read_tbl(DisasContext *ctx, int gprn, int sprn) 208 208 { 209 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 209 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 210 210 gen_io_start(); 211 211 } 212 212 gen_helper_load_tbl(cpu_gpr[gprn], cpu_env); 213 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 213 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 214 214 gen_io_end(); 215 215 gen_stop_exception(ctx); 216 216 } ··· 218 218 219 219 static void spr_read_tbu(DisasContext *ctx, int gprn, int sprn) 220 220 { 221 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 221 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 222 222 gen_io_start(); 223 223 } 224 224 gen_helper_load_tbu(cpu_gpr[gprn], cpu_env); 225 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 225 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 226 226 gen_io_end(); 227 227 gen_stop_exception(ctx); 228 228 } ··· 243 243 #if !defined(CONFIG_USER_ONLY) 244 244 static void spr_write_tbl(DisasContext *ctx, int sprn, int gprn) 245 245 { 246 - if (tb_cflags(ctx->tb) 
& CF_USE_ICOUNT) { 246 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 247 247 gen_io_start(); 248 248 } 249 249 gen_helper_store_tbl(cpu_env, cpu_gpr[gprn]); 250 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 250 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 251 251 gen_io_end(); 252 252 gen_stop_exception(ctx); 253 253 } ··· 255 255 256 256 static void spr_write_tbu(DisasContext *ctx, int sprn, int gprn) 257 257 { 258 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 258 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 259 259 gen_io_start(); 260 260 } 261 261 gen_helper_store_tbu(cpu_env, cpu_gpr[gprn]); 262 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 262 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 263 263 gen_io_end(); 264 264 gen_stop_exception(ctx); 265 265 } ··· 287 287 /* HDECR */ 288 288 static void spr_read_hdecr(DisasContext *ctx, int gprn, int sprn) 289 289 { 290 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 290 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 291 291 gen_io_start(); 292 292 } 293 293 gen_helper_load_hdecr(cpu_gpr[gprn], cpu_env); 294 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 294 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 295 295 gen_io_end(); 296 296 gen_stop_exception(ctx); 297 297 } ··· 299 299 300 300 static void spr_write_hdecr(DisasContext *ctx, int sprn, int gprn) 301 301 { 302 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 302 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 303 303 gen_io_start(); 304 304 } 305 305 gen_helper_store_hdecr(cpu_env, cpu_gpr[gprn]); 306 - if (tb_cflags(ctx->tb) & CF_USE_ICOUNT) { 306 + if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 307 307 gen_io_end(); 308 308 gen_stop_exception(ctx); 309 309 }