qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

target/alpha: Merge several flag bytes into ENV->FLAGS

The flags are arranged such that we can manipulate them either as
a whole, or as individual bytes. The computation within
cpu_get_tb_cpu_state is now reduced to a single load and mask.

Tested-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>

+117 -99
-1
hw/alpha/dp264.c
··· 123 123 124 124 /* Start all cpus at the PALcode RESET entry point. */ 125 125 for (i = 0; i < smp_cpus; ++i) { 126 - cpus[i]->env.pal_mode = 1; 127 126 cpus[i]->env.pc = palcode_entry; 128 127 cpus[i]->env.palbr = palcode_entry; 129 128 }
+14 -11
linux-user/main.c
··· 3037 3037 abi_long sysret; 3038 3038 3039 3039 while (1) { 3040 + bool arch_interrupt = true; 3041 + 3040 3042 cpu_exec_start(cs); 3041 3043 trapnr = cpu_exec(cs); 3042 3044 cpu_exec_end(cs); 3043 3045 process_queued_cpu_work(cs); 3044 3046 3045 - /* All of the traps imply a transition through PALcode, which 3046 - implies an REI instruction has been executed. Which means 3047 - that the intr_flag should be cleared. */ 3048 - env->intr_flag = 0; 3049 - 3050 3047 switch (trapnr) { 3051 3048 case EXCP_RESET: 3052 3049 fprintf(stderr, "Reset requested. Exit\n"); ··· 3063 3060 exit(EXIT_FAILURE); 3064 3061 break; 3065 3062 case EXCP_MMFAULT: 3066 - env->lock_addr = -1; 3067 3063 info.si_signo = TARGET_SIGSEGV; 3068 3064 info.si_errno = 0; 3069 3065 info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID ··· 3072 3068 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); 3073 3069 break; 3074 3070 case EXCP_UNALIGN: 3075 - env->lock_addr = -1; 3076 3071 info.si_signo = TARGET_SIGBUS; 3077 3072 info.si_errno = 0; 3078 3073 info.si_code = TARGET_BUS_ADRALN; ··· 3081 3076 break; 3082 3077 case EXCP_OPCDEC: 3083 3078 do_sigill: 3084 - env->lock_addr = -1; 3085 3079 info.si_signo = TARGET_SIGILL; 3086 3080 info.si_errno = 0; 3087 3081 info.si_code = TARGET_ILL_ILLOPC; ··· 3089 3083 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); 3090 3084 break; 3091 3085 case EXCP_ARITH: 3092 - env->lock_addr = -1; 3093 3086 info.si_signo = TARGET_SIGFPE; 3094 3087 info.si_errno = 0; 3095 3088 info.si_code = TARGET_FPE_FLTINV; ··· 3100 3093 /* No-op. Linux simply re-enables the FPU. 
*/ 3101 3094 break; 3102 3095 case EXCP_CALL_PAL: 3103 - env->lock_addr = -1; 3104 3096 switch (env->error_code) { 3105 3097 case 0x80: 3106 3098 /* BPT */ ··· 3197 3189 case EXCP_DEBUG: 3198 3190 info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP); 3199 3191 if (info.si_signo) { 3200 - env->lock_addr = -1; 3201 3192 info.si_errno = 0; 3202 3193 info.si_code = TARGET_TRAP_BRKPT; 3203 3194 queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); 3195 + } else { 3196 + arch_interrupt = false; 3204 3197 } 3205 3198 break; 3206 3199 case EXCP_INTERRUPT: ··· 3208 3201 break; 3209 3202 case EXCP_ATOMIC: 3210 3203 cpu_exec_step_atomic(cs); 3204 + arch_interrupt = false; 3211 3205 break; 3212 3206 default: 3213 3207 printf ("Unhandled trap: 0x%x\n", trapnr); ··· 3215 3209 exit(EXIT_FAILURE); 3216 3210 } 3217 3211 process_pending_signals (env); 3212 + 3213 + /* Most of the traps imply a transition through PALcode, which 3214 + implies an REI instruction has been executed. Which means 3215 + that RX and LOCK_ADDR should be cleared. But there are a 3216 + few exceptions for traps internal to QEMU. */ 3217 + if (arch_interrupt) { 3218 + env->flags &= ~ENV_FLAG_RX_FLAG; 3219 + env->lock_addr = -1; 3220 + } 3218 3221 } 3219 3222 } 3220 3223 #endif /* TARGET_ALPHA */
+4 -3
target/alpha/cpu.c
··· 276 276 277 277 alpha_translate_init(); 278 278 279 + env->lock_addr = -1; 279 280 #if defined(CONFIG_USER_ONLY) 280 - env->ps = PS_USER_MODE; 281 + env->flags = ENV_FLAG_PS_USER | ENV_FLAG_FEN; 281 282 cpu_alpha_store_fpcr(env, (FPCR_INVD | FPCR_DZED | FPCR_OVFD 282 283 | FPCR_UNFD | FPCR_INED | FPCR_DNOD 283 284 | FPCR_DYN_NORMAL)); 285 + #else 286 + env->flags = ENV_FLAG_PAL_MODE | ENV_FLAG_FEN; 284 287 #endif 285 - env->lock_addr = -1; 286 - env->fen = 1; 287 288 } 288 289 289 290 static void alpha_cpu_class_init(ObjectClass *oc, void *data)
+31 -39
target/alpha/cpu.h
··· 242 242 uint8_t fpcr_dyn_round; 243 243 uint8_t fpcr_flush_to_zero; 244 244 245 - /* The Internal Processor Registers. Some of these we assume always 246 - exist for use in user-mode. */ 247 - uint8_t ps; 248 - uint8_t intr_flag; 249 - uint8_t pal_mode; 250 - uint8_t fen; 245 + /* Mask of PALmode, Processor State et al. Most of this gets copied 246 + into the TranslatorBlock flags and controls code generation. */ 247 + uint32_t flags; 251 248 249 + /* The high 32-bits of the processor cycle counter. */ 252 250 uint32_t pcc_ofs; 253 251 254 252 /* These pass data from the exception logic in the translator and ··· 398 396 }; 399 397 400 398 /* Processor status constants. */ 401 - enum { 402 - /* Low 3 bits are interrupt mask level. */ 403 - PS_INT_MASK = 7, 399 + /* Low 3 bits are interrupt mask level. */ 400 + #define PS_INT_MASK 7u 401 + 402 + /* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes; 403 + The Unix PALcode only uses bit 4. */ 404 + #define PS_USER_MODE 8u 405 + 406 + /* CPUAlphaState->flags constants. These are layed out so that we 407 + can set or reset the pieces individually by assigning to the byte, 408 + or manipulated as a whole. */ 409 + 410 + #define ENV_FLAG_PAL_SHIFT 0 411 + #define ENV_FLAG_PS_SHIFT 8 412 + #define ENV_FLAG_RX_SHIFT 16 413 + #define ENV_FLAG_FEN_SHIFT 24 414 + 415 + #define ENV_FLAG_PAL_MODE (1u << ENV_FLAG_PAL_SHIFT) 416 + #define ENV_FLAG_PS_USER (PS_USER_MODE << ENV_FLAG_PS_SHIFT) 417 + #define ENV_FLAG_RX_FLAG (1u << ENV_FLAG_RX_SHIFT) 418 + #define ENV_FLAG_FEN (1u << ENV_FLAG_FEN_SHIFT) 404 419 405 - /* Bits 4 and 5 are the mmu mode. The VMS PALcode uses all 4 modes; 406 - The Unix PALcode only uses bit 4. 
*/ 407 - PS_USER_MODE = 8 408 - }; 420 + #define ENV_FLAG_TB_MASK \ 421 + (ENV_FLAG_PAL_MODE | ENV_FLAG_PS_USER | ENV_FLAG_FEN) 409 422 410 423 static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch) 411 424 { 412 - if (env->pal_mode) { 413 - return MMU_KERNEL_IDX; 414 - } else if (env->ps & PS_USER_MODE) { 415 - return MMU_USER_IDX; 416 - } else { 417 - return MMU_KERNEL_IDX; 425 + int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX; 426 + if (env->flags & ENV_FLAG_PAL_MODE) { 427 + ret = MMU_KERNEL_IDX; 418 428 } 429 + return ret; 419 430 } 420 431 421 432 enum { ··· 482 493 int unused, unsigned size); 483 494 #endif 484 495 485 - /* Bits in TB->FLAGS that control how translation is processed. */ 486 - enum { 487 - TB_FLAGS_PAL_MODE = 1, 488 - TB_FLAGS_FEN = 2, 489 - TB_FLAGS_USER_MODE = 8, 490 - }; 491 - 492 496 static inline void cpu_get_tb_cpu_state(CPUAlphaState *env, target_ulong *pc, 493 497 target_ulong *cs_base, uint32_t *pflags) 494 498 { 495 - int flags = 0; 496 - 497 499 *pc = env->pc; 498 500 *cs_base = 0; 499 - 500 - if (env->pal_mode) { 501 - flags = TB_FLAGS_PAL_MODE; 502 - } else { 503 - flags = env->ps & PS_USER_MODE; 504 - } 505 - if (env->fen) { 506 - flags |= TB_FLAGS_FEN; 507 - } 508 - 509 - *pflags = flags; 501 + *pflags = env->flags & ENV_FLAG_TB_MASK; 510 502 } 511 503 512 504 #endif /* ALPHA_CPU_H */
+6 -6
target/alpha/helper.c
··· 81 81 static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg) 82 82 { 83 83 #ifndef CONFIG_USER_ONLY 84 - if (env->pal_mode) { 84 + if (env->flags & ENV_FLAG_PAL_MODE) { 85 85 if (reg >= 8 && reg <= 14) { 86 86 return &env->shadow[reg - 8]; 87 87 } else if (reg == 25) { ··· 364 364 365 365 /* Remember where the exception happened. Emulate real hardware in 366 366 that the low bit of the PC indicates PALmode. */ 367 - env->exc_addr = env->pc | env->pal_mode; 367 + env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE); 368 368 369 369 /* Continue execution at the PALcode entry point. */ 370 370 env->pc = env->palbr + i; 371 371 372 372 /* Switch to PALmode. */ 373 - env->pal_mode = 1; 373 + env->flags |= ENV_FLAG_PAL_MODE; 374 374 #endif /* !USER_ONLY */ 375 375 } 376 376 ··· 381 381 int idx = -1; 382 382 383 383 /* We never take interrupts while in PALmode. */ 384 - if (env->pal_mode) { 384 + if (env->flags & ENV_FLAG_PAL_MODE) { 385 385 return false; 386 386 } 387 387 388 388 /* Fall through the switch, collecting the highest priority 389 389 interrupt that isn't masked by the processor status IPL. */ 390 390 /* ??? This hard-codes the OSF/1 interrupt levels. */ 391 - switch (env->ps & PS_INT_MASK) { 391 + switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) { 392 392 case 0 ... 3: 393 393 if (interrupt_request & CPU_INTERRUPT_HARD) { 394 394 idx = EXCP_DEV_INTERRUPT; ··· 432 432 int i; 433 433 434 434 cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n", 435 - env->pc, env->ps); 435 + env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8)); 436 436 for (i = 0; i < 31; i++) { 437 437 cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i, 438 438 linux_reg_names[i], cpu_alpha_load_gr(env, i));
+3 -7
target/alpha/machine.c
··· 48 48 VMSTATE_UINTTL(lock_addr, CPUAlphaState), 49 49 VMSTATE_UINTTL(lock_value, CPUAlphaState), 50 50 51 - VMSTATE_UINT8(ps, CPUAlphaState), 52 - VMSTATE_UINT8(intr_flag, CPUAlphaState), 53 - VMSTATE_UINT8(pal_mode, CPUAlphaState), 54 - VMSTATE_UINT8(fen, CPUAlphaState), 55 - 51 + VMSTATE_UINT32(flags, CPUAlphaState), 56 52 VMSTATE_UINT32(pcc_ofs, CPUAlphaState), 57 53 58 54 VMSTATE_UINTTL(trap_arg0, CPUAlphaState), ··· 74 70 75 71 static const VMStateDescription vmstate_env = { 76 72 .name = "env", 77 - .version_id = 2, 78 - .minimum_version_id = 2, 73 + .version_id = 3, 74 + .minimum_version_id = 3, 79 75 .fields = vmstate_env_fields, 80 76 }; 81 77
+59 -32
target/alpha/translate.c
··· 269 269 } 270 270 } 271 271 272 + static int get_flag_ofs(unsigned shift) 273 + { 274 + int ofs = offsetof(CPUAlphaState, flags); 275 + #ifdef HOST_WORDS_BIGENDIAN 276 + ofs += 3 - (shift / 8); 277 + #else 278 + ofs += shift / 8; 279 + #endif 280 + return ofs; 281 + } 282 + 283 + static void ld_flag_byte(TCGv val, unsigned shift) 284 + { 285 + tcg_gen_ld8u_i64(val, cpu_env, get_flag_ofs(shift)); 286 + } 287 + 288 + static void st_flag_byte(TCGv val, unsigned shift) 289 + { 290 + tcg_gen_st8_i64(val, cpu_env, get_flag_ofs(shift)); 291 + } 292 + 272 293 static void gen_excp_1(int exception, int error_code) 273 294 { 274 295 TCGv_i32 tmp1, tmp2; ··· 453 474 static bool in_superpage(DisasContext *ctx, int64_t addr) 454 475 { 455 476 #ifndef CONFIG_USER_ONLY 456 - return ((ctx->tbflags & TB_FLAGS_USER_MODE) == 0 477 + return ((ctx->tbflags & ENV_FLAG_PS_USER) == 0 457 478 && addr >> TARGET_VIRT_ADDR_SPACE_BITS == -1 458 479 && ((addr >> 41) & 3) == 2); 459 480 #else ··· 1125 1146 1126 1147 static void gen_rx(DisasContext *ctx, int ra, int set) 1127 1148 { 1128 - TCGv_i32 tmp; 1149 + TCGv tmp; 1129 1150 1130 1151 if (ra != 31) { 1131 - tcg_gen_ld8u_i64(ctx->ir[ra], cpu_env, 1132 - offsetof(CPUAlphaState, intr_flag)); 1152 + ld_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); 1133 1153 } 1134 1154 1135 - tmp = tcg_const_i32(set); 1136 - tcg_gen_st8_i32(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag)); 1137 - tcg_temp_free_i32(tmp); 1155 + tmp = tcg_const_i64(set); 1156 + st_flag_byte(ctx->ir[ra], ENV_FLAG_RX_SHIFT); 1157 + tcg_temp_free(tmp); 1138 1158 } 1139 1159 1140 1160 static ExitStatus gen_call_pal(DisasContext *ctx, int palcode) ··· 1168 1188 1169 1189 #ifndef CONFIG_USER_ONLY 1170 1190 /* Privileged PAL code */ 1171 - if (palcode < 0x40 && (ctx->tbflags & TB_FLAGS_USER_MODE) == 0) { 1191 + if (palcode < 0x40 && (ctx->tbflags & ENV_FLAG_PS_USER) == 0) { 1172 1192 TCGv tmp; 1173 1193 switch (palcode) { 1174 1194 case 0x01: ··· 1199 1219 /* SWPIPL */ 1200 1220 /* Note 
that we already know we're in kernel mode, so we know 1201 1221 that PS only contains the 3 IPL bits. */ 1202 - tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env, 1203 - offsetof(CPUAlphaState, ps)); 1222 + ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1204 1223 1205 1224 /* But make sure and store only the 3 IPL bits from the user. */ 1206 1225 tmp = tcg_temp_new(); 1207 1226 tcg_gen_andi_i64(tmp, ctx->ir[IR_A0], PS_INT_MASK); 1208 - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, ps)); 1227 + st_flag_byte(tmp, ENV_FLAG_PS_SHIFT); 1209 1228 tcg_temp_free(tmp); 1210 1229 1211 1230 /* Allow interrupts to be recognized right away. */ ··· 1214 1233 1215 1234 case 0x36: 1216 1235 /* RDPS */ 1217 - tcg_gen_ld8u_i64(ctx->ir[IR_V0], cpu_env, 1218 - offsetof(CPUAlphaState, ps)); 1236 + ld_flag_byte(ctx->ir[IR_V0], ENV_FLAG_PS_SHIFT); 1219 1237 break; 1238 + 1220 1239 case 0x38: 1221 1240 /* WRUSP */ 1222 1241 tcg_gen_st_i64(ctx->ir[IR_A0], cpu_env, ··· 1259 1278 uint64_t exc_addr = ctx->pc; 1260 1279 uint64_t entry = ctx->palbr; 1261 1280 1262 - if (ctx->tbflags & TB_FLAGS_PAL_MODE) { 1281 + if (ctx->tbflags & ENV_FLAG_PAL_MODE) { 1263 1282 exc_addr |= 1; 1264 1283 } else { 1265 1284 tcg_gen_movi_i64(tmp, 1); 1266 - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode)); 1285 + st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 1267 1286 } 1268 1287 1269 1288 tcg_gen_movi_i64(tmp, exc_addr); ··· 1293 1312 1294 1313 #ifndef CONFIG_USER_ONLY 1295 1314 1296 - #define PR_BYTE 0x100000 1297 1315 #define PR_LONG 0x200000 1298 1316 1299 1317 static int cpu_pr_data(int pr) 1300 1318 { 1301 1319 switch (pr) { 1302 - case 0: return offsetof(CPUAlphaState, ps) | PR_BYTE; 1303 - case 1: return offsetof(CPUAlphaState, fen) | PR_BYTE; 1304 1320 case 2: return offsetof(CPUAlphaState, pcc_ofs) | PR_LONG; 1305 1321 case 3: return offsetof(CPUAlphaState, trap_arg0); 1306 1322 case 4: return offsetof(CPUAlphaState, trap_arg1); ··· 1348 1364 } else { 1349 1365 helper(va); 1350 1366 } 1367 + 
break; 1368 + 1369 + case 0: /* PS */ 1370 + ld_flag_byte(va, ENV_FLAG_PS_SHIFT); 1371 + break; 1372 + case 1: /* FEN */ 1373 + ld_flag_byte(va, ENV_FLAG_FEN_SHIFT); 1351 1374 break; 1352 1375 1353 1376 default: ··· 1356 1379 data = cpu_pr_data(regno); 1357 1380 if (data == 0) { 1358 1381 tcg_gen_movi_i64(va, 0); 1359 - } else if (data & PR_BYTE) { 1360 - tcg_gen_ld8u_i64(va, cpu_env, data & ~PR_BYTE); 1361 1382 } else if (data & PR_LONG) { 1362 1383 tcg_gen_ld32s_i64(va, cpu_env, data & ~PR_LONG); 1363 1384 } else { ··· 1417 1438 tcg_gen_mov_i64(cpu_std_ir[regno], vb); 1418 1439 break; 1419 1440 1441 + case 0: /* PS */ 1442 + st_flag_byte(vb, ENV_FLAG_PS_SHIFT); 1443 + break; 1444 + case 1: /* FEN */ 1445 + st_flag_byte(vb, ENV_FLAG_FEN_SHIFT); 1446 + break; 1447 + 1420 1448 default: 1421 1449 /* The basic registers are data only, and unknown registers 1422 1450 are read-zero, write-ignore. */ 1423 1451 data = cpu_pr_data(regno); 1424 1452 if (data != 0) { 1425 - if (data & PR_BYTE) { 1426 - tcg_gen_st8_i64(vb, cpu_env, data & ~PR_BYTE); 1427 - } else if (data & PR_LONG) { 1453 + if (data & PR_LONG) { 1428 1454 tcg_gen_st32_i64(vb, cpu_env, data & ~PR_LONG); 1429 1455 } else { 1430 1456 tcg_gen_st_i64(vb, cpu_env, data); ··· 2430 2456 case 0x19: 2431 2457 /* HW_MFPR (PALcode) */ 2432 2458 #ifndef CONFIG_USER_ONLY 2433 - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); 2459 + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2434 2460 va = dest_gpr(ctx, ra); 2435 2461 ret = gen_mfpr(ctx, va, insn & 0xffff); 2436 2462 break; ··· 2452 2478 case 0x1B: 2453 2479 /* HW_LD (PALcode) */ 2454 2480 #ifndef CONFIG_USER_ONLY 2455 - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); 2481 + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2456 2482 { 2457 2483 TCGv addr = tcg_temp_new(); 2458 2484 vb = load_gpr(ctx, rb); ··· 2674 2700 case 0x1D: 2675 2701 /* HW_MTPR (PALcode) */ 2676 2702 #ifndef CONFIG_USER_ONLY 2677 - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); 2703 + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2678 2704 vb = load_gpr(ctx, rb); 
2679 2705 ret = gen_mtpr(ctx, vb, insn & 0xffff); 2680 2706 break; ··· 2685 2711 case 0x1E: 2686 2712 /* HW_RET (PALcode) */ 2687 2713 #ifndef CONFIG_USER_ONLY 2688 - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); 2714 + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2689 2715 if (rb == 31) { 2690 2716 /* Pre-EV6 CPUs interpreted this as HW_REI, loading the return 2691 2717 address from EXC_ADDR. This turns out to be useful for our ··· 2695 2721 } else { 2696 2722 vb = load_gpr(ctx, rb); 2697 2723 } 2724 + tcg_gen_movi_i64(cpu_lock_addr, -1); 2698 2725 tmp = tcg_temp_new(); 2699 2726 tcg_gen_movi_i64(tmp, 0); 2700 - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, intr_flag)); 2701 - tcg_gen_movi_i64(cpu_lock_addr, -1); 2727 + st_flag_byte(tmp, ENV_FLAG_RX_SHIFT); 2702 2728 tcg_gen_andi_i64(tmp, vb, 1); 2703 - tcg_gen_st8_i64(tmp, cpu_env, offsetof(CPUAlphaState, pal_mode)); 2729 + st_flag_byte(tmp, ENV_FLAG_PAL_SHIFT); 2730 + tcg_temp_free(tmp); 2704 2731 tcg_gen_andi_i64(cpu_pc, vb, ~3); 2705 2732 /* Allow interrupts to be recognized right away. */ 2706 2733 ret = EXIT_PC_UPDATED_NOCHAIN; ··· 2712 2739 case 0x1F: 2713 2740 /* HW_ST (PALcode) */ 2714 2741 #ifndef CONFIG_USER_ONLY 2715 - REQUIRE_TB_FLAG(TB_FLAGS_PAL_MODE); 2742 + REQUIRE_TB_FLAG(ENV_FLAG_PAL_MODE); 2716 2743 { 2717 2744 switch ((insn >> 12) & 0xF) { 2718 2745 case 0x0: ··· 2943 2970 ctx.ir = cpu_std_ir; 2944 2971 #else 2945 2972 ctx.palbr = env->palbr; 2946 - ctx.ir = (ctx.tbflags & TB_FLAGS_PAL_MODE ? cpu_pal_ir : cpu_std_ir); 2973 + ctx.ir = (ctx.tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); 2947 2974 #endif 2948 2975 2949 2976 /* ??? Every TB begins with unset rounding mode, to be initialized on