QEMU with HAX to log DMA reads & writes — see jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190610' into staging

Move softmmu tlb into CPUNegativeOffsetState

# gpg: Signature made Mon 10 Jun 2019 15:07:55 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190610: (39 commits)
tcg/arm: Remove mostly unreachable tlb special case
tcg/arm: Use LDRD to load tlb mask+table
tcg/aarch64: Use LDP to load tlb mask+table
cpu: Remove CPU_COMMON
cpu: Move the softmmu tlb to CPUNegativeOffsetState
cpu: Move icount_decr to CPUNegativeOffsetState
cpu: Introduce CPUNegativeOffsetState
cpu: Introduce cpu_set_cpustate_pointers
cpu: Move ENV_OFFSET to exec/gen-icount.h
target/xtensa: Use env_cpu, env_archcpu
target/unicore32: Use env_cpu, env_archcpu
target/tricore: Use env_cpu
target/tilegx: Use env_cpu
target/sparc: Use env_cpu, env_archcpu
target/sh4: Use env_cpu, env_archcpu
target/s390x: Use env_cpu, env_archcpu
target/riscv: Use env_cpu, env_archcpu
target/ppc: Use env_cpu, env_archcpu
target/openrisc: Use env_cpu, env_archcpu
target/nios2: Use env_cpu, env_archcpu
...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+2372 -2556
+4 -4
accel/tcg/atomic_template.h
··· 62 62 #define ATOMIC_TRACE_RMW do { \ 63 63 uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \ 64 64 \ 65 - trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \ 66 - trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, \ 65 + trace_guest_mem_before_exec(env_cpu(env), addr, info); \ 66 + trace_guest_mem_before_exec(env_cpu(env), addr, \ 67 67 info | TRACE_MEM_ST); \ 68 68 } while (0) 69 69 70 70 #define ATOMIC_TRACE_LD do { \ 71 71 uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false); \ 72 72 \ 73 - trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \ 73 + trace_guest_mem_before_exec(env_cpu(env), addr, info); \ 74 74 } while (0) 75 75 76 76 # define ATOMIC_TRACE_ST do { \ 77 77 uint8_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true); \ 78 78 \ 79 - trace_guest_mem_before_exec(ENV_GET_CPU(env), addr, info); \ 79 + trace_guest_mem_before_exec(env_cpu(env), addr, info); \ 80 80 } while (0) 81 81 82 82 /* Define host-endian atomic operations. Note that END is used within
+12 -11
accel/tcg/cpu-exec.c
··· 54 54 #define MAX_DELAY_PRINT_RATE 2000000000LL 55 55 #define MAX_NB_PRINTS 100 56 56 57 - static void align_clocks(SyncClocks *sc, const CPUState *cpu) 57 + static void align_clocks(SyncClocks *sc, CPUState *cpu) 58 58 { 59 59 int64_t cpu_icount; 60 60 ··· 62 62 return; 63 63 } 64 64 65 - cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low; 65 + cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; 66 66 sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount); 67 67 sc->last_cpu_icount = cpu_icount; 68 68 ··· 105 105 } 106 106 } 107 107 108 - static void init_delay_params(SyncClocks *sc, 109 - const CPUState *cpu) 108 + static void init_delay_params(SyncClocks *sc, CPUState *cpu) 110 109 { 111 110 if (!icount_align_option) { 112 111 return; 113 112 } 114 113 sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT); 115 114 sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock; 116 - sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low; 115 + sc->last_cpu_icount 116 + = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low; 117 117 if (sc->diff_clk < max_delay) { 118 118 max_delay = sc->diff_clk; 119 119 } ··· 467 467 if (cpu->exception_index < 0) { 468 468 #ifndef CONFIG_USER_ONLY 469 469 if (replay_has_exception() 470 - && cpu->icount_decr.u16.low + cpu->icount_extra == 0) { 470 + && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) { 471 471 /* try to cause an exception pending in the log */ 472 472 cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true); 473 473 } ··· 525 525 * Ensure zeroing happens before reading cpu->exit_request or 526 526 * cpu->interrupt_request (see also smp_wmb in cpu_exit()) 527 527 */ 528 - atomic_mb_set(&cpu->icount_decr.u16.high, 0); 528 + atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0); 529 529 530 530 if (unlikely(atomic_read(&cpu->interrupt_request))) { 531 531 int interrupt_request; ··· 596 596 } 597 597 598 598 /* Finally, 
check if we need to exit to the main loop. */ 599 - if (unlikely(atomic_read(&cpu->exit_request) 600 - || (use_icount && cpu->icount_decr.u16.low + cpu->icount_extra == 0))) { 599 + if (unlikely(atomic_read(&cpu->exit_request)) 600 + || (use_icount 601 + && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) { 601 602 atomic_set(&cpu->exit_request, 0); 602 603 if (cpu->exception_index == -1) { 603 604 cpu->exception_index = EXCP_INTERRUPT; ··· 624 625 } 625 626 626 627 *last_tb = NULL; 627 - insns_left = atomic_read(&cpu->icount_decr.u32); 628 + insns_left = atomic_read(&cpu_neg(cpu)->icount_decr.u32); 628 629 if (insns_left < 0) { 629 630 /* Something asked us to stop executing chained TBs; just 630 631 * continue round the main loop. Whatever requested the exit ··· 643 644 cpu_update_icount(cpu); 644 645 /* Refill decrementer and continue execution. */ 645 646 insns_left = MIN(0xffff, cpu->icount_budget); 646 - cpu->icount_decr.u16.low = insns_left; 647 + cpu_neg(cpu)->icount_decr.u16.low = insns_left; 647 648 cpu->icount_extra = cpu->icount_budget - insns_left; 648 649 if (!cpu->icount_extra) { 649 650 /* Execute any remaining instructions, then let the main loop
+119 -107
accel/tcg/cputlb.c
··· 76 76 77 77 static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx) 78 78 { 79 - return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS); 79 + return env_tlb(env)->f[mmu_idx].mask + (1 << CPU_TLB_ENTRY_BITS); 80 80 } 81 81 82 - static void tlb_window_reset(CPUTLBWindow *window, int64_t ns, 82 + static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, 83 83 size_t max_entries) 84 84 { 85 - window->begin_ns = ns; 86 - window->max_entries = max_entries; 85 + desc->window_begin_ns = ns; 86 + desc->window_max_entries = max_entries; 87 87 } 88 88 89 89 static void tlb_dyn_init(CPUArchState *env) ··· 91 91 int i; 92 92 93 93 for (i = 0; i < NB_MMU_MODES; i++) { 94 - CPUTLBDesc *desc = &env->tlb_d[i]; 94 + CPUTLBDesc *desc = &env_tlb(env)->d[i]; 95 95 size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; 96 96 97 - tlb_window_reset(&desc->window, get_clock_realtime(), 0); 97 + tlb_window_reset(desc, get_clock_realtime(), 0); 98 98 desc->n_used_entries = 0; 99 - env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS; 100 - env->tlb_table[i] = g_new(CPUTLBEntry, n_entries); 101 - env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries); 99 + env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS; 100 + env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries); 101 + env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries); 102 102 } 103 103 } 104 104 ··· 144 144 */ 145 145 static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx) 146 146 { 147 - CPUTLBDesc *desc = &env->tlb_d[mmu_idx]; 147 + CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx]; 148 148 size_t old_size = tlb_n_entries(env, mmu_idx); 149 149 size_t rate; 150 150 size_t new_size = old_size; 151 151 int64_t now = get_clock_realtime(); 152 152 int64_t window_len_ms = 100; 153 153 int64_t window_len_ns = window_len_ms * 1000 * 1000; 154 - bool window_expired = now > desc->window.begin_ns + window_len_ns; 154 + bool window_expired = now > desc->window_begin_ns + window_len_ns; 155 155 156 - if 
(desc->n_used_entries > desc->window.max_entries) { 157 - desc->window.max_entries = desc->n_used_entries; 156 + if (desc->n_used_entries > desc->window_max_entries) { 157 + desc->window_max_entries = desc->n_used_entries; 158 158 } 159 - rate = desc->window.max_entries * 100 / old_size; 159 + rate = desc->window_max_entries * 100 / old_size; 160 160 161 161 if (rate > 70) { 162 162 new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS); 163 163 } else if (rate < 30 && window_expired) { 164 - size_t ceil = pow2ceil(desc->window.max_entries); 165 - size_t expected_rate = desc->window.max_entries * 100 / ceil; 164 + size_t ceil = pow2ceil(desc->window_max_entries); 165 + size_t expected_rate = desc->window_max_entries * 100 / ceil; 166 166 167 167 /* 168 168 * Avoid undersizing when the max number of entries seen is just below ··· 182 182 183 183 if (new_size == old_size) { 184 184 if (window_expired) { 185 - tlb_window_reset(&desc->window, now, desc->n_used_entries); 185 + tlb_window_reset(desc, now, desc->n_used_entries); 186 186 } 187 187 return; 188 188 } 189 189 190 - g_free(env->tlb_table[mmu_idx]); 191 - g_free(env->iotlb[mmu_idx]); 190 + g_free(env_tlb(env)->f[mmu_idx].table); 191 + g_free(env_tlb(env)->d[mmu_idx].iotlb); 192 192 193 - tlb_window_reset(&desc->window, now, 0); 193 + tlb_window_reset(desc, now, 0); 194 194 /* desc->n_used_entries is cleared by the caller */ 195 - env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS; 196 - env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size); 197 - env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size); 195 + env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 196 + env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size); 197 + env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size); 198 198 /* 199 199 * If the allocations fail, try smaller sizes. We just freed some 200 200 * memory, so going back to half of new_size has a good chance of working. 
··· 202 202 * allocations to fail though, so we progressively reduce the allocation 203 203 * size, aborting if we cannot even allocate the smallest TLB we support. 204 204 */ 205 - while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) { 205 + while (env_tlb(env)->f[mmu_idx].table == NULL || 206 + env_tlb(env)->d[mmu_idx].iotlb == NULL) { 206 207 if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) { 207 208 error_report("%s: %s", __func__, strerror(errno)); 208 209 abort(); 209 210 } 210 211 new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS); 211 - env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS; 212 + env_tlb(env)->f[mmu_idx].mask = (new_size - 1) << CPU_TLB_ENTRY_BITS; 212 213 213 - g_free(env->tlb_table[mmu_idx]); 214 - g_free(env->iotlb[mmu_idx]); 215 - env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size); 216 - env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size); 214 + g_free(env_tlb(env)->f[mmu_idx].table); 215 + g_free(env_tlb(env)->d[mmu_idx].iotlb); 216 + env_tlb(env)->f[mmu_idx].table = g_try_new(CPUTLBEntry, new_size); 217 + env_tlb(env)->d[mmu_idx].iotlb = g_try_new(CPUIOTLBEntry, new_size); 217 218 } 218 219 } 219 220 220 221 static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx) 221 222 { 222 223 tlb_mmu_resize_locked(env, mmu_idx); 223 - memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx)); 224 - env->tlb_d[mmu_idx].n_used_entries = 0; 224 + memset(env_tlb(env)->f[mmu_idx].table, -1, sizeof_tlb(env, mmu_idx)); 225 + env_tlb(env)->d[mmu_idx].n_used_entries = 0; 225 226 } 226 227 227 228 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx) 228 229 { 229 - env->tlb_d[mmu_idx].n_used_entries++; 230 + env_tlb(env)->d[mmu_idx].n_used_entries++; 230 231 } 231 232 232 233 static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx) 233 234 { 234 - env->tlb_d[mmu_idx].n_used_entries--; 235 + env_tlb(env)->d[mmu_idx].n_used_entries--; 235 236 } 
236 237 237 238 void tlb_init(CPUState *cpu) 238 239 { 239 240 CPUArchState *env = cpu->env_ptr; 240 241 241 - qemu_spin_init(&env->tlb_c.lock); 242 + qemu_spin_init(&env_tlb(env)->c.lock); 242 243 243 244 /* Ensure that cpu_reset performs a full flush. */ 244 - env->tlb_c.dirty = ALL_MMUIDX_BITS; 245 + env_tlb(env)->c.dirty = ALL_MMUIDX_BITS; 245 246 246 247 tlb_dyn_init(env); 247 248 } ··· 273 274 CPU_FOREACH(cpu) { 274 275 CPUArchState *env = cpu->env_ptr; 275 276 276 - full += atomic_read(&env->tlb_c.full_flush_count); 277 - part += atomic_read(&env->tlb_c.part_flush_count); 278 - elide += atomic_read(&env->tlb_c.elide_flush_count); 277 + full += atomic_read(&env_tlb(env)->c.full_flush_count); 278 + part += atomic_read(&env_tlb(env)->c.part_flush_count); 279 + elide += atomic_read(&env_tlb(env)->c.elide_flush_count); 279 280 } 280 281 *pfull = full; 281 282 *ppart = part; ··· 285 286 static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) 286 287 { 287 288 tlb_table_flush_by_mmuidx(env, mmu_idx); 288 - memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0])); 289 - env->tlb_d[mmu_idx].large_page_addr = -1; 290 - env->tlb_d[mmu_idx].large_page_mask = -1; 291 - env->tlb_d[mmu_idx].vindex = 0; 289 + env_tlb(env)->d[mmu_idx].large_page_addr = -1; 290 + env_tlb(env)->d[mmu_idx].large_page_mask = -1; 291 + env_tlb(env)->d[mmu_idx].vindex = 0; 292 + memset(env_tlb(env)->d[mmu_idx].vtable, -1, 293 + sizeof(env_tlb(env)->d[0].vtable)); 292 294 } 293 295 294 296 static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) ··· 301 303 302 304 tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); 303 305 304 - qemu_spin_lock(&env->tlb_c.lock); 306 + qemu_spin_lock(&env_tlb(env)->c.lock); 305 307 306 - all_dirty = env->tlb_c.dirty; 308 + all_dirty = env_tlb(env)->c.dirty; 307 309 to_clean = asked & all_dirty; 308 310 all_dirty &= ~to_clean; 309 - env->tlb_c.dirty = all_dirty; 311 + env_tlb(env)->c.dirty = all_dirty; 310 312 311 313 for 
(work = to_clean; work != 0; work &= work - 1) { 312 314 int mmu_idx = ctz32(work); 313 315 tlb_flush_one_mmuidx_locked(env, mmu_idx); 314 316 } 315 317 316 - qemu_spin_unlock(&env->tlb_c.lock); 318 + qemu_spin_unlock(&env_tlb(env)->c.lock); 317 319 318 320 cpu_tb_jmp_cache_clear(cpu); 319 321 320 322 if (to_clean == ALL_MMUIDX_BITS) { 321 - atomic_set(&env->tlb_c.full_flush_count, 322 - env->tlb_c.full_flush_count + 1); 323 + atomic_set(&env_tlb(env)->c.full_flush_count, 324 + env_tlb(env)->c.full_flush_count + 1); 323 325 } else { 324 - atomic_set(&env->tlb_c.part_flush_count, 325 - env->tlb_c.part_flush_count + ctpop16(to_clean)); 326 + atomic_set(&env_tlb(env)->c.part_flush_count, 327 + env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); 326 328 if (to_clean != asked) { 327 - atomic_set(&env->tlb_c.elide_flush_count, 328 - env->tlb_c.elide_flush_count + 329 + atomic_set(&env_tlb(env)->c.elide_flush_count, 330 + env_tlb(env)->c.elide_flush_count + 329 331 ctpop16(asked & ~to_clean)); 330 332 } 331 333 } ··· 410 412 static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, 411 413 target_ulong page) 412 414 { 415 + CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx]; 413 416 int k; 414 417 415 - assert_cpu_is_self(ENV_GET_CPU(env)); 418 + assert_cpu_is_self(env_cpu(env)); 416 419 for (k = 0; k < CPU_VTLB_SIZE; k++) { 417 - if (tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) { 420 + if (tlb_flush_entry_locked(&d->vtable[k], page)) { 418 421 tlb_n_used_entries_dec(env, mmu_idx); 419 422 } 420 423 } ··· 423 426 static void tlb_flush_page_locked(CPUArchState *env, int midx, 424 427 target_ulong page) 425 428 { 426 - target_ulong lp_addr = env->tlb_d[midx].large_page_addr; 427 - target_ulong lp_mask = env->tlb_d[midx].large_page_mask; 429 + target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr; 430 + target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask; 428 431 429 432 /* Check if we need to flush due to large pages. 
*/ 430 433 if ((page & lp_mask) == lp_addr) { ··· 459 462 tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n", 460 463 addr, mmu_idx_bitmap); 461 464 462 - qemu_spin_lock(&env->tlb_c.lock); 465 + qemu_spin_lock(&env_tlb(env)->c.lock); 463 466 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 464 467 if (test_bit(mmu_idx, &mmu_idx_bitmap)) { 465 468 tlb_flush_page_locked(env, mmu_idx, addr); 466 469 } 467 470 } 468 - qemu_spin_unlock(&env->tlb_c.lock); 471 + qemu_spin_unlock(&env_tlb(env)->c.lock); 469 472 470 473 tb_flush_jmp_cache(cpu, addr); 471 474 } ··· 609 612 int mmu_idx; 610 613 611 614 env = cpu->env_ptr; 612 - qemu_spin_lock(&env->tlb_c.lock); 615 + qemu_spin_lock(&env_tlb(env)->c.lock); 613 616 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 614 617 unsigned int i; 615 618 unsigned int n = tlb_n_entries(env, mmu_idx); 616 619 617 620 for (i = 0; i < n; i++) { 618 - tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i], start1, 619 - length); 621 + tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], 622 + start1, length); 620 623 } 621 624 622 625 for (i = 0; i < CPU_VTLB_SIZE; i++) { 623 - tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i], start1, 624 - length); 626 + tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i], 627 + start1, length); 625 628 } 626 629 } 627 - qemu_spin_unlock(&env->tlb_c.lock); 630 + qemu_spin_unlock(&env_tlb(env)->c.lock); 628 631 } 629 632 630 633 /* Called with tlb_c.lock held */ ··· 646 649 assert_cpu_is_self(cpu); 647 650 648 651 vaddr &= TARGET_PAGE_MASK; 649 - qemu_spin_lock(&env->tlb_c.lock); 652 + qemu_spin_lock(&env_tlb(env)->c.lock); 650 653 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 651 654 tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); 652 655 } ··· 654 657 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { 655 658 int k; 656 659 for (k = 0; k < CPU_VTLB_SIZE; k++) { 657 - tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr); 
660 + tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); 658 661 } 659 662 } 660 - qemu_spin_unlock(&env->tlb_c.lock); 663 + qemu_spin_unlock(&env_tlb(env)->c.lock); 661 664 } 662 665 663 666 /* Our TLB does not support large pages, so remember the area covered by ··· 665 668 static void tlb_add_large_page(CPUArchState *env, int mmu_idx, 666 669 target_ulong vaddr, target_ulong size) 667 670 { 668 - target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr; 671 + target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; 669 672 target_ulong lp_mask = ~(size - 1); 670 673 671 674 if (lp_addr == (target_ulong)-1) { ··· 675 678 /* Extend the existing region to include the new page. 676 679 This is a compromise between unnecessary flushes and 677 680 the cost of maintaining a full variable size TLB. */ 678 - lp_mask &= env->tlb_d[mmu_idx].large_page_mask; 681 + lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; 679 682 while (((lp_addr ^ vaddr) & lp_mask) != 0) { 680 683 lp_mask <<= 1; 681 684 } 682 685 } 683 - env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask; 684 - env->tlb_d[mmu_idx].large_page_mask = lp_mask; 686 + env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; 687 + env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; 685 688 } 686 689 687 690 /* Add a new TLB entry. At most one entry for a given virtual address ··· 696 699 int mmu_idx, target_ulong size) 697 700 { 698 701 CPUArchState *env = cpu->env_ptr; 702 + CPUTLB *tlb = env_tlb(env); 703 + CPUTLBDesc *desc = &tlb->d[mmu_idx]; 699 704 MemoryRegionSection *section; 700 705 unsigned int index; 701 706 target_ulong address; ··· 757 762 * a longer critical section, but this is not a concern since the TLB lock 758 763 * is unlikely to be contended. 759 764 */ 760 - qemu_spin_lock(&env->tlb_c.lock); 765 + qemu_spin_lock(&tlb->c.lock); 761 766 762 767 /* Note that the tlb is no longer clean. 
*/ 763 - env->tlb_c.dirty |= 1 << mmu_idx; 768 + tlb->c.dirty |= 1 << mmu_idx; 764 769 765 770 /* Make sure there's no cached translation for the new page. */ 766 771 tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); ··· 770 775 * different page; otherwise just overwrite the stale data. 771 776 */ 772 777 if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { 773 - unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE; 774 - CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx]; 778 + unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; 779 + CPUTLBEntry *tv = &desc->vtable[vidx]; 775 780 776 781 /* Evict the old entry into the victim tlb. */ 777 782 copy_tlb_helper_locked(tv, te); 778 - env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; 783 + desc->viotlb[vidx] = desc->iotlb[index]; 779 784 tlb_n_used_entries_dec(env, mmu_idx); 780 785 } 781 786 ··· 792 797 * subtract here is that of the page base, and not the same as the 793 798 * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). 
794 799 */ 795 - env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page; 796 - env->iotlb[mmu_idx][index].attrs = attrs; 800 + desc->iotlb[index].addr = iotlb - vaddr_page; 801 + desc->iotlb[index].attrs = attrs; 797 802 798 803 /* Now calculate the new entry */ 799 804 tn.addend = addend - vaddr_page; ··· 829 834 830 835 copy_tlb_helper_locked(te, &tn); 831 836 tlb_n_used_entries_inc(env, mmu_idx); 832 - qemu_spin_unlock(&env->tlb_c.lock); 837 + qemu_spin_unlock(&tlb->c.lock); 833 838 } 834 839 835 840 /* Add a new TLB entry, but without specifying the memory ··· 878 883 int mmu_idx, target_ulong addr, uintptr_t retaddr, 879 884 MMUAccessType access_type, int size) 880 885 { 881 - CPUState *cpu = ENV_GET_CPU(env); 886 + CPUState *cpu = env_cpu(env); 882 887 hwaddr mr_offset; 883 888 MemoryRegionSection *section; 884 889 MemoryRegion *mr; ··· 922 927 int mmu_idx, uint64_t val, target_ulong addr, 923 928 uintptr_t retaddr, int size) 924 929 { 925 - CPUState *cpu = ENV_GET_CPU(env); 930 + CPUState *cpu = env_cpu(env); 926 931 hwaddr mr_offset; 927 932 MemoryRegionSection *section; 928 933 MemoryRegion *mr; ··· 974 979 { 975 980 size_t vidx; 976 981 977 - assert_cpu_is_self(ENV_GET_CPU(env)); 982 + assert_cpu_is_self(env_cpu(env)); 978 983 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { 979 - CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx]; 980 - target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs); 984 + CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; 985 + target_ulong cmp; 986 + 987 + /* elt_ofs might correspond to .addr_write, so use atomic_read */ 988 + #if TCG_OVERSIZED_GUEST 989 + cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); 990 + #else 991 + cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); 992 + #endif 981 993 982 994 if (cmp == page) { 983 995 /* Found entry in victim tlb, swap tlb and iotlb. 
*/ 984 - CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index]; 996 + CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; 985 997 986 - qemu_spin_lock(&env->tlb_c.lock); 998 + qemu_spin_lock(&env_tlb(env)->c.lock); 987 999 copy_tlb_helper_locked(&tmptlb, tlb); 988 1000 copy_tlb_helper_locked(tlb, vtlb); 989 1001 copy_tlb_helper_locked(vtlb, &tmptlb); 990 - qemu_spin_unlock(&env->tlb_c.lock); 1002 + qemu_spin_unlock(&env_tlb(env)->c.lock); 991 1003 992 - CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index]; 993 - CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx]; 1004 + CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; 1005 + CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; 994 1006 tmpio = *io; *io = *vio; *vio = tmpio; 995 1007 return true; 996 1008 } ··· 1017 1029 1018 1030 if (unlikely(!tlb_hit(entry->addr_code, addr))) { 1019 1031 if (!VICTIM_TLB_HIT(addr_code, addr)) { 1020 - tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 1032 + tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); 1021 1033 index = tlb_index(env, mmu_idx, addr); 1022 1034 entry = tlb_entry(env, mmu_idx, addr); 1023 1035 } ··· 1055 1067 if (!tlb_hit(tlb_addr_write(entry), addr)) { 1056 1068 /* TLB entry is for a different page */ 1057 1069 if (!VICTIM_TLB_HIT(addr_write, addr)) { 1058 - tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 1070 + tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 1059 1071 mmu_idx, retaddr); 1060 1072 } 1061 1073 } ··· 1089 1101 uintptr_t index = tlb_index(env, mmu_idx, addr); 1090 1102 1091 1103 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) { 1092 - CPUState *cs = ENV_GET_CPU(env); 1104 + CPUState *cs = env_cpu(env); 1093 1105 CPUClass *cc = CPU_GET_CLASS(cs); 1094 1106 1095 1107 if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) { ··· 1132 1144 /* Enforce guest required alignment. 
*/ 1133 1145 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { 1134 1146 /* ??? Maybe indicate atomic op to cpu_unaligned_access */ 1135 - cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, 1147 + cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1136 1148 mmu_idx, retaddr); 1137 1149 } 1138 1150 ··· 1148 1160 /* Check TLB entry and enforce page permissions. */ 1149 1161 if (!tlb_hit(tlb_addr, addr)) { 1150 1162 if (!VICTIM_TLB_HIT(addr_write, addr)) { 1151 - tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE, 1163 + tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_STORE, 1152 1164 mmu_idx, retaddr); 1153 1165 index = tlb_index(env, mmu_idx, addr); 1154 1166 tlbe = tlb_entry(env, mmu_idx, addr); ··· 1165 1177 1166 1178 /* Let the guest notice RMW on a write-only page. */ 1167 1179 if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { 1168 - tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD, 1180 + tlb_fill(env_cpu(env), addr, 1 << s_bits, MMU_DATA_LOAD, 1169 1181 mmu_idx, retaddr); 1170 1182 /* Since we don't support reads and writes to different addresses, 1171 1183 and we do have the proper page loaded for write, this shouldn't ··· 1178 1190 ndi->active = false; 1179 1191 if (unlikely(tlb_addr & TLB_NOTDIRTY)) { 1180 1192 ndi->active = true; 1181 - memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr, 1193 + memory_notdirty_write_prepare(ndi, env_cpu(env), addr, 1182 1194 qemu_ram_addr_from_host_nofail(hostaddr), 1183 1195 1 << s_bits); 1184 1196 } ··· 1186 1198 return hostaddr; 1187 1199 1188 1200 stop_the_world: 1189 - cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); 1201 + cpu_loop_exit_atomic(env_cpu(env), retaddr); 1190 1202 } 1191 1203 1192 1204 #ifdef TARGET_WORDS_BIGENDIAN ··· 1251 1263 1252 1264 /* Handle CPU specific unaligned behaviour */ 1253 1265 if (addr & ((1 << a_bits) - 1)) { 1254 - cpu_unaligned_access(ENV_GET_CPU(env), addr, access_type, 1266 + cpu_unaligned_access(env_cpu(env), 
addr, access_type, 1255 1267 mmu_idx, retaddr); 1256 1268 } 1257 1269 ··· 1259 1271 if (!tlb_hit(tlb_addr, addr)) { 1260 1272 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1261 1273 addr & TARGET_PAGE_MASK)) { 1262 - tlb_fill(ENV_GET_CPU(env), addr, size, 1274 + tlb_fill(env_cpu(env), addr, size, 1263 1275 access_type, mmu_idx, retaddr); 1264 1276 index = tlb_index(env, mmu_idx, addr); 1265 1277 entry = tlb_entry(env, mmu_idx, addr); ··· 1280 1292 * repeat the MMU check here. This tlb_fill() call might 1281 1293 * longjump out if this access should cause a guest exception. 1282 1294 */ 1283 - tlb_fill(ENV_GET_CPU(env), addr, size, 1295 + tlb_fill(env_cpu(env), addr, size, 1284 1296 access_type, mmu_idx, retaddr); 1285 1297 index = tlb_index(env, mmu_idx, addr); 1286 1298 entry = tlb_entry(env, mmu_idx, addr); ··· 1293 1305 } 1294 1306 } 1295 1307 1296 - res = io_readx(env, &env->iotlb[mmu_idx][index], mmu_idx, addr, 1297 - retaddr, access_type, size); 1308 + res = io_readx(env, &env_tlb(env)->d[mmu_idx].iotlb[index], 1309 + mmu_idx, addr, retaddr, access_type, size); 1298 1310 return handle_bswap(res, size, big_endian); 1299 1311 } 1300 1312 ··· 1499 1511 1500 1512 /* Handle CPU specific unaligned behaviour */ 1501 1513 if (addr & ((1 << a_bits) - 1)) { 1502 - cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE, 1514 + cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, 1503 1515 mmu_idx, retaddr); 1504 1516 } 1505 1517 ··· 1507 1519 if (!tlb_hit(tlb_addr, addr)) { 1508 1520 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, 1509 1521 addr & TARGET_PAGE_MASK)) { 1510 - tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 1522 + tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 1511 1523 mmu_idx, retaddr); 1512 1524 index = tlb_index(env, mmu_idx, addr); 1513 1525 entry = tlb_entry(env, mmu_idx, addr); ··· 1528 1540 * repeat the MMU check here. This tlb_fill() call might 1529 1541 * longjump out if this access should cause a guest exception. 
1530 1542 */ 1531 - tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE, 1543 + tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, 1532 1544 mmu_idx, retaddr); 1533 1545 index = tlb_index(env, mmu_idx, addr); 1534 1546 entry = tlb_entry(env, mmu_idx, addr); ··· 1541 1553 } 1542 1554 } 1543 1555 1544 - io_writex(env, &env->iotlb[mmu_idx][index], mmu_idx, 1556 + io_writex(env, &env_tlb(env)->d[mmu_idx].iotlb[index], mmu_idx, 1545 1557 handle_bswap(val, size, big_endian), 1546 1558 addr, retaddr, size); 1547 1559 return; ··· 1568 1580 if (!tlb_hit_page(tlb_addr2, page2) 1569 1581 && !victim_tlb_hit(env, mmu_idx, index2, tlb_off, 1570 1582 page2 & TARGET_PAGE_MASK)) { 1571 - tlb_fill(ENV_GET_CPU(env), page2, size, MMU_DATA_STORE, 1583 + tlb_fill(env_cpu(env), page2, size, MMU_DATA_STORE, 1572 1584 mmu_idx, retaddr); 1573 1585 } 1574 1586
+2 -4
accel/tcg/tcg-all.c
··· 28 28 #include "sysemu/sysemu.h" 29 29 #include "qom/object.h" 30 30 #include "qemu-common.h" 31 - #include "qom/cpu.h" 31 + #include "cpu.h" 32 32 #include "sysemu/cpus.h" 33 33 #include "qemu/main-loop.h" 34 34 35 35 unsigned long tcg_tb_size; 36 36 37 - #ifndef CONFIG_USER_ONLY 38 37 /* mask must never be zero, except for A20 change call */ 39 38 static void tcg_handle_interrupt(CPUState *cpu, int mask) 40 39 { ··· 51 50 if (!qemu_cpu_is_self(cpu)) { 52 51 qemu_cpu_kick(cpu); 53 52 } else { 54 - atomic_set(&cpu->icount_decr.u16.high, -1); 53 + atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); 55 54 if (use_icount && 56 55 !cpu->can_do_io 57 56 && (mask & ~old_mask) != 0) { ··· 59 58 } 60 59 } 61 60 } 62 - #endif 63 61 64 62 static int tcg_init(MachineState *ms) 65 63 {
+2 -2
accel/tcg/tcg-runtime.c
··· 146 146 147 147 void *HELPER(lookup_tb_ptr)(CPUArchState *env) 148 148 { 149 - CPUState *cpu = ENV_GET_CPU(env); 149 + CPUState *cpu = env_cpu(env); 150 150 TranslationBlock *tb; 151 151 target_ulong cs_base, pc; 152 152 uint32_t flags; ··· 165 165 166 166 void HELPER(exit_atomic)(CPUArchState *env) 167 167 { 168 - cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC()); 168 + cpu_loop_exit_atomic(env_cpu(env), GETPC()); 169 169 }
+5 -5
accel/tcg/translate-all.c
··· 364 364 assert(use_icount); 365 365 /* Reset the cycle counter to the start of the block 366 366 and shift if to the number of actually executed instructions */ 367 - cpu->icount_decr.u16.low += num_insns - i; 367 + cpu_neg(cpu)->icount_decr.u16.low += num_insns - i; 368 368 } 369 369 restore_state_to_opc(env, tb, data); 370 370 ··· 1732 1732 1733 1733 tcg_func_start(tcg_ctx); 1734 1734 1735 - tcg_ctx->cpu = ENV_GET_CPU(env); 1735 + tcg_ctx->cpu = env_cpu(env); 1736 1736 gen_intermediate_code(cpu, tb, max_insns); 1737 1737 tcg_ctx->cpu = NULL; 1738 1738 ··· 2200 2200 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 2201 2201 && env->active_tc.PC != tb->pc) { 2202 2202 env->active_tc.PC -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); 2203 - cpu->icount_decr.u16.low++; 2203 + cpu_neg(cpu)->icount_decr.u16.low++; 2204 2204 env->hflags &= ~MIPS_HFLAG_BMASK; 2205 2205 n = 2; 2206 2206 } ··· 2208 2208 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0 2209 2209 && env->pc != tb->pc) { 2210 2210 env->pc -= 2; 2211 - cpu->icount_decr.u16.low++; 2211 + cpu_neg(cpu)->icount_decr.u16.low++; 2212 2212 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL); 2213 2213 n = 2; 2214 2214 } ··· 2382 2382 { 2383 2383 g_assert(qemu_mutex_iothread_locked()); 2384 2384 cpu->interrupt_request |= mask; 2385 - atomic_set(&cpu->icount_decr.u16.high, -1); 2385 + atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1); 2386 2386 } 2387 2387 2388 2388 /*
+1 -1
accel/tcg/user-exec.c
··· 680 680 { 681 681 /* Enforce qemu required alignment. */ 682 682 if (unlikely(addr & (size - 1))) { 683 - cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr); 683 + cpu_loop_exit_atomic(env_cpu(env), retaddr); 684 684 } 685 685 helper_retaddr = retaddr; 686 686 return g2h(addr);
+2 -3
bsd-user/main.c
··· 140 140 141 141 void cpu_loop(CPUX86State *env) 142 142 { 143 - X86CPU *cpu = x86_env_get_cpu(env); 144 - CPUState *cs = CPU(cpu); 143 + CPUState *cs = env_cpu(env); 145 144 int trapnr; 146 145 abi_ulong pc; 147 146 //target_siginfo_t info; ··· 487 486 488 487 void cpu_loop(CPUSPARCState *env) 489 488 { 490 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 489 + CPUState *cs = env_cpu(env); 491 490 int trapnr, ret, syscall_nr; 492 491 //target_siginfo_t info; 493 492
+3 -3
bsd-user/syscall.c
··· 315 315 abi_long arg5, abi_long arg6, abi_long arg7, 316 316 abi_long arg8) 317 317 { 318 - CPUState *cpu = ENV_GET_CPU(cpu_env); 318 + CPUState *cpu = env_cpu(cpu_env); 319 319 abi_long ret; 320 320 void *p; 321 321 ··· 413 413 abi_long arg2, abi_long arg3, abi_long arg4, 414 414 abi_long arg5, abi_long arg6) 415 415 { 416 - CPUState *cpu = ENV_GET_CPU(cpu_env); 416 + CPUState *cpu = env_cpu(cpu_env); 417 417 abi_long ret; 418 418 void *p; 419 419 ··· 488 488 abi_long arg2, abi_long arg3, abi_long arg4, 489 489 abi_long arg5, abi_long arg6) 490 490 { 491 - CPUState *cpu = ENV_GET_CPU(cpu_env); 491 + CPUState *cpu = env_cpu(cpu_env); 492 492 abi_long ret; 493 493 void *p; 494 494
+5 -4
cpus.c
··· 239 239 */ 240 240 static int64_t cpu_get_icount_executed(CPUState *cpu) 241 241 { 242 - return cpu->icount_budget - (cpu->icount_decr.u16.low + cpu->icount_extra); 242 + return (cpu->icount_budget - 243 + (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra)); 243 244 } 244 245 245 246 /* ··· 1389 1390 * each vCPU execution. However u16.high can be raised 1390 1391 * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt 1391 1392 */ 1392 - g_assert(cpu->icount_decr.u16.low == 0); 1393 + g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0); 1393 1394 g_assert(cpu->icount_extra == 0); 1394 1395 1395 1396 cpu->icount_budget = tcg_get_icount_limit(); 1396 1397 insns_left = MIN(0xffff, cpu->icount_budget); 1397 - cpu->icount_decr.u16.low = insns_left; 1398 + cpu_neg(cpu)->icount_decr.u16.low = insns_left; 1398 1399 cpu->icount_extra = cpu->icount_budget - insns_left; 1399 1400 1400 1401 replay_mutex_lock(); ··· 1408 1409 cpu_update_icount(cpu); 1409 1410 1410 1411 /* Reset the counters */ 1411 - cpu->icount_decr.u16.low = 0; 1412 + cpu_neg(cpu)->icount_decr.u16.low = 0; 1412 1413 cpu->icount_extra = 0; 1413 1414 cpu->icount_budget = 0; 1414 1415
+2 -2
docs/devel/tracing.txt
··· 434 434 /* trace emitted at this point */ 435 435 trace_foo(0xd1); 436 436 /* trace emitted at this point */ 437 - trace_bar(ENV_GET_CPU(env), 0xd2); 437 + trace_bar(env_cpu(env), 0xd2); 438 438 /* trace emitted at this point (env) and when guest code is executed (cpu_env) */ 439 - trace_baz_tcg(ENV_GET_CPU(env), cpu_env, 0xd3); 439 + trace_baz_tcg(env_cpu(env), cpu_env, 0xd3); 440 440 } 441 441 442 442 If the translating vCPU has address 0xc1 and code is later executed by vCPU
+2 -2
hw/i386/kvmvapic.c
··· 152 152 153 153 static int find_real_tpr_addr(VAPICROMState *s, CPUX86State *env) 154 154 { 155 - CPUState *cs = CPU(x86_env_get_cpu(env)); 155 + CPUState *cs = env_cpu(env); 156 156 hwaddr paddr; 157 157 target_ulong addr; 158 158 ··· 279 279 280 280 static int update_rom_mapping(VAPICROMState *s, CPUX86State *env, target_ulong ip) 281 281 { 282 - CPUState *cs = CPU(x86_env_get_cpu(env)); 282 + CPUState *cs = env_cpu(env); 283 283 hwaddr paddr; 284 284 uint32_t rom_state_vaddr; 285 285 uint32_t pos, patch, offset;
+1 -1
hw/i386/pc.c
··· 406 406 /* IRQ handling */ 407 407 int cpu_get_pic_interrupt(CPUX86State *env) 408 408 { 409 - X86CPU *cpu = x86_env_get_cpu(env); 409 + X86CPU *cpu = env_archcpu(env); 410 410 int intno; 411 411 412 412 if (!kvm_irqchip_in_kernel()) {
+1 -1
hw/intc/mips_gic.c
··· 44 44 GIC_VP_MASK_CMP_SHF; 45 45 } 46 46 if (kvm_enabled()) { 47 - kvm_mips_set_ipi_interrupt(mips_env_get_cpu(gic->vps[vp].env), 47 + kvm_mips_set_ipi_interrupt(env_archcpu(gic->vps[vp].env), 48 48 pin + GIC_CPU_PIN_OFFSET, 49 49 ored_level); 50 50 } else {
+1 -1
hw/mips/mips_int.c
··· 76 76 qemu_irq *qi; 77 77 int i; 78 78 79 - qi = qemu_allocate_irqs(cpu_mips_irq_request, mips_env_get_cpu(env), 8); 79 + qi = qemu_allocate_irqs(cpu_mips_irq_request, env_archcpu(env), 8); 80 80 for (i = 0; i < 8; i++) { 81 81 env->irq[i] = qi[i]; 82 82 }
+1 -4
hw/nios2/cpu_pic.c
··· 54 54 55 55 void nios2_check_interrupts(CPUNios2State *env) 56 56 { 57 - Nios2CPU *cpu = nios2_env_get_cpu(env); 58 - CPUState *cs = CPU(cpu); 59 - 60 57 if (env->irq_pending) { 61 58 env->irq_pending = 0; 62 - cpu_interrupt(cs, CPU_INTERRUPT_HARD); 59 + cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HARD); 63 60 } 64 61 } 65 62
+9 -9
hw/ppc/ppc.c
··· 385 385 386 386 void store_40x_dbcr0(CPUPPCState *env, uint32_t val) 387 387 { 388 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 388 + PowerPCCPU *cpu = env_archcpu(env); 389 389 390 390 switch ((val >> 28) & 0x3) { 391 391 case 0x0: ··· 785 785 786 786 target_ulong cpu_ppc_load_hdecr(CPUPPCState *env) 787 787 { 788 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 788 + PowerPCCPU *cpu = env_archcpu(env); 789 789 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 790 790 ppc_tb_t *tb_env = env->tb_env; 791 791 uint64_t hdecr; ··· 923 923 924 924 void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value) 925 925 { 926 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 926 + PowerPCCPU *cpu = env_archcpu(env); 927 927 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 928 928 int nr_bits = 32; 929 929 ··· 955 955 956 956 void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value) 957 957 { 958 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 958 + PowerPCCPU *cpu = env_archcpu(env); 959 959 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 960 960 961 961 _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value, ··· 980 980 static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq) 981 981 { 982 982 CPUPPCState *env = opaque; 983 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 983 + PowerPCCPU *cpu = env_archcpu(env); 984 984 ppc_tb_t *tb_env = env->tb_env; 985 985 986 986 tb_env->tb_freq = freq; ··· 1095 1095 /* Set up (once) timebase frequency (in Hz) */ 1096 1096 clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq) 1097 1097 { 1098 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1098 + PowerPCCPU *cpu = env_archcpu(env); 1099 1099 ppc_tb_t *tb_env; 1100 1100 1101 1101 tb_env = g_malloc0(sizeof(ppc_tb_t)); ··· 1165 1165 uint64_t now, next; 1166 1166 1167 1167 env = opaque; 1168 - cpu = ppc_env_get_cpu(env); 1168 + cpu = env_archcpu(env); 1169 1169 tb_env = env->tb_env; 1170 1170 ppc40x_timer = tb_env->opaque; 1171 1171 now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); ··· 1235 1235 
ppc40x_timer_t *ppc40x_timer; 1236 1236 1237 1237 env = opaque; 1238 - cpu = ppc_env_get_cpu(env); 1238 + cpu = env_archcpu(env); 1239 1239 tb_env = env->tb_env; 1240 1240 ppc40x_timer = tb_env->opaque; 1241 1241 env->spr[SPR_40x_TSR] |= 1 << 27; ··· 1261 1261 uint64_t now, next; 1262 1262 1263 1263 env = opaque; 1264 - cpu = ppc_env_get_cpu(env); 1264 + cpu = env_archcpu(env); 1265 1265 tb_env = env->tb_env; 1266 1266 ppc40x_timer = tb_env->opaque; 1267 1267 now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+1 -1
hw/ppc/ppc405_uc.c
··· 49 49 ram_addr_t ppc405_set_bootinfo (CPUPPCState *env, ppc4xx_bd_info_t *bd, 50 50 uint32_t flags) 51 51 { 52 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 52 + CPUState *cs = env_cpu(env); 53 53 ram_addr_t bdloc; 54 54 int i, n; 55 55
+2 -2
hw/ppc/ppc_booke.c
··· 249 249 250 250 void store_booke_tsr(CPUPPCState *env, target_ulong val) 251 251 { 252 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 252 + PowerPCCPU *cpu = env_archcpu(env); 253 253 ppc_tb_t *tb_env = env->tb_env; 254 254 booke_timer_t *booke_timer = tb_env->opaque; 255 255 ··· 277 277 278 278 void store_booke_tcr(CPUPPCState *env, target_ulong val) 279 279 { 280 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 280 + PowerPCCPU *cpu = env_archcpu(env); 281 281 ppc_tb_t *tb_env = env->tb_env; 282 282 booke_timer_t *booke_timer = tb_env->opaque; 283 283
+1 -1
hw/semihosting/console.c
··· 40 40 */ 41 41 static GString *copy_user_string(CPUArchState *env, target_ulong addr, int len) 42 42 { 43 - CPUState *cpu = ENV_GET_CPU(env); 43 + CPUState *cpu = env_cpu(env); 44 44 GString *s = g_string_sized_new(len ? len : 128); 45 45 uint8_t c; 46 46 bool done;
+2 -2
hw/sparc/leon3.c
··· 159 159 160 160 env->interrupt_index = TT_EXTINT | i; 161 161 if (old_interrupt != env->interrupt_index) { 162 - cs = CPU(sparc_env_get_cpu(env)); 162 + cs = env_cpu(env); 163 163 trace_leon3_set_irq(i); 164 164 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 165 165 } ··· 167 167 } 168 168 } 169 169 } else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) { 170 - cs = CPU(sparc_env_get_cpu(env)); 170 + cs = env_cpu(env); 171 171 trace_leon3_reset_irq(env->interrupt_index & 15); 172 172 env->interrupt_index = 0; 173 173 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+2 -2
hw/sparc/sun4m.c
··· 166 166 167 167 env->interrupt_index = TT_EXTINT | i; 168 168 if (old_interrupt != env->interrupt_index) { 169 - cs = CPU(sparc_env_get_cpu(env)); 169 + cs = env_cpu(env); 170 170 trace_sun4m_cpu_interrupt(i); 171 171 cpu_interrupt(cs, CPU_INTERRUPT_HARD); 172 172 } ··· 174 174 } 175 175 } 176 176 } else if (!env->pil_in && (env->interrupt_index & ~15) == TT_EXTINT) { 177 - cs = CPU(sparc_env_get_cpu(env)); 177 + cs = env_cpu(env); 178 178 trace_sun4m_cpu_reset_interrupt(env->interrupt_index & 15); 179 179 env->interrupt_index = 0; 180 180 cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
+1 -1
hw/sparc64/sparc64.c
··· 46 46 if (env->ivec_status & 0x20) { 47 47 return; 48 48 } 49 - cs = CPU(sparc_env_get_cpu(env)); 49 + cs = env_cpu(env); 50 50 /* check if TM or SM in SOFTINT are set 51 51 setting these also causes interrupt 14 */ 52 52 if (env->softint & (SOFTINT_TIMER | SOFTINT_STIMER)) {
+1 -1
hw/unicore32/puv3.c
··· 56 56 57 57 /* Initialize interrupt controller */ 58 58 cpu_intc = qemu_allocate_irq(puv3_intc_cpu_handler, 59 - uc32_env_get_cpu(env), 0); 59 + env_archcpu(env), 0); 60 60 dev = sysbus_create_simple("puv3_intc", PUV3_INTC_BASE, cpu_intc); 61 61 for (i = 0; i < PUV3_IRQS_NR; i++) { 62 62 irqs[i] = qdev_get_gpio_in(dev, i);
+1 -1
hw/xtensa/pic_cpu.c
··· 33 33 34 34 void check_interrupts(CPUXtensaState *env) 35 35 { 36 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 36 + CPUState *cs = env_cpu(env); 37 37 int minlevel = xtensa_get_cintlevel(env); 38 38 uint32_t int_set_enabled = env->sregs[INTSET] & env->sregs[INTENABLE]; 39 39 int level;
+69
include/exec/cpu-all.h
··· 371 371 372 372 int cpu_exec(CPUState *cpu); 373 373 374 + /** 375 + * cpu_set_cpustate_pointers(cpu) 376 + * @cpu: The cpu object 377 + * 378 + * Set the generic pointers in CPUState into the outer object. 379 + */ 380 + static inline void cpu_set_cpustate_pointers(ArchCPU *cpu) 381 + { 382 + cpu->parent_obj.env_ptr = &cpu->env; 383 + cpu->parent_obj.icount_decr_ptr = &cpu->neg.icount_decr; 384 + } 385 + 386 + /** 387 + * env_archcpu(env) 388 + * @env: The architecture environment 389 + * 390 + * Return the ArchCPU associated with the environment. 391 + */ 392 + static inline ArchCPU *env_archcpu(CPUArchState *env) 393 + { 394 + return container_of(env, ArchCPU, env); 395 + } 396 + 397 + /** 398 + * env_cpu(env) 399 + * @env: The architecture environment 400 + * 401 + * Return the CPUState associated with the environment. 402 + */ 403 + static inline CPUState *env_cpu(CPUArchState *env) 404 + { 405 + return &env_archcpu(env)->parent_obj; 406 + } 407 + 408 + /** 409 + * env_neg(env) 410 + * @env: The architecture environment 411 + * 412 + * Return the CPUNegativeOffsetState associated with the environment. 413 + */ 414 + static inline CPUNegativeOffsetState *env_neg(CPUArchState *env) 415 + { 416 + ArchCPU *arch_cpu = container_of(env, ArchCPU, env); 417 + return &arch_cpu->neg; 418 + } 419 + 420 + /** 421 + * cpu_neg(cpu) 422 + * @cpu: The generic CPUState 423 + * 424 + * Return the CPUNegativeOffsetState associated with the cpu. 425 + */ 426 + static inline CPUNegativeOffsetState *cpu_neg(CPUState *cpu) 427 + { 428 + ArchCPU *arch_cpu = container_of(cpu, ArchCPU, parent_obj); 429 + return &arch_cpu->neg; 430 + } 431 + 432 + /** 433 + * env_tlb(env) 434 + * @env: The architecture environment 435 + * 436 + * Return the CPUTLB state associated with the environment. 437 + */ 438 + static inline CPUTLB *env_tlb(CPUArchState *env) 439 + { 440 + return &env_neg(env)->tlb; 441 + } 442 + 374 443 #endif /* CPU_ALL_H */
+70 -37
include/exec/cpu-defs.h
··· 33 33 #include "exec/hwaddr.h" 34 34 #endif 35 35 #include "exec/memattrs.h" 36 + #include "qom/cpu.h" 37 + 38 + #include "cpu-param.h" 36 39 37 40 #ifndef TARGET_LONG_BITS 38 - #error TARGET_LONG_BITS must be defined before including this header 41 + # error TARGET_LONG_BITS must be defined in cpu-param.h 42 + #endif 43 + #ifndef NB_MMU_MODES 44 + # error NB_MMU_MODES must be defined in cpu-param.h 45 + #endif 46 + #ifndef TARGET_PHYS_ADDR_SPACE_BITS 47 + # error TARGET_PHYS_ADDR_SPACE_BITS must be defined in cpu-param.h 48 + #endif 49 + #ifndef TARGET_VIRT_ADDR_SPACE_BITS 50 + # error TARGET_VIRT_ADDR_SPACE_BITS must be defined in cpu-param.h 51 + #endif 52 + #ifndef TARGET_PAGE_BITS 53 + # ifdef TARGET_PAGE_BITS_VARY 54 + # ifndef TARGET_PAGE_BITS_MIN 55 + # error TARGET_PAGE_BITS_MIN must be defined in cpu-param.h 56 + # endif 57 + # else 58 + # error TARGET_PAGE_BITS must be defined in cpu-param.h 59 + # endif 39 60 #endif 40 61 41 62 #define TARGET_LONG_SIZE (TARGET_LONG_BITS / 8) ··· 58 79 #endif 59 80 60 81 #if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG) 82 + 61 83 /* use a fully associative victim tlb of 8 entries */ 62 84 #define CPU_VTLB_SIZE 8 63 85 ··· 127 149 MemTxAttrs attrs; 128 150 } CPUIOTLBEntry; 129 151 130 - /** 131 - * struct CPUTLBWindow 132 - * @begin_ns: host time (in ns) at the beginning of the time window 133 - * @max_entries: maximum number of entries observed in the window 134 - * 135 - * See also: tlb_mmu_resize_locked() 152 + /* 153 + * Data elements that are per MMU mode, minus the bits accessed by 154 + * the TCG fast path. 
136 155 */ 137 - typedef struct CPUTLBWindow { 138 - int64_t begin_ns; 139 - size_t max_entries; 140 - } CPUTLBWindow; 141 - 142 156 typedef struct CPUTLBDesc { 143 157 /* 144 158 * Describe a region covering all of the large pages allocated ··· 148 162 */ 149 163 target_ulong large_page_addr; 150 164 target_ulong large_page_mask; 165 + /* host time (in ns) at the beginning of the time window */ 166 + int64_t window_begin_ns; 167 + /* maximum number of entries observed in the window */ 168 + size_t window_max_entries; 169 + size_t n_used_entries; 151 170 /* The next index to use in the tlb victim table. */ 152 171 size_t vindex; 153 - CPUTLBWindow window; 154 - size_t n_used_entries; 172 + /* The tlb victim table, in two parts. */ 173 + CPUTLBEntry vtable[CPU_VTLB_SIZE]; 174 + CPUIOTLBEntry viotlb[CPU_VTLB_SIZE]; 175 + /* The iotlb. */ 176 + CPUIOTLBEntry *iotlb; 155 177 } CPUTLBDesc; 156 178 157 179 /* 180 + * Data elements that are per MMU mode, accessed by the fast path. 181 + * The structure is aligned to aid loading the pair with one insn. 182 + */ 183 + typedef struct CPUTLBDescFast { 184 + /* Contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */ 185 + uintptr_t mask; 186 + /* The array of tlb entries itself. */ 187 + CPUTLBEntry *table; 188 + } CPUTLBDescFast QEMU_ALIGNED(2 * sizeof(void *)); 189 + 190 + /* 158 191 * Data elements that are shared between all MMU modes. 159 192 */ 160 193 typedef struct CPUTLBCommon { 161 - /* Serialize updates to tlb_table and tlb_v_table, and others as noted. */ 194 + /* Serialize updates to f.table and d.vtable, and others as noted. 
*/ 162 195 QemuSpin lock; 163 196 /* 164 197 * Within dirty, for each bit N, modifications have been made to ··· 176 209 size_t elide_flush_count; 177 210 } CPUTLBCommon; 178 211 179 - # define CPU_TLB \ 180 - /* tlb_mask[i] contains (n_entries - 1) << CPU_TLB_ENTRY_BITS */ \ 181 - uintptr_t tlb_mask[NB_MMU_MODES]; \ 182 - CPUTLBEntry *tlb_table[NB_MMU_MODES]; 183 - # define CPU_IOTLB \ 184 - CPUIOTLBEntry *iotlb[NB_MMU_MODES]; 185 - 186 212 /* 213 + * The entire softmmu tlb, for all MMU modes. 187 214 * The meaning of each of the MMU modes is defined in the target code. 188 - * Note that NB_MMU_MODES is not yet defined; we can only reference it 189 - * within preprocessor defines that will be expanded later. 215 + * Since this is placed within CPUNegativeOffsetState, the smallest 216 + * negative offsets are at the end of the struct. 190 217 */ 191 - #define CPU_COMMON_TLB \ 192 - CPUTLBCommon tlb_c; \ 193 - CPUTLBDesc tlb_d[NB_MMU_MODES]; \ 194 - CPU_TLB \ 195 - CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE]; \ 196 - CPU_IOTLB \ 197 - CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE]; 218 + typedef struct CPUTLB { 219 + CPUTLBCommon c; 220 + CPUTLBDesc d[NB_MMU_MODES]; 221 + CPUTLBDescFast f[NB_MMU_MODES]; 222 + } CPUTLB; 223 + 224 + /* This will be used by TCG backends to compute offsets. */ 225 + #define TLB_MASK_TABLE_OFS(IDX) \ 226 + ((int)offsetof(ArchCPU, neg.tlb.f[IDX]) - (int)offsetof(ArchCPU, env)) 198 227 199 228 #else 200 229 201 - #define CPU_COMMON_TLB 202 - 203 - #endif 230 + typedef struct CPUTLB { } CPUTLB; 204 231 232 + #endif /* !CONFIG_USER_ONLY && CONFIG_TCG */ 205 233 206 - #define CPU_COMMON \ 207 - /* soft mmu support */ \ 208 - CPU_COMMON_TLB \ 234 + /* 235 + * This structure must be placed in ArchCPU immedately 236 + * before CPUArchState, as a field named "neg". 237 + */ 238 + typedef struct CPUNegativeOffsetState { 239 + CPUTLB tlb; 240 + IcountDecr icount_decr; 241 + } CPUNegativeOffsetState; 209 242 210 243 #endif
+3 -3
include/exec/cpu_ldst.h
··· 139 139 static inline uintptr_t tlb_index(CPUArchState *env, uintptr_t mmu_idx, 140 140 target_ulong addr) 141 141 { 142 - uintptr_t size_mask = env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS; 142 + uintptr_t size_mask = env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; 143 143 144 144 return (addr >> TARGET_PAGE_BITS) & size_mask; 145 145 } 146 146 147 147 static inline size_t tlb_n_entries(CPUArchState *env, uintptr_t mmu_idx) 148 148 { 149 - return (env->tlb_mask[mmu_idx] >> CPU_TLB_ENTRY_BITS) + 1; 149 + return (env_tlb(env)->f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS) + 1; 150 150 } 151 151 152 152 /* Find the TLB entry corresponding to the mmu_idx + address pair. */ 153 153 static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, 154 154 target_ulong addr) 155 155 { 156 - return &env->tlb_table[mmu_idx][tlb_index(env, mmu_idx, addr)]; 156 + return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)]; 157 157 } 158 158 159 159 #ifdef MMU_MODE0_SUFFIX
+3 -3
include/exec/cpu_ldst_template.h
··· 89 89 90 90 #if !defined(SOFTMMU_CODE_ACCESS) 91 91 trace_guest_mem_before_exec( 92 - ENV_GET_CPU(env), ptr, 92 + env_cpu(env), ptr, 93 93 trace_mem_build_info(SHIFT, false, MO_TE, false)); 94 94 #endif 95 95 ··· 128 128 129 129 #if !defined(SOFTMMU_CODE_ACCESS) 130 130 trace_guest_mem_before_exec( 131 - ENV_GET_CPU(env), ptr, 131 + env_cpu(env), ptr, 132 132 trace_mem_build_info(SHIFT, true, MO_TE, false)); 133 133 #endif 134 134 ··· 170 170 171 171 #if !defined(SOFTMMU_CODE_ACCESS) 172 172 trace_guest_mem_before_exec( 173 - ENV_GET_CPU(env), ptr, 173 + env_cpu(env), ptr, 174 174 trace_mem_build_info(SHIFT, false, MO_TE, true)); 175 175 #endif 176 176
+3 -3
include/exec/cpu_ldst_useronly_template.h
··· 66 66 { 67 67 #if !defined(CODE_ACCESS) 68 68 trace_guest_mem_before_exec( 69 - ENV_GET_CPU(env), ptr, 69 + env_cpu(env), ptr, 70 70 trace_mem_build_info(SHIFT, false, MO_TE, false)); 71 71 #endif 72 72 return glue(glue(ld, USUFFIX), _p)(g2h(ptr)); ··· 90 90 { 91 91 #if !defined(CODE_ACCESS) 92 92 trace_guest_mem_before_exec( 93 - ENV_GET_CPU(env), ptr, 93 + env_cpu(env), ptr, 94 94 trace_mem_build_info(SHIFT, true, MO_TE, false)); 95 95 #endif 96 96 return glue(glue(lds, SUFFIX), _p)(g2h(ptr)); ··· 116 116 { 117 117 #if !defined(CODE_ACCESS) 118 118 trace_guest_mem_before_exec( 119 - ENV_GET_CPU(env), ptr, 119 + env_cpu(env), ptr, 120 120 trace_mem_build_info(SHIFT, false, MO_TE, true)); 121 121 #endif 122 122 glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
+10 -4
include/exec/gen-icount.h
··· 19 19 } 20 20 21 21 tcg_gen_ld_i32(count, cpu_env, 22 - -ENV_OFFSET + offsetof(CPUState, icount_decr.u32)); 22 + offsetof(ArchCPU, neg.icount_decr.u32) - 23 + offsetof(ArchCPU, env)); 23 24 24 25 if (tb_cflags(tb) & CF_USE_ICOUNT) { 25 26 imm = tcg_temp_new_i32(); ··· 37 38 38 39 if (tb_cflags(tb) & CF_USE_ICOUNT) { 39 40 tcg_gen_st16_i32(count, cpu_env, 40 - -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low)); 41 + offsetof(ArchCPU, neg.icount_decr.u16.low) - 42 + offsetof(ArchCPU, env)); 41 43 } 42 44 43 45 tcg_temp_free_i32(count); ··· 58 60 static inline void gen_io_start(void) 59 61 { 60 62 TCGv_i32 tmp = tcg_const_i32(1); 61 - tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io)); 63 + tcg_gen_st_i32(tmp, cpu_env, 64 + offsetof(ArchCPU, parent_obj.can_do_io) - 65 + offsetof(ArchCPU, env)); 62 66 tcg_temp_free_i32(tmp); 63 67 } 64 68 65 69 static inline void gen_io_end(void) 66 70 { 67 71 TCGv_i32 tmp = tcg_const_i32(0); 68 - tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io)); 72 + tcg_gen_st_i32(tmp, cpu_env, 73 + offsetof(ArchCPU, parent_obj.can_do_io) - 74 + offsetof(ArchCPU, env)); 69 75 tcg_temp_free_i32(tmp); 70 76 } 71 77
+8 -8
include/exec/softmmu-semi.h
··· 14 14 { 15 15 uint64_t val; 16 16 17 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 0); 17 + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 0); 18 18 return tswap64(val); 19 19 } 20 20 ··· 22 22 { 23 23 uint32_t val; 24 24 25 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 0); 25 + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 0); 26 26 return tswap32(val); 27 27 } 28 28 ··· 30 30 { 31 31 uint8_t val; 32 32 33 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &val, 1, 0); 33 + cpu_memory_rw_debug(env_cpu(env), addr, &val, 1, 0); 34 34 return val; 35 35 } 36 36 ··· 43 43 target_ulong addr, uint64_t val) 44 44 { 45 45 val = tswap64(val); 46 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 8, 1); 46 + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 8, 1); 47 47 } 48 48 49 49 static inline void softmmu_tput32(CPUArchState *env, 50 50 target_ulong addr, uint32_t val) 51 51 { 52 52 val = tswap32(val); 53 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, (uint8_t *)&val, 4, 1); 53 + cpu_memory_rw_debug(env_cpu(env), addr, (uint8_t *)&val, 4, 1); 54 54 } 55 55 #define put_user_u64(arg, p) ({ softmmu_tput64(env, p, arg) ; 0; }) 56 56 #define put_user_u32(arg, p) ({ softmmu_tput32(env, p, arg) ; 0; }) ··· 63 63 /* TODO: Make this something that isn't fixed size. */ 64 64 p = malloc(len); 65 65 if (p && copy) { 66 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 0); 66 + cpu_memory_rw_debug(env_cpu(env), addr, p, len, 0); 67 67 } 68 68 return p; 69 69 } ··· 79 79 return NULL; 80 80 } 81 81 do { 82 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, &c, 1, 0); 82 + cpu_memory_rw_debug(env_cpu(env), addr, &c, 1, 0); 83 83 addr++; 84 84 *(p++) = c; 85 85 } while (c); ··· 90 90 target_ulong len) 91 91 { 92 92 if (len) { 93 - cpu_memory_rw_debug(ENV_GET_CPU(env), addr, p, len, 1); 93 + cpu_memory_rw_debug(env_cpu(env), addr, p, len, 1); 94 94 } 95 95 free(p); 96 96 }
+18 -22
include/qom/cpu.h
··· 232 232 bool gdb_stop_before_watchpoint; 233 233 } CPUClass; 234 234 235 + /* 236 + * Low 16 bits: number of cycles left, used only in icount mode. 237 + * High 16 bits: Set to -1 to force TCG to stop executing linked TBs 238 + * for this CPU and return to its top level loop (even in non-icount mode). 239 + * This allows a single read-compare-cbranch-write sequence to test 240 + * for both decrementer underflow and exceptions. 241 + */ 242 + typedef union IcountDecr { 243 + uint32_t u32; 244 + struct { 235 245 #ifdef HOST_WORDS_BIGENDIAN 236 - typedef struct icount_decr_u16 { 237 - uint16_t high; 238 - uint16_t low; 239 - } icount_decr_u16; 246 + uint16_t high; 247 + uint16_t low; 240 248 #else 241 - typedef struct icount_decr_u16 { 242 - uint16_t low; 243 - uint16_t high; 244 - } icount_decr_u16; 249 + uint16_t low; 250 + uint16_t high; 245 251 #endif 252 + } u16; 253 + } IcountDecr; 246 254 247 255 typedef struct CPUBreakpoint { 248 256 vaddr pc; ··· 314 322 * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU 315 323 * @singlestep_enabled: Flags for single-stepping. 316 324 * @icount_extra: Instructions until next timer event. 317 - * @icount_decr: Low 16 bits: number of cycles left, only used in icount mode. 318 - * High 16 bits: Set to -1 to force TCG to stop executing linked TBs for this 319 - * CPU and return to its top level loop (even in non-icount mode). 320 - * This allows a single read-compare-cbranch-write sequence to test 321 - * for both decrementer underflow and exceptions. 322 325 * @can_do_io: Nonzero if memory-mapped IO is safe. Deterministic execution 323 326 * requires that IO only be performed on the last instruction of a TB 324 327 * so that interrupts take effect immediately. ··· 328 331 * @as: Pointer to the first AddressSpace, for the convenience of targets which 329 332 * only have a single AddressSpace 330 333 * @env_ptr: Pointer to subclass-specific CPUArchState field. 
334 + * @icount_decr_ptr: Pointer to IcountDecr field within subclass. 331 335 * @gdb_regs: Additional GDB registers. 332 336 * @gdb_num_regs: Number of total registers accessible to GDB. 333 337 * @gdb_num_g_regs: Number of registers in GDB 'g' packets. ··· 387 391 MemoryRegion *memory; 388 392 389 393 void *env_ptr; /* CPUArchState */ 394 + IcountDecr *icount_decr_ptr; 390 395 391 396 /* Accessed in parallel; all accesses must be atomic */ 392 397 struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE]; ··· 440 445 bool throttle_thread_scheduled; 441 446 442 447 bool ignore_memory_transaction_failures; 443 - 444 - /* Note that this is accessed at the start of every TB via a negative 445 - offset from AREG0. Leave this field at the end so as to make the 446 - (absolute value) offset as small as possible. This reduces code 447 - size, especially for hosts without large memory offsets. */ 448 - union { 449 - uint32_t u32; 450 - icount_decr_u16 u16; 451 - } icount_decr; 452 448 453 449 struct hax_vcpu_state *hax_vcpu; 454 450
+3 -3
linux-user/aarch64/cpu_loop.c
··· 73 73 /* AArch64 main loop */ 74 74 void cpu_loop(CPUARMState *env) 75 75 { 76 - CPUState *cs = CPU(arm_env_get_cpu(env)); 76 + CPUState *cs = env_cpu(env); 77 77 int trapnr; 78 78 abi_long ret; 79 79 target_siginfo_t info; ··· 150 150 151 151 void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) 152 152 { 153 - ARMCPU *cpu = arm_env_get_cpu(env); 154 - CPUState *cs = CPU(cpu); 153 + ARMCPU *cpu = env_archcpu(env); 154 + CPUState *cs = env_cpu(env); 155 155 TaskState *ts = cs->opaque; 156 156 struct image_info *info = ts->info; 157 157 int i;
+2 -2
linux-user/aarch64/signal.c
··· 314 314 break; 315 315 316 316 case TARGET_SVE_MAGIC: 317 - if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { 317 + if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 318 318 vq = (env->vfp.zcr_el[1] & 0xf) + 1; 319 319 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); 320 320 if (!sve && size == sve_size) { ··· 433 433 &layout); 434 434 435 435 /* SVE state needs saving only if it exists. */ 436 - if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { 436 + if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 437 437 vq = (env->vfp.zcr_el[1] & 0xf) + 1; 438 438 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); 439 439 sve_ofs = alloc_sigframe_space(sve_size, &layout);
+1 -1
linux-user/alpha/cpu_loop.c
··· 23 23 24 24 void cpu_loop(CPUAlphaState *env) 25 25 { 26 - CPUState *cs = CPU(alpha_env_get_cpu(env)); 26 + CPUState *cs = env_cpu(env); 27 27 int trapnr; 28 28 target_siginfo_t info; 29 29 abi_long sysret;
+2 -2
linux-user/arm/cpu_loop.c
··· 206 206 207 207 void cpu_loop(CPUARMState *env) 208 208 { 209 - CPUState *cs = CPU(arm_env_get_cpu(env)); 209 + CPUState *cs = env_cpu(env); 210 210 int trapnr; 211 211 unsigned int n, insn; 212 212 target_siginfo_t info; ··· 423 423 424 424 void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) 425 425 { 426 - CPUState *cpu = ENV_GET_CPU(env); 426 + CPUState *cpu = env_cpu(env); 427 427 TaskState *ts = cpu->opaque; 428 428 struct image_info *info = ts->info; 429 429 int i;
+1 -1
linux-user/cpu_loop-common.h
··· 24 24 25 25 #define EXCP_DUMP(env, fmt, ...) \ 26 26 do { \ 27 - CPUState *cs = ENV_GET_CPU(env); \ 27 + CPUState *cs = env_cpu(env); \ 28 28 fprintf(stderr, fmt , ## __VA_ARGS__); \ 29 29 cpu_dump_state(cs, stderr, 0); \ 30 30 if (qemu_log_separate()) { \
+2 -2
linux-user/cris/cpu_loop.c
··· 23 23 24 24 void cpu_loop(CPUCRISState *env) 25 25 { 26 - CPUState *cs = CPU(cris_env_get_cpu(env)); 26 + CPUState *cs = env_cpu(env); 27 27 int trapnr, ret; 28 28 target_siginfo_t info; 29 29 ··· 83 83 84 84 void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) 85 85 { 86 - CPUState *cpu = ENV_GET_CPU(env); 86 + CPUState *cpu = env_cpu(env); 87 87 TaskState *ts = cpu->opaque; 88 88 struct image_info *info = ts->info; 89 89
+3 -3
linux-user/elfload.c
··· 3377 3377 3378 3378 static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env) 3379 3379 { 3380 - CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 3380 + CPUState *cpu = env_cpu((CPUArchState *)env); 3381 3381 TaskState *ts = (TaskState *)cpu->opaque; 3382 3382 struct elf_thread_status *ets; 3383 3383 ··· 3407 3407 long signr, const CPUArchState *env) 3408 3408 { 3409 3409 #define NUMNOTES 3 3410 - CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 3410 + CPUState *cpu = env_cpu((CPUArchState *)env); 3411 3411 TaskState *ts = (TaskState *)cpu->opaque; 3412 3412 int i; 3413 3413 ··· 3531 3531 */ 3532 3532 static int elf_core_dump(int signr, const CPUArchState *env) 3533 3533 { 3534 - const CPUState *cpu = ENV_GET_CPU((CPUArchState *)env); 3534 + const CPUState *cpu = env_cpu((CPUArchState *)env); 3535 3535 const TaskState *ts = (const TaskState *)cpu->opaque; 3536 3536 struct vm_area_struct *vma = NULL; 3537 3537 char corefile[PATH_MAX];
+1 -1
linux-user/hppa/cpu_loop.c
··· 105 105 106 106 void cpu_loop(CPUHPPAState *env) 107 107 { 108 - CPUState *cs = CPU(hppa_env_get_cpu(env)); 108 + CPUState *cs = env_cpu(env); 109 109 target_siginfo_t info; 110 110 abi_ulong ret; 111 111 int trapnr;
+1 -1
linux-user/i386/cpu_loop.c
··· 82 82 83 83 void cpu_loop(CPUX86State *env) 84 84 { 85 - CPUState *cs = CPU(x86_env_get_cpu(env)); 85 + CPUState *cs = env_cpu(env); 86 86 int trapnr; 87 87 abi_ulong pc; 88 88 abi_ulong ret;
+1 -1
linux-user/i386/signal.c
··· 198 198 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask, 199 199 abi_ulong fpstate_addr) 200 200 { 201 - CPUState *cs = CPU(x86_env_get_cpu(env)); 201 + CPUState *cs = env_cpu(env); 202 202 #ifndef TARGET_X86_64 203 203 uint16_t magic; 204 204
+1 -2
linux-user/m68k-sim.c
··· 91 91 #define ARG(x) tswap32(args[x]) 92 92 void do_m68k_simcall(CPUM68KState *env, int nr) 93 93 { 94 - M68kCPU *cpu = m68k_env_get_cpu(env); 95 94 uint32_t *args; 96 95 97 96 args = (uint32_t *)(unsigned long)(env->aregs[7] + 4); ··· 159 158 check_err(env, lseek(ARG(0), (int32_t)ARG(1), ARG(2))); 160 159 break; 161 160 default: 162 - cpu_abort(CPU(cpu), "Unsupported m68k sim syscall %d\n", nr); 161 + cpu_abort(env_cpu(env), "Unsupported m68k sim syscall %d\n", nr); 163 162 } 164 163 }
+2 -2
linux-user/m68k/cpu_loop.c
··· 23 23 24 24 void cpu_loop(CPUM68KState *env) 25 25 { 26 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 26 + CPUState *cs = env_cpu(env); 27 27 int trapnr; 28 28 unsigned int n; 29 29 target_siginfo_t info; ··· 130 130 131 131 void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) 132 132 { 133 - CPUState *cpu = ENV_GET_CPU(env); 133 + CPUState *cpu = env_cpu(env); 134 134 TaskState *ts = cpu->opaque; 135 135 struct image_info *info = ts->info; 136 136
+1 -1
linux-user/m68k/target_cpu.h
··· 31 31 32 32 static inline void cpu_set_tls(CPUM68KState *env, target_ulong newtls) 33 33 { 34 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 34 + CPUState *cs = env_cpu(env); 35 35 TaskState *ts = cs->opaque; 36 36 37 37 ts->tp_value = newtls;
+1 -1
linux-user/main.c
··· 180 180 181 181 CPUArchState *cpu_copy(CPUArchState *env) 182 182 { 183 - CPUState *cpu = ENV_GET_CPU(env); 183 + CPUState *cpu = env_cpu(env); 184 184 CPUState *new_cpu = cpu_create(cpu_type); 185 185 CPUArchState *new_env = new_cpu->env_ptr; 186 186 CPUBreakpoint *bp;
+1 -1
linux-user/microblaze/cpu_loop.c
··· 23 23 24 24 void cpu_loop(CPUMBState *env) 25 25 { 26 - CPUState *cs = CPU(mb_env_get_cpu(env)); 26 + CPUState *cs = env_cpu(env); 27 27 int trapnr, ret; 28 28 target_siginfo_t info; 29 29
+2 -2
linux-user/mips/cpu_loop.c
··· 425 425 426 426 void cpu_loop(CPUMIPSState *env) 427 427 { 428 - CPUState *cs = CPU(mips_env_get_cpu(env)); 428 + CPUState *cs = env_cpu(env); 429 429 target_siginfo_t info; 430 430 int trapnr; 431 431 abi_long ret; ··· 654 654 655 655 void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) 656 656 { 657 - CPUState *cpu = ENV_GET_CPU(env); 657 + CPUState *cpu = env_cpu(env); 658 658 TaskState *ts = cpu->opaque; 659 659 struct image_info *info = ts->info; 660 660 int i;
+1 -1
linux-user/nios2/cpu_loop.c
··· 23 23 24 24 void cpu_loop(CPUNios2State *env) 25 25 { 26 - CPUState *cs = ENV_GET_CPU(env); 26 + CPUState *cs = env_cpu(env); 27 27 Nios2CPU *cpu = NIOS2_CPU(cs); 28 28 target_siginfo_t info; 29 29 int trapnr, ret;
+1 -1
linux-user/openrisc/cpu_loop.c
··· 23 23 24 24 void cpu_loop(CPUOpenRISCState *env) 25 25 { 26 - CPUState *cs = CPU(openrisc_env_get_cpu(env)); 26 + CPUState *cs = env_cpu(env); 27 27 int trapnr; 28 28 abi_long ret; 29 29 target_siginfo_t info;
+1 -1
linux-user/ppc/cpu_loop.c
··· 67 67 68 68 void cpu_loop(CPUPPCState *env) 69 69 { 70 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 70 + CPUState *cs = env_cpu(env); 71 71 target_siginfo_t info; 72 72 int trapnr; 73 73 target_ulong ret;
+2 -2
linux-user/riscv/cpu_loop.c
··· 25 25 26 26 void cpu_loop(CPURISCVState *env) 27 27 { 28 - CPUState *cs = CPU(riscv_env_get_cpu(env)); 28 + CPUState *cs = env_cpu(env); 29 29 int trapnr, signum, sigcode; 30 30 target_ulong sigaddr; 31 31 target_ulong ret; ··· 116 116 117 117 void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) 118 118 { 119 - CPUState *cpu = ENV_GET_CPU(env); 119 + CPUState *cpu = env_cpu(env); 120 120 TaskState *ts = cpu->opaque; 121 121 struct image_info *info = ts->info; 122 122
+1 -1
linux-user/s390x/cpu_loop.c
··· 26 26 27 27 void cpu_loop(CPUS390XState *env) 28 28 { 29 - CPUState *cs = CPU(s390_env_get_cpu(env)); 29 + CPUState *cs = env_cpu(env); 30 30 int trapnr, n, sig; 31 31 target_siginfo_t info; 32 32 target_ulong addr;
+1 -1
linux-user/sh4/cpu_loop.c
··· 23 23 24 24 void cpu_loop(CPUSH4State *env) 25 25 { 26 - CPUState *cs = CPU(sh_env_get_cpu(env)); 26 + CPUState *cs = env_cpu(env); 27 27 int trapnr, ret; 28 28 target_siginfo_t info; 29 29
+4 -4
linux-user/signal.c
··· 626 626 int queue_signal(CPUArchState *env, int sig, int si_type, 627 627 target_siginfo_t *info) 628 628 { 629 - CPUState *cpu = ENV_GET_CPU(env); 629 + CPUState *cpu = env_cpu(env); 630 630 TaskState *ts = cpu->opaque; 631 631 632 632 trace_user_queue_signal(env, sig); ··· 651 651 void *puc) 652 652 { 653 653 CPUArchState *env = thread_cpu->env_ptr; 654 - CPUState *cpu = ENV_GET_CPU(env); 654 + CPUState *cpu = env_cpu(env); 655 655 TaskState *ts = cpu->opaque; 656 656 657 657 int sig; ··· 842 842 static void handle_pending_signal(CPUArchState *cpu_env, int sig, 843 843 struct emulated_sigtable *k) 844 844 { 845 - CPUState *cpu = ENV_GET_CPU(cpu_env); 845 + CPUState *cpu = env_cpu(cpu_env); 846 846 abi_ulong handler; 847 847 sigset_t set; 848 848 target_sigset_t target_old_set; ··· 927 927 928 928 void process_pending_signals(CPUArchState *cpu_env) 929 929 { 930 - CPUState *cpu = ENV_GET_CPU(cpu_env); 930 + CPUState *cpu = env_cpu(cpu_env); 931 931 int sig; 932 932 TaskState *ts = cpu->opaque; 933 933 sigset_t set;
+1 -1
linux-user/sparc/cpu_loop.c
··· 145 145 146 146 void cpu_loop (CPUSPARCState *env) 147 147 { 148 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 148 + CPUState *cs = env_cpu(env); 149 149 int trapnr; 150 150 abi_long ret; 151 151 target_siginfo_t info;
+13 -13
linux-user/syscall.c
··· 5484 5484 rcu_register_thread(); 5485 5485 tcg_register_thread(); 5486 5486 env = info->env; 5487 - cpu = ENV_GET_CPU(env); 5487 + cpu = env_cpu(env); 5488 5488 thread_cpu = cpu; 5489 5489 ts = (TaskState *)cpu->opaque; 5490 5490 info->tid = sys_gettid(); ··· 5514 5514 abi_ulong parent_tidptr, target_ulong newtls, 5515 5515 abi_ulong child_tidptr) 5516 5516 { 5517 - CPUState *cpu = ENV_GET_CPU(env); 5517 + CPUState *cpu = env_cpu(env); 5518 5518 int ret; 5519 5519 TaskState *ts; 5520 5520 CPUState *new_cpu; ··· 5547 5547 new_env = cpu_copy(env); 5548 5548 /* Init regs that differ from the parent. */ 5549 5549 cpu_clone_regs(new_env, newsp); 5550 - new_cpu = ENV_GET_CPU(new_env); 5550 + new_cpu = env_cpu(new_env); 5551 5551 new_cpu->opaque = ts; 5552 5552 ts->bprm = parent_ts->bprm; 5553 5553 ts->info = parent_ts->info; ··· 6654 6654 6655 6655 static int open_self_cmdline(void *cpu_env, int fd) 6656 6656 { 6657 - CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 6657 + CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 6658 6658 struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm; 6659 6659 int i; 6660 6660 ··· 6671 6671 6672 6672 static int open_self_maps(void *cpu_env, int fd) 6673 6673 { 6674 - CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 6674 + CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 6675 6675 TaskState *ts = cpu->opaque; 6676 6676 FILE *fp; 6677 6677 char *line = NULL; ··· 6720 6720 6721 6721 static int open_self_stat(void *cpu_env, int fd) 6722 6722 { 6723 - CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 6723 + CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 6724 6724 TaskState *ts = cpu->opaque; 6725 6725 abi_ulong start_stack = ts->info->start_stack; 6726 6726 int i; ··· 6757 6757 6758 6758 static int open_self_auxv(void *cpu_env, int fd) 6759 6759 { 6760 - CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env); 6760 + CPUState *cpu = env_cpu((CPUArchState *)cpu_env); 6761 6761 TaskState *ts = cpu->opaque; 6762 6762 
abi_ulong auxv = ts->info->saved_auxv; 6763 6763 abi_ulong len = ts->info->auxv_len; ··· 7042 7042 abi_long arg5, abi_long arg6, abi_long arg7, 7043 7043 abi_long arg8) 7044 7044 { 7045 - CPUState *cpu = ENV_GET_CPU(cpu_env); 7045 + CPUState *cpu = env_cpu(cpu_env); 7046 7046 abi_long ret; 7047 7047 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \ 7048 7048 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \ ··· 9781 9781 * even though the current architectural maximum is VQ=16. 9782 9782 */ 9783 9783 ret = -TARGET_EINVAL; 9784 - if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env)) 9784 + if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env)) 9785 9785 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { 9786 9786 CPUARMState *env = cpu_env; 9787 - ARMCPU *cpu = arm_env_get_cpu(env); 9787 + ARMCPU *cpu = env_archcpu(env); 9788 9788 uint32_t vq, old_vq; 9789 9789 9790 9790 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1; ··· 9801 9801 case TARGET_PR_SVE_GET_VL: 9802 9802 ret = -TARGET_EINVAL; 9803 9803 { 9804 - ARMCPU *cpu = arm_env_get_cpu(cpu_env); 9804 + ARMCPU *cpu = env_archcpu(cpu_env); 9805 9805 if (cpu_isar_feature(aa64_sve, cpu)) { 9806 9806 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16; 9807 9807 } ··· 9810 9810 case TARGET_PR_PAC_RESET_KEYS: 9811 9811 { 9812 9812 CPUARMState *env = cpu_env; 9813 - ARMCPU *cpu = arm_env_get_cpu(env); 9813 + ARMCPU *cpu = env_archcpu(env); 9814 9814 9815 9815 if (arg3 || arg4 || arg5) { 9816 9816 return -TARGET_EINVAL; ··· 11706 11706 abi_long arg5, abi_long arg6, abi_long arg7, 11707 11707 abi_long arg8) 11708 11708 { 11709 - CPUState *cpu = ENV_GET_CPU(cpu_env); 11709 + CPUState *cpu = env_cpu(cpu_env); 11710 11710 abi_long ret; 11711 11711 11712 11712 #ifdef DEBUG_ERESTARTSYS
+1 -1
linux-user/tilegx/cpu_loop.c
··· 206 206 207 207 void cpu_loop(CPUTLGState *env) 208 208 { 209 - CPUState *cs = CPU(tilegx_env_get_cpu(env)); 209 + CPUState *cs = env_cpu(env); 210 210 int trapnr; 211 211 212 212 while (1) {
+1 -1
linux-user/uname.c
··· 54 54 return "armv5te" utsname_suffix; 55 55 #elif defined(TARGET_I386) && !defined(TARGET_X86_64) 56 56 /* see arch/x86/kernel/cpu/bugs.c: check_bugs(), 386, 486, 586, 686 */ 57 - CPUState *cpu = ENV_GET_CPU((CPUX86State *)cpu_env); 57 + CPUState *cpu = env_cpu((CPUX86State *)cpu_env); 58 58 int family = object_property_get_int(OBJECT(cpu), "family", NULL); 59 59 if (family == 4) { 60 60 return "i486";
+9 -9
linux-user/vm86.c
··· 72 72 73 73 void save_v86_state(CPUX86State *env) 74 74 { 75 - CPUState *cs = CPU(x86_env_get_cpu(env)); 75 + CPUState *cs = env_cpu(env); 76 76 TaskState *ts = cs->opaque; 77 77 struct target_vm86plus_struct * target_v86; 78 78 ··· 132 132 133 133 static inline int set_IF(CPUX86State *env) 134 134 { 135 - CPUState *cs = CPU(x86_env_get_cpu(env)); 135 + CPUState *cs = env_cpu(env); 136 136 TaskState *ts = cs->opaque; 137 137 138 138 ts->v86flags |= VIF_MASK; ··· 145 145 146 146 static inline void clear_IF(CPUX86State *env) 147 147 { 148 - CPUState *cs = CPU(x86_env_get_cpu(env)); 148 + CPUState *cs = env_cpu(env); 149 149 TaskState *ts = cs->opaque; 150 150 151 151 ts->v86flags &= ~VIF_MASK; ··· 163 163 164 164 static inline int set_vflags_long(unsigned long eflags, CPUX86State *env) 165 165 { 166 - CPUState *cs = CPU(x86_env_get_cpu(env)); 166 + CPUState *cs = env_cpu(env); 167 167 TaskState *ts = cs->opaque; 168 168 169 169 set_flags(ts->v86flags, eflags, ts->v86mask); ··· 177 177 178 178 static inline int set_vflags_short(unsigned short flags, CPUX86State *env) 179 179 { 180 - CPUState *cs = CPU(x86_env_get_cpu(env)); 180 + CPUState *cs = env_cpu(env); 181 181 TaskState *ts = cs->opaque; 182 182 183 183 set_flags(ts->v86flags, flags, ts->v86mask & 0xffff); ··· 191 191 192 192 static inline unsigned int get_vflags(CPUX86State *env) 193 193 { 194 - CPUState *cs = CPU(x86_env_get_cpu(env)); 194 + CPUState *cs = env_cpu(env); 195 195 TaskState *ts = cs->opaque; 196 196 unsigned int flags; 197 197 ··· 208 208 support TSS interrupt revectoring, so this code is always executed) */ 209 209 static void do_int(CPUX86State *env, int intno) 210 210 { 211 - CPUState *cs = CPU(x86_env_get_cpu(env)); 211 + CPUState *cs = env_cpu(env); 212 212 TaskState *ts = cs->opaque; 213 213 uint32_t int_addr, segoffs, ssp; 214 214 unsigned int sp; ··· 267 267 268 268 void handle_vm86_fault(CPUX86State *env) 269 269 { 270 - CPUState *cs = CPU(x86_env_get_cpu(env)); 270 + CPUState *cs = 
env_cpu(env); 271 271 TaskState *ts = cs->opaque; 272 272 uint32_t csp, ssp; 273 273 unsigned int ip, sp, newflags, newip, newcs, opcode, intno; ··· 392 392 393 393 int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr) 394 394 { 395 - CPUState *cs = CPU(x86_env_get_cpu(env)); 395 + CPUState *cs = env_cpu(env); 396 396 TaskState *ts = cs->opaque; 397 397 struct target_vm86plus_struct * target_v86; 398 398 int ret;
+1 -1
linux-user/xtensa/cpu_loop.c
··· 123 123 124 124 void cpu_loop(CPUXtensaState *env) 125 125 { 126 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 126 + CPUState *cs = env_cpu(env); 127 127 target_siginfo_t info; 128 128 abi_ulong ret; 129 129 int trapnr;
+2 -2
qom/cpu.c
··· 115 115 atomic_set(&cpu->exit_request, 1); 116 116 /* Ensure cpu_exec will see the exit request after TCG has exited. */ 117 117 smp_wmb(); 118 - atomic_set(&cpu->icount_decr.u16.high, -1); 118 + atomic_set(&cpu->icount_decr_ptr->u16.high, -1); 119 119 } 120 120 121 121 int cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, ··· 264 264 cpu->mem_io_pc = 0; 265 265 cpu->mem_io_vaddr = 0; 266 266 cpu->icount_extra = 0; 267 - atomic_set(&cpu->icount_decr.u32, 0); 267 + atomic_set(&cpu->icount_decr_ptr->u32, 0); 268 268 cpu->can_do_io = 1; 269 269 cpu->exception_index = -1; 270 270 cpu->crash_occurred = false;
+1 -1
scripts/tracetool/format/tcg_helper_c.py
··· 25 25 if mode == "code": 26 26 return Arguments([ 27 27 # Does cast from helper requirements to tracing types 28 - ("CPUState *", "ENV_GET_CPU(%s)" % args.names()[0]), 28 + ("CPUState *", "env_cpu(%s)" % args.names()[0]), 29 29 ]) 30 30 else: 31 31 args = Arguments([
+31
target/alpha/cpu-param.h
··· 1 + /* 2 + * Alpha cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2007 Jocelyn Mayer 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef ALPHA_CPU_PARAM_H 9 + #define ALPHA_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 64 12 + #define TARGET_PAGE_BITS 13 13 + #ifdef CONFIG_USER_ONLY 14 + /* 15 + * ??? The kernel likes to give addresses in high memory. If the host has 16 + * more virtual address space than the guest, this can lead to impossible 17 + * allocations. Honor the long-standing assumption that only kernel addrs 18 + * are negative, but otherwise allow allocations anywhere. This could lead 19 + * to tricky emulation problems for programs doing tagged addressing, but 20 + * that's far fewer than encounter the impossible allocation problem. 21 + */ 22 + #define TARGET_PHYS_ADDR_SPACE_BITS 63 23 + #define TARGET_VIRT_ADDR_SPACE_BITS 63 24 + #else 25 + /* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */ 26 + #define TARGET_PHYS_ADDR_SPACE_BITS 44 27 + #define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS) 28 + #endif 29 + #define NB_MMU_MODES 3 30 + 31 + #endif
+1 -2
target/alpha/cpu.c
··· 191 191 192 192 static void alpha_cpu_initfn(Object *obj) 193 193 { 194 - CPUState *cs = CPU(obj); 195 194 AlphaCPU *cpu = ALPHA_CPU(obj); 196 195 CPUAlphaState *env = &cpu->env; 197 196 198 - cs->env_ptr = env; 197 + cpu_set_cpustate_pointers(cpu); 199 198 200 199 env->lock_addr = -1; 201 200 #if defined(CONFIG_USER_ONLY)
+5 -35
target/alpha/cpu.h
··· 22 22 23 23 #include "qemu-common.h" 24 24 #include "cpu-qom.h" 25 + #include "exec/cpu-defs.h" 25 26 26 - #define TARGET_LONG_BITS 64 27 27 #define ALIGNED_ONLY 28 28 29 - #define CPUArchState struct CPUAlphaState 30 - 31 29 /* Alpha processors have a weak memory model */ 32 30 #define TCG_GUEST_DEFAULT_MO (0) 33 - 34 - #include "exec/cpu-defs.h" 35 31 36 32 #define ICACHE_LINE_SIZE 32 37 33 #define DCACHE_LINE_SIZE 32 38 - 39 - #define TARGET_PAGE_BITS 13 40 - 41 - #ifdef CONFIG_USER_ONLY 42 - /* ??? The kernel likes to give addresses in high memory. If the host has 43 - more virtual address space than the guest, this can lead to impossible 44 - allocations. Honor the long-standing assumption that only kernel addrs 45 - are negative, but otherwise allow allocations anywhere. This could lead 46 - to tricky emulation problems for programs doing tagged addressing, but 47 - that's far fewer than encounter the impossible allocation problem. */ 48 - #define TARGET_PHYS_ADDR_SPACE_BITS 63 49 - #define TARGET_VIRT_ADDR_SPACE_BITS 63 50 - #else 51 - /* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */ 52 - #define TARGET_PHYS_ADDR_SPACE_BITS 44 53 - #define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS) 54 - #endif 55 34 56 35 /* Alpha major type */ 57 36 enum { ··· 217 196 PALcode cheats and usees the KSEG mapping for its code+data rather than 218 197 physical addresses. */ 219 198 220 - #define NB_MMU_MODES 3 221 - 222 199 #define MMU_MODE0_SUFFIX _kernel 223 200 #define MMU_MODE1_SUFFIX _user 224 201 #define MMU_KERNEL_IDX 0 ··· 274 251 /* This alarm doesn't exist in real hardware; we wish it did. 
*/ 275 252 uint64_t alarm_expire; 276 253 277 - /* Those resources are used only in QEMU core */ 278 - CPU_COMMON 279 - 280 254 int error_code; 281 255 282 256 uint32_t features; ··· 295 269 CPUState parent_obj; 296 270 /*< public >*/ 297 271 272 + CPUNegativeOffsetState neg; 298 273 CPUAlphaState env; 299 274 300 275 /* This alarm doesn't exist in real hardware; we wish it did. */ 301 276 QEMUTimer *alarm_timer; 302 277 }; 303 278 304 - static inline AlphaCPU *alpha_env_get_cpu(CPUAlphaState *env) 305 - { 306 - return container_of(env, AlphaCPU, env); 307 - } 308 - 309 - #define ENV_GET_CPU(e) CPU(alpha_env_get_cpu(e)) 310 - 311 - #define ENV_OFFSET offsetof(AlphaCPU, env) 312 279 313 280 #ifndef CONFIG_USER_ONLY 314 281 extern const struct VMStateDescription vmstate_alpha_cpu; ··· 326 293 327 294 #define cpu_list alpha_cpu_list 328 295 #define cpu_signal_handler cpu_alpha_signal_handler 296 + 297 + typedef CPUAlphaState CPUArchState; 298 + typedef AlphaCPU ArchCPU; 329 299 330 300 #include "exec/cpu-all.h" 331 301
+3 -5
target/alpha/helper.c
··· 136 136 int prot_need, int mmu_idx, 137 137 target_ulong *pphys, int *pprot) 138 138 { 139 - CPUState *cs = CPU(alpha_env_get_cpu(env)); 139 + CPUState *cs = env_cpu(env); 140 140 target_long saddr = addr; 141 141 target_ulong phys = 0; 142 142 target_ulong L1pte, L2pte, L3pte; ··· 486 486 We expect that ENV->PC has already been updated. */ 487 487 void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error) 488 488 { 489 - AlphaCPU *cpu = alpha_env_get_cpu(env); 490 - CPUState *cs = CPU(cpu); 489 + CPUState *cs = env_cpu(env); 491 490 492 491 cs->exception_index = excp; 493 492 env->error_code = error; ··· 498 497 void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr, 499 498 int excp, int error) 500 499 { 501 - AlphaCPU *cpu = alpha_env_get_cpu(env); 502 - CPUState *cs = CPU(cpu); 500 + CPUState *cs = env_cpu(env); 503 501 504 502 cs->exception_index = excp; 505 503 env->error_code = error;
+4 -4
target/alpha/sys_helper.c
··· 44 44 #ifndef CONFIG_USER_ONLY 45 45 void helper_tbia(CPUAlphaState *env) 46 46 { 47 - tlb_flush(CPU(alpha_env_get_cpu(env))); 47 + tlb_flush(env_cpu(env)); 48 48 } 49 49 50 50 void helper_tbis(CPUAlphaState *env, uint64_t p) 51 51 { 52 - tlb_flush_page(CPU(alpha_env_get_cpu(env)), p); 52 + tlb_flush_page(env_cpu(env), p); 53 53 } 54 54 55 55 void helper_tb_flush(CPUAlphaState *env) 56 56 { 57 - tb_flush(CPU(alpha_env_get_cpu(env))); 57 + tb_flush(env_cpu(env)); 58 58 } 59 59 60 60 void helper_halt(uint64_t restart) ··· 78 78 79 79 void helper_set_alarm(CPUAlphaState *env, uint64_t expire) 80 80 { 81 - AlphaCPU *cpu = alpha_env_get_cpu(env); 81 + AlphaCPU *cpu = env_archcpu(env); 82 82 83 83 if (expire) { 84 84 env->alarm_expire = expire;
+2 -2
target/arm/arm-semi.c
··· 257 257 */ 258 258 target_ulong do_arm_semihosting(CPUARMState *env) 259 259 { 260 - ARMCPU *cpu = arm_env_get_cpu(env); 261 - CPUState *cs = CPU(cpu); 260 + ARMCPU *cpu = env_archcpu(env); 261 + CPUState *cs = env_cpu(env); 262 262 target_ulong args; 263 263 target_ulong arg0, arg1, arg2, arg3; 264 264 char * s;
+34
target/arm/cpu-param.h
··· 1 + /* 2 + * ARM cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2003 Fabrice Bellard 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef ARM_CPU_PARAM_H 9 + #define ARM_CPU_PARAM_H 1 10 + 11 + #ifdef TARGET_AARCH64 12 + # define TARGET_LONG_BITS 64 13 + # define TARGET_PHYS_ADDR_SPACE_BITS 48 14 + # define TARGET_VIRT_ADDR_SPACE_BITS 48 15 + #else 16 + # define TARGET_LONG_BITS 32 17 + # define TARGET_PHYS_ADDR_SPACE_BITS 40 18 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 19 + #endif 20 + 21 + #ifdef CONFIG_USER_ONLY 22 + #define TARGET_PAGE_BITS 12 23 + #else 24 + /* 25 + * ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6 26 + * have to support 1K tiny pages. 27 + */ 28 + # define TARGET_PAGE_BITS_VARY 29 + # define TARGET_PAGE_BITS_MIN 10 30 + #endif 31 + 32 + #define NB_MMU_MODES 8 33 + 34 + #endif
+1 -2
target/arm/cpu.c
··· 697 697 698 698 static void arm_cpu_initfn(Object *obj) 699 699 { 700 - CPUState *cs = CPU(obj); 701 700 ARMCPU *cpu = ARM_CPU(obj); 702 701 703 - cs->env_ptr = &cpu->env; 702 + cpu_set_cpustate_pointers(cpu); 704 703 cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal, 705 704 g_free, cpreg_hashtable_data_destroy); 706 705
+8 -44
target/arm/cpu.h
··· 22 22 23 23 #include "kvm-consts.h" 24 24 #include "hw/registerfields.h" 25 - 26 - #if defined(TARGET_AARCH64) 27 - /* AArch64 definitions */ 28 - # define TARGET_LONG_BITS 64 29 - #else 30 - # define TARGET_LONG_BITS 32 31 - #endif 32 - 33 - /* ARM processors have a weak memory model */ 34 - #define TCG_GUEST_DEFAULT_MO (0) 35 - 36 - #define CPUArchState struct CPUARMState 37 - 38 25 #include "qemu-common.h" 39 26 #include "cpu-qom.h" 40 27 #include "exec/cpu-defs.h" 28 + 29 + /* ARM processors have a weak memory model */ 30 + #define TCG_GUEST_DEFAULT_MO (0) 41 31 42 32 #define EXCP_UDEF 1 /* undefined instruction */ 43 33 #define EXCP_SWI 2 /* software interrupt */ ··· 114 104 #define ARM_CPU_VIRQ 2 115 105 #define ARM_CPU_VFIQ 3 116 106 117 - #define NB_MMU_MODES 8 118 107 /* ARM-specific extra insn start words: 119 108 * 1: Conditional execution bits 120 109 * 2: Partial exception syndrome for data aborts ··· 656 645 /* Fields up to this point are cleared by a CPU reset */ 657 646 struct {} end_reset_fields; 658 647 659 - CPU_COMMON 660 - 661 - /* Fields after CPU_COMMON are preserved across CPU reset. */ 648 + /* Fields after this point are preserved across CPU reset. */ 662 649 663 650 /* Internal CPU feature flags. 
*/ 664 651 uint64_t features; ··· 732 719 CPUState parent_obj; 733 720 /*< public >*/ 734 721 722 + CPUNegativeOffsetState neg; 735 723 CPUARMState env; 736 724 737 725 /* Coprocessor information */ ··· 924 912 uint32_t sve_max_vq; 925 913 }; 926 914 927 - static inline ARMCPU *arm_env_get_cpu(CPUARMState *env) 928 - { 929 - return container_of(env, ARMCPU, env); 930 - } 931 - 932 915 void arm_cpu_post_init(Object *obj); 933 916 934 917 uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz); 935 - 936 - #define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e)) 937 - 938 - #define ENV_OFFSET offsetof(ARMCPU, env) 939 918 940 919 #ifndef CONFIG_USER_ONLY 941 920 extern const struct VMStateDescription vmstate_arm_cpu; ··· 2639 2618 #define ARM_CPUID_TI915T 0x54029152 2640 2619 #define ARM_CPUID_TI925T 0x54029252 2641 2620 2642 - #if defined(CONFIG_USER_ONLY) 2643 - #define TARGET_PAGE_BITS 12 2644 - #else 2645 - /* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6 2646 - * have to support 1K tiny pages. 2647 - */ 2648 - #define TARGET_PAGE_BITS_VARY 2649 - #define TARGET_PAGE_BITS_MIN 10 2650 - #endif 2651 - 2652 - #if defined(TARGET_AARCH64) 2653 - # define TARGET_PHYS_ADDR_SPACE_BITS 48 2654 - # define TARGET_VIRT_ADDR_SPACE_BITS 48 2655 - #else 2656 - # define TARGET_PHYS_ADDR_SPACE_BITS 40 2657 - # define TARGET_VIRT_ADDR_SPACE_BITS 32 2658 - #endif 2659 - 2660 2621 static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, 2661 2622 unsigned int target_el) 2662 2623 { ··· 3153 3114 return (sctlr & (cur_el ? SCTLR_EE : SCTLR_E0E)) != 0; 3154 3115 } 3155 3116 } 3117 + 3118 + typedef CPUARMState CPUArchState; 3119 + typedef ARMCPU ArchCPU; 3156 3120 3157 3121 #include "exec/cpu-all.h" 3158 3122
+1 -1
target/arm/cpu64.c
··· 43 43 #ifndef CONFIG_USER_ONLY 44 44 static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) 45 45 { 46 - ARMCPU *cpu = arm_env_get_cpu(env); 46 + ARMCPU *cpu = env_archcpu(env); 47 47 48 48 /* Number of cores is in [25:24]; otherwise we RAZ */ 49 49 return (cpu->core_count - 1) << 24;
+2 -2
target/arm/helper-a64.c
··· 1005 1005 } 1006 1006 1007 1007 qemu_mutex_lock_iothread(); 1008 - arm_call_pre_el_change_hook(arm_env_get_cpu(env)); 1008 + arm_call_pre_el_change_hook(env_archcpu(env)); 1009 1009 qemu_mutex_unlock_iothread(); 1010 1010 1011 1011 if (!return_to_aa64) { ··· 1047 1047 aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); 1048 1048 1049 1049 qemu_mutex_lock_iothread(); 1050 - arm_call_el_change_hook(arm_env_get_cpu(env)); 1050 + arm_call_el_change_hook(env_archcpu(env)); 1051 1051 qemu_mutex_unlock_iothread(); 1052 1052 1053 1053 return;
+81 -81
target/arm/helper.c
··· 227 227 228 228 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg) 229 229 { 230 - ARMCPU *cpu = arm_env_get_cpu(env); 230 + ARMCPU *cpu = env_archcpu(env); 231 231 const ARMCPRegInfo *ri; 232 232 uint32_t key; 233 233 ··· 548 548 549 549 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 550 550 { 551 - ARMCPU *cpu = arm_env_get_cpu(env); 551 + ARMCPU *cpu = env_archcpu(env); 552 552 553 553 raw_write(env, ri, value); 554 554 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ ··· 556 556 557 557 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 558 558 { 559 - ARMCPU *cpu = arm_env_get_cpu(env); 559 + ARMCPU *cpu = env_archcpu(env); 560 560 561 561 if (raw_read(env, ri) != value) { 562 562 /* Unlike real hardware the qemu TLB uses virtual addresses, ··· 570 570 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 571 571 uint64_t value) 572 572 { 573 - ARMCPU *cpu = arm_env_get_cpu(env); 573 + ARMCPU *cpu = env_archcpu(env); 574 574 575 575 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 576 576 && !extended_addresses_enabled(env)) { ··· 587 587 static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 588 588 uint64_t value) 589 589 { 590 - CPUState *cs = ENV_GET_CPU(env); 590 + CPUState *cs = env_cpu(env); 591 591 592 592 tlb_flush_all_cpus_synced(cs); 593 593 } ··· 595 595 static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 596 596 uint64_t value) 597 597 { 598 - CPUState *cs = ENV_GET_CPU(env); 598 + CPUState *cs = env_cpu(env); 599 599 600 600 tlb_flush_all_cpus_synced(cs); 601 601 } ··· 603 603 static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 604 604 uint64_t value) 605 605 { 606 - CPUState *cs = ENV_GET_CPU(env); 606 + CPUState *cs = env_cpu(env); 607 607 608 608 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 609 609 } ··· 611 611 static void 
tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 612 612 uint64_t value) 613 613 { 614 - CPUState *cs = ENV_GET_CPU(env); 614 + CPUState *cs = env_cpu(env); 615 615 616 616 tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); 617 617 } ··· 631 631 uint64_t value) 632 632 { 633 633 /* Invalidate all (TLBIALL) */ 634 - ARMCPU *cpu = arm_env_get_cpu(env); 634 + ARMCPU *cpu = env_archcpu(env); 635 635 636 636 if (tlb_force_broadcast(env)) { 637 637 tlbiall_is_write(env, NULL, value); ··· 645 645 uint64_t value) 646 646 { 647 647 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 648 - ARMCPU *cpu = arm_env_get_cpu(env); 648 + ARMCPU *cpu = env_archcpu(env); 649 649 650 650 if (tlb_force_broadcast(env)) { 651 651 tlbimva_is_write(env, NULL, value); ··· 659 659 uint64_t value) 660 660 { 661 661 /* Invalidate by ASID (TLBIASID) */ 662 - ARMCPU *cpu = arm_env_get_cpu(env); 662 + ARMCPU *cpu = env_archcpu(env); 663 663 664 664 if (tlb_force_broadcast(env)) { 665 665 tlbiasid_is_write(env, NULL, value); ··· 673 673 uint64_t value) 674 674 { 675 675 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 676 - ARMCPU *cpu = arm_env_get_cpu(env); 676 + ARMCPU *cpu = env_archcpu(env); 677 677 678 678 if (tlb_force_broadcast(env)) { 679 679 tlbimvaa_is_write(env, NULL, value); ··· 686 686 static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, 687 687 uint64_t value) 688 688 { 689 - CPUState *cs = ENV_GET_CPU(env); 689 + CPUState *cs = env_cpu(env); 690 690 691 691 tlb_flush_by_mmuidx(cs, 692 692 ARMMMUIdxBit_S12NSE1 | ··· 697 697 static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 698 698 uint64_t value) 699 699 { 700 - CPUState *cs = ENV_GET_CPU(env); 700 + CPUState *cs = env_cpu(env); 701 701 702 702 tlb_flush_by_mmuidx_all_cpus_synced(cs, 703 703 ARMMMUIdxBit_S12NSE1 | ··· 714 714 * translation information. 715 715 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 
716 716 */ 717 - CPUState *cs = ENV_GET_CPU(env); 717 + CPUState *cs = env_cpu(env); 718 718 uint64_t pageaddr; 719 719 720 720 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { ··· 729 729 static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 730 730 uint64_t value) 731 731 { 732 - CPUState *cs = ENV_GET_CPU(env); 732 + CPUState *cs = env_cpu(env); 733 733 uint64_t pageaddr; 734 734 735 735 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { ··· 745 745 static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 746 746 uint64_t value) 747 747 { 748 - CPUState *cs = ENV_GET_CPU(env); 748 + CPUState *cs = env_cpu(env); 749 749 750 750 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); 751 751 } ··· 753 753 static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 754 754 uint64_t value) 755 755 { 756 - CPUState *cs = ENV_GET_CPU(env); 756 + CPUState *cs = env_cpu(env); 757 757 758 758 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 759 759 } ··· 761 761 static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, 762 762 uint64_t value) 763 763 { 764 - CPUState *cs = ENV_GET_CPU(env); 764 + CPUState *cs = env_cpu(env); 765 765 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 766 766 767 767 tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_S1E2); ··· 770 770 static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, 771 771 uint64_t value) 772 772 { 773 - CPUState *cs = ENV_GET_CPU(env); 773 + CPUState *cs = env_cpu(env); 774 774 uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); 775 775 776 776 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ··· 1353 1353 1354 1354 static void pmu_update_irq(CPUARMState *env) 1355 1355 { 1356 - ARMCPU *cpu = arm_env_get_cpu(env); 1356 + ARMCPU *cpu = env_archcpu(env); 1357 1357 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && 1358 1358 (env->cp15.c9_pminten & 
env->cp15.c9_pmovsr)); 1359 1359 } ··· 1408 1408 if (overflow_in > 0) { 1409 1409 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1410 1410 overflow_in; 1411 - ARMCPU *cpu = arm_env_get_cpu(env); 1411 + ARMCPU *cpu = env_archcpu(env); 1412 1412 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1413 1413 } 1414 1414 #endif ··· 1457 1457 if (overflow_in > 0) { 1458 1458 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1459 1459 overflow_in; 1460 - ARMCPU *cpu = arm_env_get_cpu(env); 1460 + ARMCPU *cpu = env_archcpu(env); 1461 1461 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1462 1462 } 1463 1463 #endif ··· 1865 1865 { 1866 1866 /* Begin with base v8.0 state. */ 1867 1867 uint32_t valid_mask = 0x3fff; 1868 - ARMCPU *cpu = arm_env_get_cpu(env); 1868 + ARMCPU *cpu = env_archcpu(env); 1869 1869 1870 1870 if (arm_el_is_aa64(env, 3)) { 1871 1871 value |= SCR_FW | SCR_AW; /* these two bits are RES1. */ ··· 1902 1902 1903 1903 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1904 1904 { 1905 - ARMCPU *cpu = arm_env_get_cpu(env); 1905 + ARMCPU *cpu = env_archcpu(env); 1906 1906 1907 1907 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1908 1908 * bank ··· 1921 1921 1922 1922 static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1923 1923 { 1924 - CPUState *cs = ENV_GET_CPU(env); 1924 + CPUState *cs = env_cpu(env); 1925 1925 uint64_t hcr_el2 = arm_hcr_el2_eff(env); 1926 1926 uint64_t ret = 0; 1927 1927 ··· 2452 2452 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2453 2453 int timeridx) 2454 2454 { 2455 - ARMCPU *cpu = arm_env_get_cpu(env); 2455 + ARMCPU *cpu = env_archcpu(env); 2456 2456 2457 2457 timer_del(cpu->gt_timer[timeridx]); 2458 2458 } ··· 2473 2473 { 2474 2474 trace_arm_gt_cval_write(timeridx, value); 2475 2475 env->cp15.c14_timer[timeridx].cval = value; 2476 - gt_recalc_timer(arm_env_get_cpu(env), timeridx); 2476 + gt_recalc_timer(env_archcpu(env), 
timeridx); 2477 2477 } 2478 2478 2479 2479 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, ··· 2494 2494 trace_arm_gt_tval_write(timeridx, value); 2495 2495 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2496 2496 sextract64(value, 0, 32); 2497 - gt_recalc_timer(arm_env_get_cpu(env), timeridx); 2497 + gt_recalc_timer(env_archcpu(env), timeridx); 2498 2498 } 2499 2499 2500 2500 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2501 2501 int timeridx, 2502 2502 uint64_t value) 2503 2503 { 2504 - ARMCPU *cpu = arm_env_get_cpu(env); 2504 + ARMCPU *cpu = env_archcpu(env); 2505 2505 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2506 2506 2507 2507 trace_arm_gt_ctl_write(timeridx, value); ··· 2579 2579 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2580 2580 uint64_t value) 2581 2581 { 2582 - ARMCPU *cpu = arm_env_get_cpu(env); 2582 + ARMCPU *cpu = env_archcpu(env); 2583 2583 2584 2584 trace_arm_gt_cntvoff_write(value); 2585 2585 raw_write(env, ri, value); ··· 3212 3212 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3213 3213 uint64_t value) 3214 3214 { 3215 - ARMCPU *cpu = arm_env_get_cpu(env); 3215 + ARMCPU *cpu = env_archcpu(env); 3216 3216 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3217 3217 3218 3218 if (!u32p) { ··· 3227 3227 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3228 3228 uint64_t value) 3229 3229 { 3230 - ARMCPU *cpu = arm_env_get_cpu(env); 3230 + ARMCPU *cpu = env_archcpu(env); 3231 3231 uint32_t nrgs = cpu->pmsav7_dregion; 3232 3232 3233 3233 if (value >= nrgs) { ··· 3355 3355 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3356 3356 uint64_t value) 3357 3357 { 3358 - ARMCPU *cpu = arm_env_get_cpu(env); 3358 + ARMCPU *cpu = env_archcpu(env); 3359 3359 TCR *tcr = raw_ptr(env, ri); 3360 3360 3361 3361 if (arm_feature(env, ARM_FEATURE_LPAE)) { ··· 3384 3384 static void 
vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3385 3385 uint64_t value) 3386 3386 { 3387 - ARMCPU *cpu = arm_env_get_cpu(env); 3387 + ARMCPU *cpu = env_archcpu(env); 3388 3388 TCR *tcr = raw_ptr(env, ri); 3389 3389 3390 3390 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ ··· 3398 3398 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 3399 3399 if (cpreg_field_is_64bit(ri) && 3400 3400 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3401 - ARMCPU *cpu = arm_env_get_cpu(env); 3401 + ARMCPU *cpu = env_archcpu(env); 3402 3402 tlb_flush(CPU(cpu)); 3403 3403 } 3404 3404 raw_write(env, ri, value); ··· 3407 3407 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3408 3408 uint64_t value) 3409 3409 { 3410 - ARMCPU *cpu = arm_env_get_cpu(env); 3410 + ARMCPU *cpu = env_archcpu(env); 3411 3411 CPUState *cs = CPU(cpu); 3412 3412 3413 3413 /* Accesses to VTTBR may change the VMID so we must flush the TLB. */ ··· 3497 3497 uint64_t value) 3498 3498 { 3499 3499 /* Wait-for-interrupt (deprecated) */ 3500 - cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 3500 + cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 3501 3501 } 3502 3502 3503 3503 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, ··· 3650 3650 3651 3651 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3652 3652 { 3653 - ARMCPU *cpu = arm_env_get_cpu(env); 3653 + ARMCPU *cpu = env_archcpu(env); 3654 3654 unsigned int cur_el = arm_current_el(env); 3655 3655 bool secure = arm_is_secure(env); 3656 3656 ··· 3662 3662 3663 3663 static uint64_t mpidr_read_val(CPUARMState *env) 3664 3664 { 3665 - ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env)); 3665 + ARMCPU *cpu = env_archcpu(env); 3666 3666 uint64_t mpidr = cpu->mp_affinity; 3667 3667 3668 3668 if (arm_feature(env, ARM_FEATURE_V7MP)) { ··· 3773 3773 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3774 3774 
uint64_t value) 3775 3775 { 3776 - CPUState *cs = ENV_GET_CPU(env); 3776 + CPUState *cs = env_cpu(env); 3777 3777 bool sec = arm_is_secure_below_el3(env); 3778 3778 3779 3779 if (sec) { ··· 3790 3790 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3791 3791 uint64_t value) 3792 3792 { 3793 - CPUState *cs = ENV_GET_CPU(env); 3793 + CPUState *cs = env_cpu(env); 3794 3794 3795 3795 if (tlb_force_broadcast(env)) { 3796 3796 tlbi_aa64_vmalle1is_write(env, NULL, value); ··· 3815 3815 * stage 2 translations, whereas most other scopes only invalidate 3816 3816 * stage 1 translations. 3817 3817 */ 3818 - ARMCPU *cpu = arm_env_get_cpu(env); 3818 + ARMCPU *cpu = env_archcpu(env); 3819 3819 CPUState *cs = CPU(cpu); 3820 3820 3821 3821 if (arm_is_secure_below_el3(env)) { ··· 3839 3839 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3840 3840 uint64_t value) 3841 3841 { 3842 - ARMCPU *cpu = arm_env_get_cpu(env); 3842 + ARMCPU *cpu = env_archcpu(env); 3843 3843 CPUState *cs = CPU(cpu); 3844 3844 3845 3845 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); ··· 3848 3848 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3849 3849 uint64_t value) 3850 3850 { 3851 - ARMCPU *cpu = arm_env_get_cpu(env); 3851 + ARMCPU *cpu = env_archcpu(env); 3852 3852 CPUState *cs = CPU(cpu); 3853 3853 3854 3854 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); ··· 3861 3861 * stage 2 translations, whereas most other scopes only invalidate 3862 3862 * stage 1 translations. 
3863 3863 */ 3864 - CPUState *cs = ENV_GET_CPU(env); 3864 + CPUState *cs = env_cpu(env); 3865 3865 bool sec = arm_is_secure_below_el3(env); 3866 3866 bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); 3867 3867 ··· 3884 3884 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3885 3885 uint64_t value) 3886 3886 { 3887 - CPUState *cs = ENV_GET_CPU(env); 3887 + CPUState *cs = env_cpu(env); 3888 3888 3889 3889 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E2); 3890 3890 } ··· 3892 3892 static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3893 3893 uint64_t value) 3894 3894 { 3895 - CPUState *cs = ENV_GET_CPU(env); 3895 + CPUState *cs = env_cpu(env); 3896 3896 3897 3897 tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_S1E3); 3898 3898 } ··· 3904 3904 * Currently handles both VAE2 and VALE2, since we don't support 3905 3905 * flush-last-level-only. 3906 3906 */ 3907 - ARMCPU *cpu = arm_env_get_cpu(env); 3907 + ARMCPU *cpu = env_archcpu(env); 3908 3908 CPUState *cs = CPU(cpu); 3909 3909 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3910 3910 ··· 3918 3918 * Currently handles both VAE3 and VALE3, since we don't support 3919 3919 * flush-last-level-only. 3920 3920 */ 3921 - ARMCPU *cpu = arm_env_get_cpu(env); 3921 + ARMCPU *cpu = env_archcpu(env); 3922 3922 CPUState *cs = CPU(cpu); 3923 3923 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3924 3924 ··· 3928 3928 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3929 3929 uint64_t value) 3930 3930 { 3931 - ARMCPU *cpu = arm_env_get_cpu(env); 3931 + ARMCPU *cpu = env_archcpu(env); 3932 3932 CPUState *cs = CPU(cpu); 3933 3933 bool sec = arm_is_secure_below_el3(env); 3934 3934 uint64_t pageaddr = sextract64(value << 12, 0, 56); ··· 3952 3952 * since we don't support flush-for-specific-ASID-only or 3953 3953 * flush-last-level-only. 
3954 3954 */ 3955 - ARMCPU *cpu = arm_env_get_cpu(env); 3955 + ARMCPU *cpu = env_archcpu(env); 3956 3956 CPUState *cs = CPU(cpu); 3957 3957 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3958 3958 ··· 3975 3975 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3976 3976 uint64_t value) 3977 3977 { 3978 - CPUState *cs = ENV_GET_CPU(env); 3978 + CPUState *cs = env_cpu(env); 3979 3979 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3980 3980 3981 3981 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ··· 3985 3985 static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3986 3986 uint64_t value) 3987 3987 { 3988 - CPUState *cs = ENV_GET_CPU(env); 3988 + CPUState *cs = env_cpu(env); 3989 3989 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3990 3990 3991 3991 tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, ··· 4001 4001 * translation information. 4002 4002 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 4003 4003 */ 4004 - ARMCPU *cpu = arm_env_get_cpu(env); 4004 + ARMCPU *cpu = env_archcpu(env); 4005 4005 CPUState *cs = CPU(cpu); 4006 4006 uint64_t pageaddr; 4007 4007 ··· 4017 4017 static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 4018 4018 uint64_t value) 4019 4019 { 4020 - CPUState *cs = ENV_GET_CPU(env); 4020 + CPUState *cs = env_cpu(env); 4021 4021 uint64_t pageaddr; 4022 4022 4023 4023 if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { ··· 4044 4044 4045 4045 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4046 4046 { 4047 - ARMCPU *cpu = arm_env_get_cpu(env); 4047 + ARMCPU *cpu = env_archcpu(env); 4048 4048 int dzp_bit = 1 << 4; 4049 4049 4050 4050 /* DZP indicates whether DC ZVA access is allowed */ ··· 4079 4079 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4080 4080 uint64_t value) 4081 4081 { 4082 - ARMCPU *cpu = arm_env_get_cpu(env); 4082 + ARMCPU *cpu = env_archcpu(env); 
4083 4083 4084 4084 if (raw_read(env, ri) == value) { 4085 4085 /* Skip the TLB flush if nothing actually changed; Linux likes ··· 4571 4571 4572 4572 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 4573 4573 { 4574 - ARMCPU *cpu = arm_env_get_cpu(env); 4574 + ARMCPU *cpu = env_archcpu(env); 4575 4575 uint64_t valid_mask = HCR_MASK; 4576 4576 4577 4577 if (arm_feature(env, ARM_FEATURE_EL3)) { ··· 5238 5238 */ 5239 5239 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 5240 5240 { 5241 - ARMCPU *cpu = arm_env_get_cpu(env); 5241 + ARMCPU *cpu = env_archcpu(env); 5242 5242 uint32_t zcr_len = cpu->sve_max_vq - 1; 5243 5243 5244 5244 if (el <= 1) { ··· 5406 5406 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5407 5407 uint64_t value) 5408 5408 { 5409 - ARMCPU *cpu = arm_env_get_cpu(env); 5409 + ARMCPU *cpu = env_archcpu(env); 5410 5410 int i = ri->crm; 5411 5411 5412 5412 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the ··· 5422 5422 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5423 5423 uint64_t value) 5424 5424 { 5425 - ARMCPU *cpu = arm_env_get_cpu(env); 5425 + ARMCPU *cpu = env_archcpu(env); 5426 5426 int i = ri->crm; 5427 5427 5428 5428 raw_write(env, ri, value); ··· 5524 5524 static void dbgbvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5525 5525 uint64_t value) 5526 5526 { 5527 - ARMCPU *cpu = arm_env_get_cpu(env); 5527 + ARMCPU *cpu = env_archcpu(env); 5528 5528 int i = ri->crm; 5529 5529 5530 5530 raw_write(env, ri, value); ··· 5534 5534 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5535 5535 uint64_t value) 5536 5536 { 5537 - ARMCPU *cpu = arm_env_get_cpu(env); 5537 + ARMCPU *cpu = env_archcpu(env); 5538 5538 int i = ri->crm; 5539 5539 5540 5540 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only ··· 5630 5630 */ 5631 5631 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 5632 5632 { 5633 - ARMCPU *cpu = 
arm_env_get_cpu(env); 5633 + ARMCPU *cpu = env_archcpu(env); 5634 5634 uint64_t pfr1 = cpu->id_pfr1; 5635 5635 5636 5636 if (env->gicv3state) { ··· 5641 5641 5642 5642 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 5643 5643 { 5644 - ARMCPU *cpu = arm_env_get_cpu(env); 5644 + ARMCPU *cpu = env_archcpu(env); 5645 5645 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 5646 5646 5647 5647 if (env->gicv3state) { ··· 7421 7421 /* These should probably raise undefined insn exceptions. */ 7422 7422 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 7423 7423 { 7424 - ARMCPU *cpu = arm_env_get_cpu(env); 7424 + ARMCPU *cpu = env_archcpu(env); 7425 7425 7426 7426 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 7427 7427 } 7428 7428 7429 7429 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 7430 7430 { 7431 - ARMCPU *cpu = arm_env_get_cpu(env); 7431 + ARMCPU *cpu = env_archcpu(env); 7432 7432 7433 7433 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 7434 7434 return 0; ··· 7488 7488 7489 7489 static void switch_mode(CPUARMState *env, int mode) 7490 7490 { 7491 - ARMCPU *cpu = arm_env_get_cpu(env); 7491 + ARMCPU *cpu = env_archcpu(env); 7492 7492 7493 7493 if (mode != ARM_CPU_MODE_USR) { 7494 7494 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); ··· 7831 7831 * PreserveFPState() pseudocode. 7832 7832 * We may throw an exception if the stacking fails. 
7833 7833 */ 7834 - ARMCPU *cpu = arm_env_get_cpu(env); 7834 + ARMCPU *cpu = env_archcpu(env); 7835 7835 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; 7836 7836 bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK); 7837 7837 bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK); ··· 10938 10938 target_ulong *page_size, 10939 10939 ARMMMUFaultInfo *fi) 10940 10940 { 10941 - CPUState *cs = CPU(arm_env_get_cpu(env)); 10941 + CPUState *cs = env_cpu(env); 10942 10942 int level = 1; 10943 10943 uint32_t table; 10944 10944 uint32_t desc; ··· 11059 11059 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11060 11060 target_ulong *page_size, ARMMMUFaultInfo *fi) 11061 11061 { 11062 - CPUState *cs = CPU(arm_env_get_cpu(env)); 11062 + CPUState *cs = env_cpu(env); 11063 11063 int level = 1; 11064 11064 uint32_t table; 11065 11065 uint32_t desc; ··· 11444 11444 target_ulong *page_size_ptr, 11445 11445 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11446 11446 { 11447 - ARMCPU *cpu = arm_env_get_cpu(env); 11447 + ARMCPU *cpu = env_archcpu(env); 11448 11448 CPUState *cs = CPU(cpu); 11449 11449 /* Read an LPAE long-descriptor translation table. */ 11450 11450 ARMFaultType fault_type = ARMFault_Translation; ··· 11802 11802 target_ulong *page_size, 11803 11803 ARMMMUFaultInfo *fi) 11804 11804 { 11805 - ARMCPU *cpu = arm_env_get_cpu(env); 11805 + ARMCPU *cpu = env_archcpu(env); 11806 11806 int n; 11807 11807 bool is_user = regime_is_user(env, mmu_idx); 11808 11808 ··· 12006 12006 * pseudocode SecurityCheck() function. 12007 12007 * We assume the caller has zero-initialized *sattrs. 
12008 12008 */ 12009 - ARMCPU *cpu = arm_env_get_cpu(env); 12009 + ARMCPU *cpu = env_archcpu(env); 12010 12010 int r; 12011 12011 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 12012 12012 int idau_region = IREGION_NOTVALID; ··· 12119 12119 * We set is_subpage to true if the region hit doesn't cover the 12120 12120 * entire TARGET_PAGE the address is within. 12121 12121 */ 12122 - ARMCPU *cpu = arm_env_get_cpu(env); 12122 + ARMCPU *cpu = env_archcpu(env); 12123 12123 bool is_user = regime_is_user(env, mmu_idx); 12124 12124 uint32_t secure = regime_is_secure(env, mmu_idx); 12125 12125 int n; ··· 12899 12899 limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false]; 12900 12900 12901 12901 if (val < limit) { 12902 - CPUState *cs = CPU(arm_env_get_cpu(env)); 12902 + CPUState *cs = env_cpu(env); 12903 12903 12904 12904 cpu_restore_state(cs, GETPC(), true); 12905 12905 raise_exception(env, EXCP_STKOF, 0, 1); ··· 13180 13180 * alignment faults or any memory attribute handling). 13181 13181 */ 13182 13182 13183 - ARMCPU *cpu = arm_env_get_cpu(env); 13183 + ARMCPU *cpu = env_archcpu(env); 13184 13184 uint64_t blocklen = 4 << cpu->dcz_blocksize; 13185 13185 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 13186 13186 ··· 13680 13680 uint32_t flags = 0; 13681 13681 13682 13682 if (is_a64(env)) { 13683 - ARMCPU *cpu = arm_env_get_cpu(env); 13683 + ARMCPU *cpu = env_archcpu(env); 13684 13684 uint64_t sctlr; 13685 13685 13686 13686 *pc = env->pc; ··· 13853 13853 uint64_t pmask; 13854 13854 13855 13855 assert(vq >= 1 && vq <= ARM_MAX_VQ); 13856 - assert(vq <= arm_env_get_cpu(env)->sve_max_vq); 13856 + assert(vq <= env_archcpu(env)->sve_max_vq); 13857 13857 13858 13858 /* Zap the high bits of the zregs. 
*/ 13859 13859 for (i = 0; i < 32; i++) { ··· 13879 13879 void aarch64_sve_change_el(CPUARMState *env, int old_el, 13880 13880 int new_el, bool el0_a64) 13881 13881 { 13882 - ARMCPU *cpu = arm_env_get_cpu(env); 13882 + ARMCPU *cpu = env_archcpu(env); 13883 13883 int old_len, new_len; 13884 13884 bool old_a64, new_a64; 13885 13885
+10 -11
target/arm/op_helper.c
··· 31 31 static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp, 32 32 uint32_t syndrome, uint32_t target_el) 33 33 { 34 - CPUState *cs = CPU(arm_env_get_cpu(env)); 34 + CPUState *cs = env_cpu(env); 35 35 36 36 if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { 37 37 /* ··· 224 224 * raising an exception if the limit is breached. 225 225 */ 226 226 if (newvalue < v7m_sp_limit(env)) { 227 - CPUState *cs = CPU(arm_env_get_cpu(env)); 227 + CPUState *cs = env_cpu(env); 228 228 229 229 /* 230 230 * Stack limit exceptions are a rare case, so rather than syncing ··· 427 427 428 428 void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) 429 429 { 430 - CPUState *cs = CPU(arm_env_get_cpu(env)); 430 + CPUState *cs = env_cpu(env); 431 431 int target_el = check_wfx_trap(env, false); 432 432 433 433 if (cpu_has_work(cs)) { ··· 462 462 463 463 void HELPER(yield)(CPUARMState *env) 464 464 { 465 - ARMCPU *cpu = arm_env_get_cpu(env); 466 - CPUState *cs = CPU(cpu); 465 + CPUState *cs = env_cpu(env); 467 466 468 467 /* This is a non-trappable hint instruction that generally indicates 469 468 * that the guest is currently busy-looping. Yield control back to the ··· 481 480 */ 482 481 void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) 483 482 { 484 - CPUState *cs = CPU(arm_env_get_cpu(env)); 483 + CPUState *cs = env_cpu(env); 485 484 486 485 assert(excp_is_internal(excp)); 487 486 cs->exception_index = excp; ··· 524 523 void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) 525 524 { 526 525 qemu_mutex_lock_iothread(); 527 - arm_call_pre_el_change_hook(arm_env_get_cpu(env)); 526 + arm_call_pre_el_change_hook(env_archcpu(env)); 528 527 qemu_mutex_unlock_iothread(); 529 528 530 529 cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn); ··· 537 536 env->regs[15] &= (env->thumb ? 
~1 : ~3); 538 537 539 538 qemu_mutex_lock_iothread(); 540 - arm_call_el_change_hook(arm_env_get_cpu(env)); 539 + arm_call_el_change_hook(env_archcpu(env)); 541 540 qemu_mutex_unlock_iothread(); 542 541 } 543 542 ··· 842 841 843 842 void HELPER(pre_hvc)(CPUARMState *env) 844 843 { 845 - ARMCPU *cpu = arm_env_get_cpu(env); 844 + ARMCPU *cpu = env_archcpu(env); 846 845 int cur_el = arm_current_el(env); 847 846 /* FIXME: Use actual secure state. */ 848 847 bool secure = false; ··· 882 881 883 882 void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) 884 883 { 885 - ARMCPU *cpu = arm_env_get_cpu(env); 884 + ARMCPU *cpu = env_archcpu(env); 886 885 int cur_el = arm_current_el(env); 887 886 bool secure = arm_is_secure(env); 888 887 bool smd_flag = env->cp15.scr_el3 & SCR_SMD; ··· 1156 1155 1157 1156 void HELPER(check_breakpoints)(CPUARMState *env) 1158 1157 { 1159 - ARMCPU *cpu = arm_env_get_cpu(env); 1158 + ARMCPU *cpu = env_archcpu(env); 1160 1159 1161 1160 if (check_breakpoints(cpu)) { 1162 1161 HELPER(exception_internal(env, EXCP_DEBUG));
+2 -2
target/arm/translate-a64.c
··· 14134 14134 * table entry even for that case. 14135 14135 */ 14136 14136 return (tlb_hit(entry->addr_code, addr) && 14137 - env->iotlb[mmu_idx][index].attrs.target_tlb_bit0); 14137 + env_tlb(env)->d[mmu_idx].iotlb[index].attrs.target_tlb_bit0); 14138 14138 #endif 14139 14139 } 14140 14140 ··· 14289 14289 { 14290 14290 DisasContext *dc = container_of(dcbase, DisasContext, base); 14291 14291 CPUARMState *env = cpu->env_ptr; 14292 - ARMCPU *arm_cpu = arm_env_get_cpu(env); 14292 + ARMCPU *arm_cpu = env_archcpu(env); 14293 14293 uint32_t tb_flags = dc->base.tb->flags; 14294 14294 int bound, core_mmu_idx; 14295 14295
+1 -1
target/arm/translate.c
··· 13408 13408 { 13409 13409 DisasContext *dc = container_of(dcbase, DisasContext, base); 13410 13410 CPUARMState *env = cs->env_ptr; 13411 - ARMCPU *cpu = arm_env_get_cpu(env); 13411 + ARMCPU *cpu = env_archcpu(env); 13412 13412 uint32_t tb_flags = dc->base.tb->flags; 13413 13413 uint32_t condexec, core_mmu_idx; 13414 13414
+1 -1
target/arm/vfp_helper.c
··· 101 101 uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR]; 102 102 103 103 /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ 104 - if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) { 104 + if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) { 105 105 val &= ~FPCR_FZ16; 106 106 } 107 107
+17
target/cris/cpu-param.h
··· 1 + /* 2 + * CRIS cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2007 AXIS Communications AB 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef CRIS_CPU_PARAM_H 9 + #define CRIS_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 13 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 15 + #define NB_MMU_MODES 2 16 + 17 + #endif
+1 -2
target/cris/cpu.c
··· 172 172 173 173 static void cris_cpu_initfn(Object *obj) 174 174 { 175 - CPUState *cs = CPU(obj); 176 175 CRISCPU *cpu = CRIS_CPU(obj); 177 176 CRISCPUClass *ccc = CRIS_CPU_GET_CLASS(obj); 178 177 CPUCRISState *env = &cpu->env; 179 178 180 - cs->env_ptr = env; 179 + cpu_set_cpustate_pointers(cpu); 181 180 182 181 env->pregs[PR_VR] = ccc->vr; 183 182
+4 -21
target/cris/cpu.h
··· 23 23 24 24 #include "qemu-common.h" 25 25 #include "cpu-qom.h" 26 - 27 - #define TARGET_LONG_BITS 32 28 - 29 - #define CPUArchState struct CPUCRISState 30 - 31 26 #include "exec/cpu-defs.h" 32 27 33 28 #define EXCP_NMI 1 ··· 104 99 #define CC_LE 13 105 100 #define CC_A 14 106 101 #define CC_P 15 107 - 108 - #define NB_MMU_MODES 2 109 102 110 103 typedef struct { 111 104 uint32_t hi; ··· 170 163 /* Fields up to this point are cleared by a CPU reset */ 171 164 struct {} end_reset_fields; 172 165 173 - CPU_COMMON 174 - 175 166 /* Members from load_info on are preserved across resets. */ 176 167 void *load_info; 177 168 } CPUCRISState; ··· 187 178 CPUState parent_obj; 188 179 /*< public >*/ 189 180 181 + CPUNegativeOffsetState neg; 190 182 CPUCRISState env; 191 183 }; 192 184 193 - static inline CRISCPU *cris_env_get_cpu(CPUCRISState *env) 194 - { 195 - return container_of(env, CRISCPU, env); 196 - } 197 - 198 - #define ENV_GET_CPU(e) CPU(cris_env_get_cpu(e)) 199 - 200 - #define ENV_OFFSET offsetof(CRISCPU, env) 201 185 202 186 #ifndef CONFIG_USER_ONLY 203 187 extern const struct VMStateDescription vmstate_cris_cpu; ··· 260 244 }; 261 245 262 246 /* CRIS uses 8k pages. */ 263 - #define TARGET_PAGE_BITS 13 264 247 #define MMAP_SHIFT TARGET_PAGE_BITS 265 - 266 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 267 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 268 248 269 249 #define CRIS_CPU_TYPE_SUFFIX "-" TYPE_CRIS_CPU 270 250 #define CRIS_CPU_TYPE_NAME(name) (name CRIS_CPU_TYPE_SUFFIX) ··· 294 274 #define SFR_RW_MM_TLB_SEL env->pregs[PR_SRS]][4 295 275 #define SFR_RW_MM_TLB_LO env->pregs[PR_SRS]][5 296 276 #define SFR_RW_MM_TLB_HI env->pregs[PR_SRS]][6 277 + 278 + typedef CPUCRISState CPUArchState; 279 + typedef CRISCPU ArchCPU; 297 280 298 281 #include "exec/cpu-all.h" 299 282
+237 -243
target/cris/mmu.c
··· 33 33 34 34 void cris_mmu_init(CPUCRISState *env) 35 35 { 36 - env->mmu_rand_lfsr = 0xcccc; 36 + env->mmu_rand_lfsr = 0xcccc; 37 37 } 38 38 39 39 #define SR_POLYNOM 0x8805 40 40 static inline unsigned int compute_polynom(unsigned int sr) 41 41 { 42 - unsigned int i; 43 - unsigned int f; 42 + unsigned int i; 43 + unsigned int f; 44 44 45 - f = 0; 46 - for (i = 0; i < 16; i++) 47 - f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1); 45 + f = 0; 46 + for (i = 0; i < 16; i++) { 47 + f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1); 48 + } 48 49 49 - return f; 50 + return f; 50 51 } 51 52 52 53 static void cris_mmu_update_rand_lfsr(CPUCRISState *env) 53 54 { 54 - unsigned int f; 55 + unsigned int f; 55 56 56 - /* Update lfsr at every fault. */ 57 - f = compute_polynom(env->mmu_rand_lfsr); 58 - env->mmu_rand_lfsr >>= 1; 59 - env->mmu_rand_lfsr |= (f << 15); 60 - env->mmu_rand_lfsr &= 0xffff; 57 + /* Update lfsr at every fault. */ 58 + f = compute_polynom(env->mmu_rand_lfsr); 59 + env->mmu_rand_lfsr >>= 1; 60 + env->mmu_rand_lfsr |= (f << 15); 61 + env->mmu_rand_lfsr &= 0xffff; 61 62 } 62 63 63 64 static inline int cris_mmu_enabled(uint32_t rw_gc_cfg) 64 65 { 65 - return (rw_gc_cfg & 12) != 0; 66 + return (rw_gc_cfg & 12) != 0; 66 67 } 67 68 68 69 static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg) 69 70 { 70 - return (1 << seg) & rw_mm_cfg; 71 + return (1 << seg) & rw_mm_cfg; 71 72 } 72 73 73 74 static uint32_t cris_mmu_translate_seg(CPUCRISState *env, int seg) 74 75 { 75 - uint32_t base; 76 - int i; 76 + uint32_t base; 77 + int i; 77 78 78 - if (seg < 8) 79 - base = env->sregs[SFR_RW_MM_KBASE_LO]; 80 - else 81 - base = env->sregs[SFR_RW_MM_KBASE_HI]; 79 + if (seg < 8) { 80 + base = env->sregs[SFR_RW_MM_KBASE_LO]; 81 + } else { 82 + base = env->sregs[SFR_RW_MM_KBASE_HI]; 83 + } 82 84 83 - i = seg & 7; 84 - base >>= i * 4; 85 - base &= 15; 85 + i = seg & 7; 86 + base >>= i * 4; 87 + base &= 15; 86 88 87 - base <<= 28; 88 - return base; 89 + base <<= 28; 
90 + return base; 89 91 } 92 + 90 93 /* Used by the tlb decoder. */ 91 - #define EXTRACT_FIELD(src, start, end) \ 92 - (((src) >> start) & ((1 << (end - start + 1)) - 1)) 94 + #define EXTRACT_FIELD(src, start, end) \ 95 + (((src) >> start) & ((1 << (end - start + 1)) - 1)) 93 96 94 - static inline void set_field(uint32_t *dst, unsigned int val, 97 + static inline void set_field(uint32_t *dst, unsigned int val, 95 98 unsigned int offset, unsigned int width) 96 99 { 97 - uint32_t mask; 100 + uint32_t mask; 98 101 99 - mask = (1 << width) - 1; 100 - mask <<= offset; 101 - val <<= offset; 102 + mask = (1 << width) - 1; 103 + mask <<= offset; 104 + val <<= offset; 102 105 103 - val &= mask; 104 - *dst &= ~(mask); 105 - *dst |= val; 106 + val &= mask; 107 + *dst &= ~(mask); 108 + *dst |= val; 106 109 } 107 110 108 111 #ifdef DEBUG 109 112 static void dump_tlb(CPUCRISState *env, int mmu) 110 113 { 111 - int set; 112 - int idx; 113 - uint32_t hi, lo, tlb_vpn, tlb_pfn; 114 + int set; 115 + int idx; 116 + uint32_t hi, lo, tlb_vpn, tlb_pfn; 114 117 115 - for (set = 0; set < 4; set++) { 116 - for (idx = 0; idx < 16; idx++) { 117 - lo = env->tlbsets[mmu][set][idx].lo; 118 - hi = env->tlbsets[mmu][set][idx].hi; 119 - tlb_vpn = EXTRACT_FIELD(hi, 13, 31); 120 - tlb_pfn = EXTRACT_FIELD(lo, 13, 31); 118 + for (set = 0; set < 4; set++) { 119 + for (idx = 0; idx < 16; idx++) { 120 + lo = env->tlbsets[mmu][set][idx].lo; 121 + hi = env->tlbsets[mmu][set][idx].hi; 122 + tlb_vpn = EXTRACT_FIELD(hi, 13, 31); 123 + tlb_pfn = EXTRACT_FIELD(lo, 13, 31); 121 124 122 - printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n", 123 - set, idx, hi, lo, tlb_vpn, tlb_pfn); 124 - } 125 - } 125 + printf("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n", 126 + set, idx, hi, lo, tlb_vpn, tlb_pfn); 127 + } 128 + } 126 129 } 127 130 #endif 128 131 ··· 131 134 CPUCRISState *env, uint32_t vaddr, 132 135 int rw, int usermode, int debug) 133 136 { 134 - unsigned int vpage; 135 - unsigned int idx; 136 - uint32_t pid, lo, hi; 137 
- uint32_t tlb_vpn, tlb_pfn = 0; 138 - int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x; 139 - int cfg_v, cfg_k, cfg_w, cfg_x; 140 - int set, match = 0; 141 - uint32_t r_cause; 142 - uint32_t r_cfg; 143 - int rwcause; 144 - int mmu = 1; /* Data mmu is default. */ 145 - int vect_base; 146 - 147 - r_cause = env->sregs[SFR_R_MM_CAUSE]; 148 - r_cfg = env->sregs[SFR_RW_MM_CFG]; 149 - pid = env->pregs[PR_PID] & 0xff; 150 - 151 - switch (rw) { 152 - case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break; 153 - case 1: rwcause = CRIS_MMU_ERR_WRITE; break; 154 - default: 155 - case 0: rwcause = CRIS_MMU_ERR_READ; break; 156 - } 137 + unsigned int vpage; 138 + unsigned int idx; 139 + uint32_t pid, lo, hi; 140 + uint32_t tlb_vpn, tlb_pfn = 0; 141 + int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x; 142 + int cfg_v, cfg_k, cfg_w, cfg_x; 143 + int set, match = 0; 144 + uint32_t r_cause; 145 + uint32_t r_cfg; 146 + int rwcause; 147 + int mmu = 1; /* Data mmu is default. */ 148 + int vect_base; 157 149 158 - /* I exception vectors 4 - 7, D 8 - 11. */ 159 - vect_base = (mmu + 1) * 4; 150 + r_cause = env->sregs[SFR_R_MM_CAUSE]; 151 + r_cfg = env->sregs[SFR_RW_MM_CFG]; 152 + pid = env->pregs[PR_PID] & 0xff; 160 153 161 - vpage = vaddr >> 13; 154 + switch (rw) { 155 + case 2: 156 + rwcause = CRIS_MMU_ERR_EXEC; 157 + mmu = 0; 158 + break; 159 + case 1: 160 + rwcause = CRIS_MMU_ERR_WRITE; 161 + break; 162 + default: 163 + case 0: 164 + rwcause = CRIS_MMU_ERR_READ; 165 + break; 166 + } 162 167 163 - /* We know the index which to check on each set. 164 - Scan both I and D. */ 165 - #if 0 166 - for (set = 0; set < 4; set++) { 167 - for (idx = 0; idx < 16; idx++) { 168 - lo = env->tlbsets[mmu][set][idx].lo; 169 - hi = env->tlbsets[mmu][set][idx].hi; 170 - tlb_vpn = EXTRACT_FIELD(hi, 13, 31); 171 - tlb_pfn = EXTRACT_FIELD(lo, 13, 31); 168 + /* I exception vectors 4 - 7, D 8 - 11. 
*/ 169 + vect_base = (mmu + 1) * 4; 172 170 173 - printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n", 174 - set, idx, hi, lo, tlb_vpn, tlb_pfn); 175 - } 176 - } 177 - #endif 171 + vpage = vaddr >> 13; 178 172 179 - idx = vpage & 15; 180 - for (set = 0; set < 4; set++) 181 - { 182 - lo = env->tlbsets[mmu][set][idx].lo; 183 - hi = env->tlbsets[mmu][set][idx].hi; 173 + /* 174 + * We know the index which to check on each set. 175 + * Scan both I and D. 176 + */ 177 + idx = vpage & 15; 178 + for (set = 0; set < 4; set++) { 179 + lo = env->tlbsets[mmu][set][idx].lo; 180 + hi = env->tlbsets[mmu][set][idx].hi; 184 181 185 - tlb_vpn = hi >> 13; 186 - tlb_pid = EXTRACT_FIELD(hi, 0, 7); 187 - tlb_g = EXTRACT_FIELD(lo, 4, 4); 182 + tlb_vpn = hi >> 13; 183 + tlb_pid = EXTRACT_FIELD(hi, 0, 7); 184 + tlb_g = EXTRACT_FIELD(lo, 4, 4); 188 185 189 - D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n", 190 - mmu, set, idx, tlb_vpn, vpage, lo, hi); 191 - if ((tlb_g || (tlb_pid == pid)) 192 - && tlb_vpn == vpage) { 193 - match = 1; 194 - break; 195 - } 196 - } 186 + D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n", 187 + mmu, set, idx, tlb_vpn, vpage, lo, hi); 188 + if ((tlb_g || (tlb_pid == pid)) && tlb_vpn == vpage) { 189 + match = 1; 190 + break; 191 + } 192 + } 197 193 198 - res->bf_vec = vect_base; 199 - if (match) { 200 - cfg_w = EXTRACT_FIELD(r_cfg, 19, 19); 201 - cfg_k = EXTRACT_FIELD(r_cfg, 18, 18); 202 - cfg_x = EXTRACT_FIELD(r_cfg, 17, 17); 203 - cfg_v = EXTRACT_FIELD(r_cfg, 16, 16); 194 + res->bf_vec = vect_base; 195 + if (match) { 196 + cfg_w = EXTRACT_FIELD(r_cfg, 19, 19); 197 + cfg_k = EXTRACT_FIELD(r_cfg, 18, 18); 198 + cfg_x = EXTRACT_FIELD(r_cfg, 17, 17); 199 + cfg_v = EXTRACT_FIELD(r_cfg, 16, 16); 204 200 205 - tlb_pfn = EXTRACT_FIELD(lo, 13, 31); 206 - tlb_v = EXTRACT_FIELD(lo, 3, 3); 207 - tlb_k = EXTRACT_FIELD(lo, 2, 2); 208 - tlb_w = EXTRACT_FIELD(lo, 1, 1); 209 - tlb_x = EXTRACT_FIELD(lo, 0, 0); 201 + tlb_pfn = EXTRACT_FIELD(lo, 13, 31); 202 + tlb_v = 
EXTRACT_FIELD(lo, 3, 3); 203 + tlb_k = EXTRACT_FIELD(lo, 2, 2); 204 + tlb_w = EXTRACT_FIELD(lo, 1, 1); 205 + tlb_x = EXTRACT_FIELD(lo, 0, 0); 210 206 211 - /* 212 - set_exception_vector(0x04, i_mmu_refill); 213 - set_exception_vector(0x05, i_mmu_invalid); 214 - set_exception_vector(0x06, i_mmu_access); 215 - set_exception_vector(0x07, i_mmu_execute); 216 - set_exception_vector(0x08, d_mmu_refill); 217 - set_exception_vector(0x09, d_mmu_invalid); 218 - set_exception_vector(0x0a, d_mmu_access); 219 - set_exception_vector(0x0b, d_mmu_write); 220 - */ 221 - if (cfg_k && tlb_k && usermode) { 222 - D(printf ("tlb: kernel protected %x lo=%x pc=%x\n", 223 - vaddr, lo, env->pc)); 224 - match = 0; 225 - res->bf_vec = vect_base + 2; 226 - } else if (rw == 1 && cfg_w && !tlb_w) { 227 - D(printf ("tlb: write protected %x lo=%x pc=%x\n", 228 - vaddr, lo, env->pc)); 229 - match = 0; 230 - /* write accesses never go through the I mmu. */ 231 - res->bf_vec = vect_base + 3; 232 - } else if (rw == 2 && cfg_x && !tlb_x) { 233 - D(printf ("tlb: exec protected %x lo=%x pc=%x\n", 234 - vaddr, lo, env->pc)); 235 - match = 0; 236 - res->bf_vec = vect_base + 3; 237 - } else if (cfg_v && !tlb_v) { 238 - D(printf ("tlb: invalid %x\n", vaddr)); 239 - match = 0; 240 - res->bf_vec = vect_base + 1; 241 - } 207 + /* 208 + * set_exception_vector(0x04, i_mmu_refill); 209 + * set_exception_vector(0x05, i_mmu_invalid); 210 + * set_exception_vector(0x06, i_mmu_access); 211 + * set_exception_vector(0x07, i_mmu_execute); 212 + * set_exception_vector(0x08, d_mmu_refill); 213 + * set_exception_vector(0x09, d_mmu_invalid); 214 + * set_exception_vector(0x0a, d_mmu_access); 215 + * set_exception_vector(0x0b, d_mmu_write); 216 + */ 217 + if (cfg_k && tlb_k && usermode) { 218 + D(printf("tlb: kernel protected %x lo=%x pc=%x\n", 219 + vaddr, lo, env->pc)); 220 + match = 0; 221 + res->bf_vec = vect_base + 2; 222 + } else if (rw == 1 && cfg_w && !tlb_w) { 223 + D(printf("tlb: write protected %x lo=%x pc=%x\n", 224 
+ vaddr, lo, env->pc)); 225 + match = 0; 226 + /* write accesses never go through the I mmu. */ 227 + res->bf_vec = vect_base + 3; 228 + } else if (rw == 2 && cfg_x && !tlb_x) { 229 + D(printf("tlb: exec protected %x lo=%x pc=%x\n", 230 + vaddr, lo, env->pc)); 231 + match = 0; 232 + res->bf_vec = vect_base + 3; 233 + } else if (cfg_v && !tlb_v) { 234 + D(printf("tlb: invalid %x\n", vaddr)); 235 + match = 0; 236 + res->bf_vec = vect_base + 1; 237 + } 242 238 243 - res->prot = 0; 244 - if (match) { 245 - res->prot |= PAGE_READ; 246 - if (tlb_w) 247 - res->prot |= PAGE_WRITE; 248 - if (mmu == 0 && (cfg_x || tlb_x)) 249 - res->prot |= PAGE_EXEC; 250 - } 251 - else 252 - D(dump_tlb(env, mmu)); 253 - } else { 254 - /* If refill, provide a randomized set. */ 255 - set = env->mmu_rand_lfsr & 3; 256 - } 239 + res->prot = 0; 240 + if (match) { 241 + res->prot |= PAGE_READ; 242 + if (tlb_w) { 243 + res->prot |= PAGE_WRITE; 244 + } 245 + if (mmu == 0 && (cfg_x || tlb_x)) { 246 + res->prot |= PAGE_EXEC; 247 + } 248 + } else { 249 + D(dump_tlb(env, mmu)); 250 + } 251 + } else { 252 + /* If refill, provide a randomized set. */ 253 + set = env->mmu_rand_lfsr & 3; 254 + } 257 255 258 - if (!match && !debug) { 259 - cris_mmu_update_rand_lfsr(env); 256 + if (!match && !debug) { 257 + cris_mmu_update_rand_lfsr(env); 260 258 261 - /* Compute index. */ 262 - idx = vpage & 15; 259 + /* Compute index. */ 260 + idx = vpage & 15; 263 261 264 - /* Update RW_MM_TLB_SEL. */ 265 - env->sregs[SFR_RW_MM_TLB_SEL] = 0; 266 - set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4); 267 - set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2); 262 + /* Update RW_MM_TLB_SEL. */ 263 + env->sregs[SFR_RW_MM_TLB_SEL] = 0; 264 + set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4); 265 + set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2); 268 266 269 - /* Update RW_MM_CAUSE. 
*/ 270 - set_field(&r_cause, rwcause, 8, 2); 271 - set_field(&r_cause, vpage, 13, 19); 272 - set_field(&r_cause, pid, 0, 8); 273 - env->sregs[SFR_R_MM_CAUSE] = r_cause; 274 - D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc)); 275 - } 267 + /* Update RW_MM_CAUSE. */ 268 + set_field(&r_cause, rwcause, 8, 2); 269 + set_field(&r_cause, vpage, 13, 19); 270 + set_field(&r_cause, pid, 0, 8); 271 + env->sregs[SFR_R_MM_CAUSE] = r_cause; 272 + D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc)); 273 + } 276 274 277 - D(printf ("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x" 278 - " %x cause=%x sel=%x sp=%x %x %x\n", 279 - __func__, rw, match, env->pc, 280 - vaddr, vpage, 281 - tlb_vpn, tlb_pfn, tlb_pid, 282 - pid, 283 - r_cause, 284 - env->sregs[SFR_RW_MM_TLB_SEL], 285 - env->regs[R_SP], env->pregs[PR_USP], env->ksp)); 275 + D(printf("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x" 276 + " %x cause=%x sel=%x sp=%x %x %x\n", 277 + __func__, rw, match, env->pc, 278 + vaddr, vpage, 279 + tlb_vpn, tlb_pfn, tlb_pid, 280 + pid, 281 + r_cause, 282 + env->sregs[SFR_RW_MM_TLB_SEL], 283 + env->regs[R_SP], env->pregs[PR_USP], env->ksp)); 286 284 287 - res->phy = tlb_pfn << TARGET_PAGE_BITS; 288 - return !match; 285 + res->phy = tlb_pfn << TARGET_PAGE_BITS; 286 + return !match; 289 287 } 290 288 291 289 void cris_mmu_flush_pid(CPUCRISState *env, uint32_t pid) 292 290 { 293 - CRISCPU *cpu = cris_env_get_cpu(env); 294 - target_ulong vaddr; 295 - unsigned int idx; 296 - uint32_t lo, hi; 297 - uint32_t tlb_vpn; 298 - int tlb_pid, tlb_g, tlb_v; 299 - unsigned int set; 300 - unsigned int mmu; 291 + target_ulong vaddr; 292 + unsigned int idx; 293 + uint32_t lo, hi; 294 + uint32_t tlb_vpn; 295 + int tlb_pid, tlb_g, tlb_v; 296 + unsigned int set; 297 + unsigned int mmu; 301 298 302 - pid &= 0xff; 303 - for (mmu = 0; mmu < 2; mmu++) { 304 - for (set = 0; set < 4; set++) 305 - { 306 - for (idx = 0; idx < 16; idx++) { 307 - lo = env->tlbsets[mmu][set][idx].lo; 308 - 
hi = env->tlbsets[mmu][set][idx].hi; 309 - 310 - tlb_vpn = EXTRACT_FIELD(hi, 13, 31); 311 - tlb_pid = EXTRACT_FIELD(hi, 0, 7); 312 - tlb_g = EXTRACT_FIELD(lo, 4, 4); 313 - tlb_v = EXTRACT_FIELD(lo, 3, 3); 299 + pid &= 0xff; 300 + for (mmu = 0; mmu < 2; mmu++) { 301 + for (set = 0; set < 4; set++) { 302 + for (idx = 0; idx < 16; idx++) { 303 + lo = env->tlbsets[mmu][set][idx].lo; 304 + hi = env->tlbsets[mmu][set][idx].hi; 314 305 315 - if (tlb_v && !tlb_g && (tlb_pid == pid)) { 316 - vaddr = tlb_vpn << TARGET_PAGE_BITS; 317 - D_LOG("flush pid=%x vaddr=%x\n", 318 - pid, vaddr); 319 - tlb_flush_page(CPU(cpu), vaddr); 320 - } 321 - } 322 - } 323 - } 306 + tlb_vpn = EXTRACT_FIELD(hi, 13, 31); 307 + tlb_pid = EXTRACT_FIELD(hi, 0, 7); 308 + tlb_g = EXTRACT_FIELD(lo, 4, 4); 309 + tlb_v = EXTRACT_FIELD(lo, 3, 3); 310 + 311 + if (tlb_v && !tlb_g && (tlb_pid == pid)) { 312 + vaddr = tlb_vpn << TARGET_PAGE_BITS; 313 + D_LOG("flush pid=%x vaddr=%x\n", pid, vaddr); 314 + tlb_flush_page(env_cpu(env), vaddr); 315 + } 316 + } 317 + } 318 + } 324 319 } 325 320 326 321 int cris_mmu_translate(struct cris_mmu_result *res, 327 322 CPUCRISState *env, uint32_t vaddr, 328 323 int rw, int mmu_idx, int debug) 329 324 { 330 - int seg; 331 - int miss = 0; 332 - int is_user = mmu_idx == MMU_USER_IDX; 333 - uint32_t old_srs; 325 + int seg; 326 + int miss = 0; 327 + int is_user = mmu_idx == MMU_USER_IDX; 328 + uint32_t old_srs; 334 329 335 - old_srs= env->pregs[PR_SRS]; 330 + old_srs = env->pregs[PR_SRS]; 336 331 337 - /* rw == 2 means exec, map the access to the insn mmu. */ 338 - env->pregs[PR_SRS] = rw == 2 ? 1 : 2; 332 + /* rw == 2 means exec, map the access to the insn mmu. */ 333 + env->pregs[PR_SRS] = rw == 2 ? 
1 : 2; 339 334 340 - if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) { 341 - res->phy = vaddr; 342 - res->prot = PAGE_BITS; 343 - goto done; 344 - } 335 + if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) { 336 + res->phy = vaddr; 337 + res->prot = PAGE_BITS; 338 + goto done; 339 + } 345 340 346 - seg = vaddr >> 28; 347 - if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) 348 - { 349 - uint32_t base; 341 + seg = vaddr >> 28; 342 + if (!is_user && cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG])) { 343 + uint32_t base; 350 344 351 - miss = 0; 352 - base = cris_mmu_translate_seg(env, seg); 353 - res->phy = base | (0x0fffffff & vaddr); 354 - res->prot = PAGE_BITS; 355 - } else { 356 - miss = cris_mmu_translate_page(res, env, vaddr, rw, 357 - is_user, debug); 358 - } 359 - done: 360 - env->pregs[PR_SRS] = old_srs; 361 - return miss; 345 + miss = 0; 346 + base = cris_mmu_translate_seg(env, seg); 347 + res->phy = base | (0x0fffffff & vaddr); 348 + res->prot = PAGE_BITS; 349 + } else { 350 + miss = cris_mmu_translate_page(res, env, vaddr, rw, 351 + is_user, debug); 352 + } 353 + done: 354 + env->pregs[PR_SRS] = old_srs; 355 + return miss; 362 356 }
+401 -426
target/cris/op_helper.c
··· 39 39 40 40 void helper_raise_exception(CPUCRISState *env, uint32_t index) 41 41 { 42 - CPUState *cs = CPU(cris_env_get_cpu(env)); 42 + CPUState *cs = env_cpu(env); 43 43 44 44 cs->exception_index = index; 45 45 cpu_loop_exit(cs); ··· 48 48 void helper_tlb_flush_pid(CPUCRISState *env, uint32_t pid) 49 49 { 50 50 #if !defined(CONFIG_USER_ONLY) 51 - pid &= 0xff; 52 - if (pid != (env->pregs[PR_PID] & 0xff)) 53 - cris_mmu_flush_pid(env, env->pregs[PR_PID]); 51 + pid &= 0xff; 52 + if (pid != (env->pregs[PR_PID] & 0xff)) { 53 + cris_mmu_flush_pid(env, env->pregs[PR_PID]); 54 + } 54 55 #endif 55 56 } 56 57 57 58 void helper_spc_write(CPUCRISState *env, uint32_t new_spc) 58 59 { 59 60 #if !defined(CONFIG_USER_ONLY) 60 - CRISCPU *cpu = cris_env_get_cpu(env); 61 - CPUState *cs = CPU(cpu); 61 + CPUState *cs = env_cpu(env); 62 62 63 63 tlb_flush_page(cs, env->pregs[PR_SPC]); 64 64 tlb_flush_page(cs, new_spc); ··· 66 66 } 67 67 68 68 /* Used by the tlb decoder. */ 69 - #define EXTRACT_FIELD(src, start, end) \ 70 - (((src) >> start) & ((1 << (end - start + 1)) - 1)) 69 + #define EXTRACT_FIELD(src, start, end) \ 70 + (((src) >> start) & ((1 << (end - start + 1)) - 1)) 71 71 72 72 void helper_movl_sreg_reg(CPUCRISState *env, uint32_t sreg, uint32_t reg) 73 73 { 74 - #if !defined(CONFIG_USER_ONLY) 75 - CRISCPU *cpu = cris_env_get_cpu(env); 76 - #endif 77 - uint32_t srs; 78 - srs = env->pregs[PR_SRS]; 79 - srs &= 3; 80 - env->sregs[srs][sreg] = env->regs[reg]; 74 + uint32_t srs; 75 + srs = env->pregs[PR_SRS]; 76 + srs &= 3; 77 + env->sregs[srs][sreg] = env->regs[reg]; 81 78 82 79 #if !defined(CONFIG_USER_ONLY) 83 - if (srs == 1 || srs == 2) { 84 - if (sreg == 6) { 85 - /* Writes to tlb-hi write to mm_cause as a side 86 - effect. 
*/ 87 - env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg]; 88 - env->sregs[SFR_R_MM_CAUSE] = env->regs[reg]; 89 - } 90 - else if (sreg == 5) { 91 - uint32_t set; 92 - uint32_t idx; 93 - uint32_t lo, hi; 94 - uint32_t vaddr; 95 - int tlb_v; 80 + if (srs == 1 || srs == 2) { 81 + if (sreg == 6) { 82 + /* Writes to tlb-hi write to mm_cause as a side effect. */ 83 + env->sregs[SFR_RW_MM_TLB_HI] = env->regs[reg]; 84 + env->sregs[SFR_R_MM_CAUSE] = env->regs[reg]; 85 + } else if (sreg == 5) { 86 + uint32_t set; 87 + uint32_t idx; 88 + uint32_t lo, hi; 89 + uint32_t vaddr; 90 + int tlb_v; 96 91 97 - idx = set = env->sregs[SFR_RW_MM_TLB_SEL]; 98 - set >>= 4; 99 - set &= 3; 92 + idx = set = env->sregs[SFR_RW_MM_TLB_SEL]; 93 + set >>= 4; 94 + set &= 3; 100 95 101 - idx &= 15; 102 - /* We've just made a write to tlb_lo. */ 103 - lo = env->sregs[SFR_RW_MM_TLB_LO]; 104 - /* Writes are done via r_mm_cause. */ 105 - hi = env->sregs[SFR_R_MM_CAUSE]; 96 + idx &= 15; 97 + /* We've just made a write to tlb_lo. */ 98 + lo = env->sregs[SFR_RW_MM_TLB_LO]; 99 + /* Writes are done via r_mm_cause. 
*/ 100 + hi = env->sregs[SFR_R_MM_CAUSE]; 106 101 107 - vaddr = EXTRACT_FIELD(env->tlbsets[srs-1][set][idx].hi, 108 - 13, 31); 109 - vaddr <<= TARGET_PAGE_BITS; 110 - tlb_v = EXTRACT_FIELD(env->tlbsets[srs-1][set][idx].lo, 111 - 3, 3); 112 - env->tlbsets[srs - 1][set][idx].lo = lo; 113 - env->tlbsets[srs - 1][set][idx].hi = hi; 102 + vaddr = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].hi, 13, 31); 103 + vaddr <<= TARGET_PAGE_BITS; 104 + tlb_v = EXTRACT_FIELD(env->tlbsets[srs - 1][set][idx].lo, 3, 3); 105 + env->tlbsets[srs - 1][set][idx].lo = lo; 106 + env->tlbsets[srs - 1][set][idx].hi = hi; 114 107 115 - D_LOG("tlb flush vaddr=%x v=%d pc=%x\n", 116 - vaddr, tlb_v, env->pc); 117 - if (tlb_v) { 118 - tlb_flush_page(CPU(cpu), vaddr); 119 - } 120 - } 121 - } 108 + D_LOG("tlb flush vaddr=%x v=%d pc=%x\n", 109 + vaddr, tlb_v, env->pc); 110 + if (tlb_v) { 111 + tlb_flush_page(env_cpu(env), vaddr); 112 + } 113 + } 114 + } 122 115 #endif 123 116 } 124 117 125 118 void helper_movl_reg_sreg(CPUCRISState *env, uint32_t reg, uint32_t sreg) 126 119 { 127 - uint32_t srs; 128 - env->pregs[PR_SRS] &= 3; 129 - srs = env->pregs[PR_SRS]; 130 - 120 + uint32_t srs; 121 + env->pregs[PR_SRS] &= 3; 122 + srs = env->pregs[PR_SRS]; 123 + 131 124 #if !defined(CONFIG_USER_ONLY) 132 - if (srs == 1 || srs == 2) 133 - { 134 - uint32_t set; 135 - uint32_t idx; 136 - uint32_t lo, hi; 125 + if (srs == 1 || srs == 2) { 126 + uint32_t set; 127 + uint32_t idx; 128 + uint32_t lo, hi; 137 129 138 - idx = set = env->sregs[SFR_RW_MM_TLB_SEL]; 139 - set >>= 4; 140 - set &= 3; 141 - idx &= 15; 130 + idx = set = env->sregs[SFR_RW_MM_TLB_SEL]; 131 + set >>= 4; 132 + set &= 3; 133 + idx &= 15; 142 134 143 - /* Update the mirror regs. */ 144 - hi = env->tlbsets[srs - 1][set][idx].hi; 145 - lo = env->tlbsets[srs - 1][set][idx].lo; 146 - env->sregs[SFR_RW_MM_TLB_HI] = hi; 147 - env->sregs[SFR_RW_MM_TLB_LO] = lo; 148 - } 135 + /* Update the mirror regs. 
*/ 136 + hi = env->tlbsets[srs - 1][set][idx].hi; 137 + lo = env->tlbsets[srs - 1][set][idx].lo; 138 + env->sregs[SFR_RW_MM_TLB_HI] = hi; 139 + env->sregs[SFR_RW_MM_TLB_LO] = lo; 140 + } 149 141 #endif 150 - env->regs[reg] = env->sregs[srs][sreg]; 142 + env->regs[reg] = env->sregs[srs][sreg]; 151 143 } 152 144 153 145 static void cris_ccs_rshift(CPUCRISState *env) 154 146 { 155 - uint32_t ccs; 147 + uint32_t ccs; 156 148 157 - /* Apply the ccs shift. */ 158 - ccs = env->pregs[PR_CCS]; 159 - ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10); 160 - if (ccs & U_FLAG) 161 - { 162 - /* Enter user mode. */ 163 - env->ksp = env->regs[R_SP]; 164 - env->regs[R_SP] = env->pregs[PR_USP]; 165 - } 149 + /* Apply the ccs shift. */ 150 + ccs = env->pregs[PR_CCS]; 151 + ccs = (ccs & 0xc0000000) | ((ccs & 0x0fffffff) >> 10); 152 + if (ccs & U_FLAG) { 153 + /* Enter user mode. */ 154 + env->ksp = env->regs[R_SP]; 155 + env->regs[R_SP] = env->pregs[PR_USP]; 156 + } 166 157 167 - env->pregs[PR_CCS] = ccs; 158 + env->pregs[PR_CCS] = ccs; 168 159 } 169 160 170 161 void helper_rfe(CPUCRISState *env) 171 162 { 172 - int rflag = env->pregs[PR_CCS] & R_FLAG; 163 + int rflag = env->pregs[PR_CCS] & R_FLAG; 173 164 174 - D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n", 175 - env->pregs[PR_ERP], env->pregs[PR_PID], 176 - env->pregs[PR_CCS], 177 - env->btarget); 165 + D_LOG("rfe: erp=%x pid=%x ccs=%x btarget=%x\n", 166 + env->pregs[PR_ERP], env->pregs[PR_PID], 167 + env->pregs[PR_CCS], 168 + env->btarget); 178 169 179 - cris_ccs_rshift(env); 170 + cris_ccs_rshift(env); 180 171 181 - /* RFE sets the P_FLAG only if the R_FLAG is not set. */ 182 - if (!rflag) 183 - env->pregs[PR_CCS] |= P_FLAG; 172 + /* RFE sets the P_FLAG only if the R_FLAG is not set. 
*/ 173 + if (!rflag) { 174 + env->pregs[PR_CCS] |= P_FLAG; 175 + } 184 176 } 185 177 186 178 void helper_rfn(CPUCRISState *env) 187 179 { 188 - int rflag = env->pregs[PR_CCS] & R_FLAG; 180 + int rflag = env->pregs[PR_CCS] & R_FLAG; 189 181 190 - D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n", 191 - env->pregs[PR_ERP], env->pregs[PR_PID], 192 - env->pregs[PR_CCS], 193 - env->btarget); 182 + D_LOG("rfn: erp=%x pid=%x ccs=%x btarget=%x\n", 183 + env->pregs[PR_ERP], env->pregs[PR_PID], 184 + env->pregs[PR_CCS], 185 + env->btarget); 194 186 195 - cris_ccs_rshift(env); 187 + cris_ccs_rshift(env); 196 188 197 - /* Set the P_FLAG only if the R_FLAG is not set. */ 198 - if (!rflag) 199 - env->pregs[PR_CCS] |= P_FLAG; 189 + /* Set the P_FLAG only if the R_FLAG is not set. */ 190 + if (!rflag) { 191 + env->pregs[PR_CCS] |= P_FLAG; 192 + } 200 193 201 - /* Always set the M flag. */ 202 - env->pregs[PR_CCS] |= M_FLAG_V32; 194 + /* Always set the M flag. */ 195 + env->pregs[PR_CCS] |= M_FLAG_V32; 203 196 } 204 197 205 198 uint32_t helper_btst(CPUCRISState *env, uint32_t t0, uint32_t t1, uint32_t ccs) 206 199 { 207 - /* FIXME: clean this up. */ 200 + /* FIXME: clean this up. */ 208 201 209 - /* des ref: 210 - The N flag is set according to the selected bit in the dest reg. 211 - The Z flag is set if the selected bit and all bits to the right are 212 - zero. 213 - The X flag is cleared. 214 - Other flags are left untouched. 215 - The destination reg is not affected.*/ 216 - unsigned int fz, sbit, bset, mask, masked_t0; 202 + /* 203 + * des ref: 204 + * The N flag is set according to the selected bit in the dest reg. 205 + * The Z flag is set if the selected bit and all bits to the right are 206 + * zero. 207 + * The X flag is cleared. 208 + * Other flags are left untouched. 209 + * The destination reg is not affected. 210 + */ 211 + unsigned int fz, sbit, bset, mask, masked_t0; 217 212 218 - sbit = t1 & 31; 219 - bset = !!(t0 & (1 << sbit)); 220 - mask = sbit == 31 ? 
-1 : (1 << (sbit + 1)) - 1; 221 - masked_t0 = t0 & mask; 222 - fz = !(masked_t0 | bset); 213 + sbit = t1 & 31; 214 + bset = !!(t0 & (1 << sbit)); 215 + mask = sbit == 31 ? -1 : (1 << (sbit + 1)) - 1; 216 + masked_t0 = t0 & mask; 217 + fz = !(masked_t0 | bset); 223 218 224 - /* Clear the X, N and Z flags. */ 225 - ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG); 226 - if (env->pregs[PR_VR] < 32) 227 - ccs &= ~(V_FLAG | C_FLAG); 228 - /* Set the N and Z flags accordingly. */ 229 - ccs |= (bset << 3) | (fz << 2); 230 - return ccs; 219 + /* Clear the X, N and Z flags. */ 220 + ccs = ccs & ~(X_FLAG | N_FLAG | Z_FLAG); 221 + if (env->pregs[PR_VR] < 32) { 222 + ccs &= ~(V_FLAG | C_FLAG); 223 + } 224 + /* Set the N and Z flags accordingly. */ 225 + ccs |= (bset << 3) | (fz << 2); 226 + return ccs; 231 227 } 232 228 233 229 static inline uint32_t evaluate_flags_writeback(CPUCRISState *env, 234 230 uint32_t flags, uint32_t ccs) 235 231 { 236 - unsigned int x, z, mask; 232 + unsigned int x, z, mask; 237 233 238 - /* Extended arithmetics, leave the z flag alone. */ 239 - x = env->cc_x; 240 - mask = env->cc_mask | X_FLAG; 241 - if (x) { 242 - z = flags & Z_FLAG; 243 - mask = mask & ~z; 244 - } 245 - flags &= mask; 234 + /* Extended arithmetics, leave the z flag alone. */ 235 + x = env->cc_x; 236 + mask = env->cc_mask | X_FLAG; 237 + if (x) { 238 + z = flags & Z_FLAG; 239 + mask = mask & ~z; 240 + } 241 + flags &= mask; 246 242 247 - /* all insn clear the x-flag except setf or clrf. */ 248 - ccs &= ~mask; 249 - ccs |= flags; 250 - return ccs; 243 + /* all insn clear the x-flag except setf or clrf. 
*/ 244 + ccs &= ~mask; 245 + ccs |= flags; 246 + return ccs; 251 247 } 252 248 253 249 uint32_t helper_evaluate_flags_muls(CPUCRISState *env, 254 250 uint32_t ccs, uint32_t res, uint32_t mof) 255 251 { 256 - uint32_t flags = 0; 257 - int64_t tmp; 258 - int dneg; 252 + uint32_t flags = 0; 253 + int64_t tmp; 254 + int dneg; 259 255 260 - dneg = ((int32_t)res) < 0; 256 + dneg = ((int32_t)res) < 0; 261 257 262 - tmp = mof; 263 - tmp <<= 32; 264 - tmp |= res; 265 - if (tmp == 0) 266 - flags |= Z_FLAG; 267 - else if (tmp < 0) 268 - flags |= N_FLAG; 269 - if ((dneg && mof != -1) 270 - || (!dneg && mof != 0)) 271 - flags |= V_FLAG; 272 - return evaluate_flags_writeback(env, flags, ccs); 258 + tmp = mof; 259 + tmp <<= 32; 260 + tmp |= res; 261 + if (tmp == 0) { 262 + flags |= Z_FLAG; 263 + } else if (tmp < 0) { 264 + flags |= N_FLAG; 265 + } 266 + if ((dneg && mof != -1) || (!dneg && mof != 0)) { 267 + flags |= V_FLAG; 268 + } 269 + return evaluate_flags_writeback(env, flags, ccs); 273 270 } 274 271 275 272 uint32_t helper_evaluate_flags_mulu(CPUCRISState *env, 276 273 uint32_t ccs, uint32_t res, uint32_t mof) 277 274 { 278 - uint32_t flags = 0; 279 - uint64_t tmp; 275 + uint32_t flags = 0; 276 + uint64_t tmp; 280 277 281 - tmp = mof; 282 - tmp <<= 32; 283 - tmp |= res; 284 - if (tmp == 0) 285 - flags |= Z_FLAG; 286 - else if (tmp >> 63) 287 - flags |= N_FLAG; 288 - if (mof) 289 - flags |= V_FLAG; 278 + tmp = mof; 279 + tmp <<= 32; 280 + tmp |= res; 281 + if (tmp == 0) { 282 + flags |= Z_FLAG; 283 + } else if (tmp >> 63) { 284 + flags |= N_FLAG; 285 + } 286 + if (mof) { 287 + flags |= V_FLAG; 288 + } 290 289 291 - return evaluate_flags_writeback(env, flags, ccs); 290 + return evaluate_flags_writeback(env, flags, ccs); 292 291 } 293 292 294 293 uint32_t helper_evaluate_flags_mcp(CPUCRISState *env, uint32_t ccs, 295 294 uint32_t src, uint32_t dst, uint32_t res) 296 295 { 297 - uint32_t flags = 0; 296 + uint32_t flags = 0; 298 297 299 - src = src & 0x80000000; 300 - dst = dst 
& 0x80000000; 298 + src = src & 0x80000000; 299 + dst = dst & 0x80000000; 301 300 302 - if ((res & 0x80000000L) != 0L) 303 - { 304 - flags |= N_FLAG; 305 - if (!src && !dst) 306 - flags |= V_FLAG; 307 - else if (src & dst) 308 - flags |= R_FLAG; 309 - } 310 - else 311 - { 312 - if (res == 0L) 313 - flags |= Z_FLAG; 314 - if (src & dst) 315 - flags |= V_FLAG; 316 - if (dst | src) 317 - flags |= R_FLAG; 318 - } 301 + if ((res & 0x80000000L) != 0L) { 302 + flags |= N_FLAG; 303 + if (!src && !dst) { 304 + flags |= V_FLAG; 305 + } else if (src & dst) { 306 + flags |= R_FLAG; 307 + } 308 + } else { 309 + if (res == 0L) { 310 + flags |= Z_FLAG; 311 + } 312 + if (src & dst) { 313 + flags |= V_FLAG; 314 + } 315 + if (dst | src) { 316 + flags |= R_FLAG; 317 + } 318 + } 319 319 320 - return evaluate_flags_writeback(env, flags, ccs); 320 + return evaluate_flags_writeback(env, flags, ccs); 321 321 } 322 322 323 323 uint32_t helper_evaluate_flags_alu_4(CPUCRISState *env, uint32_t ccs, 324 324 uint32_t src, uint32_t dst, uint32_t res) 325 325 { 326 - uint32_t flags = 0; 326 + uint32_t flags = 0; 327 327 328 - src = src & 0x80000000; 329 - dst = dst & 0x80000000; 328 + src = src & 0x80000000; 329 + dst = dst & 0x80000000; 330 330 331 - if ((res & 0x80000000L) != 0L) 332 - { 333 - flags |= N_FLAG; 334 - if (!src && !dst) 335 - flags |= V_FLAG; 336 - else if (src & dst) 337 - flags |= C_FLAG; 338 - } 339 - else 340 - { 341 - if (res == 0L) 342 - flags |= Z_FLAG; 343 - if (src & dst) 344 - flags |= V_FLAG; 345 - if (dst | src) 346 - flags |= C_FLAG; 347 - } 331 + if ((res & 0x80000000L) != 0L) { 332 + flags |= N_FLAG; 333 + if (!src && !dst) { 334 + flags |= V_FLAG; 335 + } else if (src & dst) { 336 + flags |= C_FLAG; 337 + } 338 + } else { 339 + if (res == 0L) { 340 + flags |= Z_FLAG; 341 + } 342 + if (src & dst) { 343 + flags |= V_FLAG; 344 + } 345 + if (dst | src) { 346 + flags |= C_FLAG; 347 + } 348 + } 348 349 349 - return evaluate_flags_writeback(env, flags, ccs); 350 + return 
evaluate_flags_writeback(env, flags, ccs); 350 351 } 351 352 352 353 uint32_t helper_evaluate_flags_sub_4(CPUCRISState *env, uint32_t ccs, 353 354 uint32_t src, uint32_t dst, uint32_t res) 354 355 { 355 - uint32_t flags = 0; 356 + uint32_t flags = 0; 356 357 357 - src = (~src) & 0x80000000; 358 - dst = dst & 0x80000000; 358 + src = (~src) & 0x80000000; 359 + dst = dst & 0x80000000; 359 360 360 - if ((res & 0x80000000L) != 0L) 361 - { 362 - flags |= N_FLAG; 363 - if (!src && !dst) 364 - flags |= V_FLAG; 365 - else if (src & dst) 366 - flags |= C_FLAG; 367 - } 368 - else 369 - { 370 - if (res == 0L) 371 - flags |= Z_FLAG; 372 - if (src & dst) 373 - flags |= V_FLAG; 374 - if (dst | src) 375 - flags |= C_FLAG; 376 - } 361 + if ((res & 0x80000000L) != 0L) { 362 + flags |= N_FLAG; 363 + if (!src && !dst) { 364 + flags |= V_FLAG; 365 + } else if (src & dst) { 366 + flags |= C_FLAG; 367 + } 368 + } else { 369 + if (res == 0L) { 370 + flags |= Z_FLAG; 371 + } 372 + if (src & dst) { 373 + flags |= V_FLAG; 374 + } 375 + if (dst | src) { 376 + flags |= C_FLAG; 377 + } 378 + } 377 379 378 - flags ^= C_FLAG; 379 - return evaluate_flags_writeback(env, flags, ccs); 380 + flags ^= C_FLAG; 381 + return evaluate_flags_writeback(env, flags, ccs); 380 382 } 381 383 382 384 uint32_t helper_evaluate_flags_move_4(CPUCRISState *env, 383 385 uint32_t ccs, uint32_t res) 384 386 { 385 - uint32_t flags = 0; 387 + uint32_t flags = 0; 386 388 387 - if ((int32_t)res < 0) 388 - flags |= N_FLAG; 389 - else if (res == 0L) 390 - flags |= Z_FLAG; 389 + if ((int32_t)res < 0) { 390 + flags |= N_FLAG; 391 + } else if (res == 0L) { 392 + flags |= Z_FLAG; 393 + } 391 394 392 - return evaluate_flags_writeback(env, flags, ccs); 395 + return evaluate_flags_writeback(env, flags, ccs); 393 396 } 397 + 394 398 uint32_t helper_evaluate_flags_move_2(CPUCRISState *env, 395 399 uint32_t ccs, uint32_t res) 396 400 { 397 - uint32_t flags = 0; 401 + uint32_t flags = 0; 398 402 399 - if ((int16_t)res < 0L) 400 - flags 
|= N_FLAG; 401 - else if (res == 0) 402 - flags |= Z_FLAG; 403 + if ((int16_t)res < 0L) { 404 + flags |= N_FLAG; 405 + } else if (res == 0) { 406 + flags |= Z_FLAG; 407 + } 403 408 404 - return evaluate_flags_writeback(env, flags, ccs); 409 + return evaluate_flags_writeback(env, flags, ccs); 405 410 } 406 411 407 - /* TODO: This is expensive. We could split things up and only evaluate part of 408 - CCR on a need to know basis. For now, we simply re-evaluate everything. */ 412 + /* 413 + * TODO: This is expensive. We could split things up and only evaluate part of 414 + * CCR on a need to know basis. For now, we simply re-evaluate everything. 415 + */ 409 416 void helper_evaluate_flags(CPUCRISState *env) 410 417 { 411 - uint32_t src, dst, res; 412 - uint32_t flags = 0; 418 + uint32_t src, dst, res; 419 + uint32_t flags = 0; 413 420 414 - src = env->cc_src; 415 - dst = env->cc_dest; 416 - res = env->cc_result; 421 + src = env->cc_src; 422 + dst = env->cc_dest; 423 + res = env->cc_result; 417 424 418 - if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) 419 - src = ~src; 425 + if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) { 426 + src = ~src; 427 + } 420 428 421 - /* Now, evaluate the flags. This stuff is based on 422 - Per Zander's CRISv10 simulator. 
*/ 423 - switch (env->cc_size) 424 - { 425 - case 1: 426 - if ((res & 0x80L) != 0L) 427 - { 428 - flags |= N_FLAG; 429 - if (((src & 0x80L) == 0L) 430 - && ((dst & 0x80L) == 0L)) 431 - { 432 - flags |= V_FLAG; 433 - } 434 - else if (((src & 0x80L) != 0L) 435 - && ((dst & 0x80L) != 0L)) 436 - { 437 - flags |= C_FLAG; 438 - } 439 - } 440 - else 441 - { 442 - if ((res & 0xFFL) == 0L) 443 - { 444 - flags |= Z_FLAG; 445 - } 446 - if (((src & 0x80L) != 0L) 447 - && ((dst & 0x80L) != 0L)) 448 - { 449 - flags |= V_FLAG; 450 - } 451 - if ((dst & 0x80L) != 0L 452 - || (src & 0x80L) != 0L) 453 - { 454 - flags |= C_FLAG; 455 - } 456 - } 457 - break; 458 - case 2: 459 - if ((res & 0x8000L) != 0L) 460 - { 461 - flags |= N_FLAG; 462 - if (((src & 0x8000L) == 0L) 463 - && ((dst & 0x8000L) == 0L)) 464 - { 465 - flags |= V_FLAG; 466 - } 467 - else if (((src & 0x8000L) != 0L) 468 - && ((dst & 0x8000L) != 0L)) 469 - { 470 - flags |= C_FLAG; 471 - } 472 - } 473 - else 474 - { 475 - if ((res & 0xFFFFL) == 0L) 476 - { 477 - flags |= Z_FLAG; 478 - } 479 - if (((src & 0x8000L) != 0L) 480 - && ((dst & 0x8000L) != 0L)) 481 - { 482 - flags |= V_FLAG; 483 - } 484 - if ((dst & 0x8000L) != 0L 485 - || (src & 0x8000L) != 0L) 486 - { 487 - flags |= C_FLAG; 488 - } 489 - } 490 - break; 491 - case 4: 492 - if ((res & 0x80000000L) != 0L) 493 - { 494 - flags |= N_FLAG; 495 - if (((src & 0x80000000L) == 0L) 496 - && ((dst & 0x80000000L) == 0L)) 497 - { 498 - flags |= V_FLAG; 499 - } 500 - else if (((src & 0x80000000L) != 0L) && 501 - ((dst & 0x80000000L) != 0L)) 502 - { 503 - flags |= C_FLAG; 504 - } 505 - } 506 - else 507 - { 508 - if (res == 0L) 509 - flags |= Z_FLAG; 510 - if (((src & 0x80000000L) != 0L) 511 - && ((dst & 0x80000000L) != 0L)) 512 - flags |= V_FLAG; 513 - if ((dst & 0x80000000L) != 0L 514 - || (src & 0x80000000L) != 0L) 515 - flags |= C_FLAG; 516 - } 517 - break; 518 - default: 519 - break; 520 - } 429 + /* 430 + * Now, evaluate the flags. 
This stuff is based on 431 + * Per Zander's CRISv10 simulator. 432 + */ 433 + switch (env->cc_size) { 434 + case 1: 435 + if ((res & 0x80L) != 0L) { 436 + flags |= N_FLAG; 437 + if (((src & 0x80L) == 0L) && ((dst & 0x80L) == 0L)) { 438 + flags |= V_FLAG; 439 + } else if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) { 440 + flags |= C_FLAG; 441 + } 442 + } else { 443 + if ((res & 0xFFL) == 0L) { 444 + flags |= Z_FLAG; 445 + } 446 + if (((src & 0x80L) != 0L) && ((dst & 0x80L) != 0L)) { 447 + flags |= V_FLAG; 448 + } 449 + if ((dst & 0x80L) != 0L || (src & 0x80L) != 0L) { 450 + flags |= C_FLAG; 451 + } 452 + } 453 + break; 454 + case 2: 455 + if ((res & 0x8000L) != 0L) { 456 + flags |= N_FLAG; 457 + if (((src & 0x8000L) == 0L) && ((dst & 0x8000L) == 0L)) { 458 + flags |= V_FLAG; 459 + } else if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) { 460 + flags |= C_FLAG; 461 + } 462 + } else { 463 + if ((res & 0xFFFFL) == 0L) { 464 + flags |= Z_FLAG; 465 + } 466 + if (((src & 0x8000L) != 0L) && ((dst & 0x8000L) != 0L)) { 467 + flags |= V_FLAG; 468 + } 469 + if ((dst & 0x8000L) != 0L || (src & 0x8000L) != 0L) { 470 + flags |= C_FLAG; 471 + } 472 + } 473 + break; 474 + case 4: 475 + if ((res & 0x80000000L) != 0L) { 476 + flags |= N_FLAG; 477 + if (((src & 0x80000000L) == 0L) && ((dst & 0x80000000L) == 0L)) { 478 + flags |= V_FLAG; 479 + } else if (((src & 0x80000000L) != 0L) && 480 + ((dst & 0x80000000L) != 0L)) { 481 + flags |= C_FLAG; 482 + } 483 + } else { 484 + if (res == 0L) { 485 + flags |= Z_FLAG; 486 + } 487 + if (((src & 0x80000000L) != 0L) && ((dst & 0x80000000L) != 0L)) { 488 + flags |= V_FLAG; 489 + } 490 + if ((dst & 0x80000000L) != 0L || (src & 0x80000000L) != 0L) { 491 + flags |= C_FLAG; 492 + } 493 + } 494 + break; 495 + default: 496 + break; 497 + } 521 498 522 - if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) 523 - flags ^= C_FLAG; 499 + if (env->cc_op == CC_OP_SUB || env->cc_op == CC_OP_CMP) { 500 + flags ^= C_FLAG; 501 + } 524 502 525 - 
env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags, 526 - env->pregs[PR_CCS]); 503 + env->pregs[PR_CCS] = evaluate_flags_writeback(env, flags, 504 + env->pregs[PR_CCS]); 527 505 } 528 506 529 507 void helper_top_evaluate_flags(CPUCRISState *env) 530 508 { 531 - switch (env->cc_op) 532 - { 533 - case CC_OP_MCP: 534 - env->pregs[PR_CCS] = helper_evaluate_flags_mcp(env, 535 - env->pregs[PR_CCS], env->cc_src, 536 - env->cc_dest, env->cc_result); 537 - break; 538 - case CC_OP_MULS: 539 - env->pregs[PR_CCS] = helper_evaluate_flags_muls(env, 540 - env->pregs[PR_CCS], env->cc_result, 541 - env->pregs[PR_MOF]); 542 - break; 543 - case CC_OP_MULU: 544 - env->pregs[PR_CCS] = helper_evaluate_flags_mulu(env, 545 - env->pregs[PR_CCS], env->cc_result, 546 - env->pregs[PR_MOF]); 547 - break; 548 - case CC_OP_MOVE: 549 - case CC_OP_AND: 550 - case CC_OP_OR: 551 - case CC_OP_XOR: 552 - case CC_OP_ASR: 553 - case CC_OP_LSR: 554 - case CC_OP_LSL: 555 - switch (env->cc_size) 556 - { 557 - case 4: 558 - env->pregs[PR_CCS] = 559 - helper_evaluate_flags_move_4(env, 560 - env->pregs[PR_CCS], 561 - env->cc_result); 562 - break; 563 - case 2: 564 - env->pregs[PR_CCS] = 565 - helper_evaluate_flags_move_2(env, 566 - env->pregs[PR_CCS], 567 - env->cc_result); 568 - break; 569 - default: 570 - helper_evaluate_flags(env); 571 - break; 572 - } 573 - break; 574 - case CC_OP_FLAGS: 575 - /* live. 
*/ 576 - break; 577 - case CC_OP_SUB: 578 - case CC_OP_CMP: 579 - if (env->cc_size == 4) 580 - env->pregs[PR_CCS] = 581 - helper_evaluate_flags_sub_4(env, 582 - env->pregs[PR_CCS], 583 - env->cc_src, env->cc_dest, 584 - env->cc_result); 585 - else 586 - helper_evaluate_flags(env); 587 - break; 588 - default: 589 - { 590 - switch (env->cc_size) 591 - { 592 - case 4: 593 - env->pregs[PR_CCS] = 594 - helper_evaluate_flags_alu_4(env, 595 - env->pregs[PR_CCS], 596 - env->cc_src, env->cc_dest, 597 - env->cc_result); 598 - break; 599 - default: 600 - helper_evaluate_flags(env); 601 - break; 602 - } 603 - } 604 - break; 605 - } 509 + switch (env->cc_op) { 510 + case CC_OP_MCP: 511 + env->pregs[PR_CCS] 512 + = helper_evaluate_flags_mcp(env, env->pregs[PR_CCS], 513 + env->cc_src, env->cc_dest, 514 + env->cc_result); 515 + break; 516 + case CC_OP_MULS: 517 + env->pregs[PR_CCS] 518 + = helper_evaluate_flags_muls(env, env->pregs[PR_CCS], 519 + env->cc_result, env->pregs[PR_MOF]); 520 + break; 521 + case CC_OP_MULU: 522 + env->pregs[PR_CCS] 523 + = helper_evaluate_flags_mulu(env, env->pregs[PR_CCS], 524 + env->cc_result, env->pregs[PR_MOF]); 525 + break; 526 + case CC_OP_MOVE: 527 + case CC_OP_AND: 528 + case CC_OP_OR: 529 + case CC_OP_XOR: 530 + case CC_OP_ASR: 531 + case CC_OP_LSR: 532 + case CC_OP_LSL: 533 + switch (env->cc_size) { 534 + case 4: 535 + env->pregs[PR_CCS] = 536 + helper_evaluate_flags_move_4(env, 537 + env->pregs[PR_CCS], 538 + env->cc_result); 539 + break; 540 + case 2: 541 + env->pregs[PR_CCS] = 542 + helper_evaluate_flags_move_2(env, 543 + env->pregs[PR_CCS], 544 + env->cc_result); 545 + break; 546 + default: 547 + helper_evaluate_flags(env); 548 + break; 549 + } 550 + break; 551 + case CC_OP_FLAGS: 552 + /* live. 
*/ 553 + break; 554 + case CC_OP_SUB: 555 + case CC_OP_CMP: 556 + if (env->cc_size == 4) { 557 + env->pregs[PR_CCS] = 558 + helper_evaluate_flags_sub_4(env, 559 + env->pregs[PR_CCS], 560 + env->cc_src, env->cc_dest, 561 + env->cc_result); 562 + } else { 563 + helper_evaluate_flags(env); 564 + } 565 + break; 566 + default: 567 + switch (env->cc_size) { 568 + case 4: 569 + env->pregs[PR_CCS] = 570 + helper_evaluate_flags_alu_4(env, 571 + env->pregs[PR_CCS], 572 + env->cc_src, env->cc_dest, 573 + env->cc_result); 574 + break; 575 + default: 576 + helper_evaluate_flags(env); 577 + break; 578 + } 579 + break; 580 + } 606 581 }
+1 -1
target/cris/translate.c
··· 3097 3097 * delayslot, like in real hw. 3098 3098 */ 3099 3099 pc_start = tb->pc & ~1; 3100 - dc->cpu = cris_env_get_cpu(env); 3100 + dc->cpu = env_archcpu(env); 3101 3101 dc->tb = tb; 3102 3102 3103 3103 dc->is_jmp = DISAS_NEXT;
+34
target/hppa/cpu-param.h
··· 1 + /* 2 + * PA-RISC cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2016 Richard Henderson <rth@twiddle.net> 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef HPPA_CPU_PARAM_H 9 + #define HPPA_CPU_PARAM_H 1 10 + 11 + #ifdef TARGET_HPPA64 12 + # define TARGET_LONG_BITS 64 13 + # define TARGET_REGISTER_BITS 64 14 + # define TARGET_VIRT_ADDR_SPACE_BITS 64 15 + # define TARGET_PHYS_ADDR_SPACE_BITS 64 16 + #elif defined(CONFIG_USER_ONLY) 17 + # define TARGET_LONG_BITS 32 18 + # define TARGET_REGISTER_BITS 32 19 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 20 + # define TARGET_PHYS_ADDR_SPACE_BITS 32 21 + #else 22 + /* 23 + * In order to form the GVA from space:offset, 24 + * we need a 64-bit virtual address space. 25 + */ 26 + # define TARGET_LONG_BITS 64 27 + # define TARGET_REGISTER_BITS 32 28 + # define TARGET_VIRT_ADDR_SPACE_BITS 64 29 + # define TARGET_PHYS_ADDR_SPACE_BITS 32 30 + #endif 31 + #define TARGET_PAGE_BITS 12 32 + #define NB_MMU_MODES 5 33 + 34 + #endif
+1 -1
target/hppa/cpu.c
··· 134 134 HPPACPU *cpu = HPPA_CPU(obj); 135 135 CPUHPPAState *env = &cpu->env; 136 136 137 - cs->env_ptr = env; 137 + cpu_set_cpustate_pointers(cpu); 138 138 cs->exception_index = -1; 139 139 cpu_hppa_loaded_fr0(env); 140 140 cpu_hppa_put_psw(env, PSW_W);
+4 -34
target/hppa/cpu.h
··· 22 22 23 23 #include "qemu-common.h" 24 24 #include "cpu-qom.h" 25 + #include "exec/cpu-defs.h" 25 26 26 - #ifdef TARGET_HPPA64 27 - #define TARGET_LONG_BITS 64 28 - #define TARGET_VIRT_ADDR_SPACE_BITS 64 29 - #define TARGET_REGISTER_BITS 64 30 - #define TARGET_PHYS_ADDR_SPACE_BITS 64 31 - #elif defined(CONFIG_USER_ONLY) 32 - #define TARGET_LONG_BITS 32 33 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 34 - #define TARGET_REGISTER_BITS 32 35 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 36 - #else 37 - /* In order to form the GVA from space:offset, 38 - we need a 64-bit virtual address space. */ 39 - #define TARGET_LONG_BITS 64 40 - #define TARGET_VIRT_ADDR_SPACE_BITS 64 41 - #define TARGET_REGISTER_BITS 32 42 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 43 - #endif 44 27 45 28 /* PA-RISC 1.x processors have a strong memory model. */ 46 29 /* ??? While we do not yet implement PA-RISC 2.0, those processors have ··· 48 31 basis. It's probably easier to fall back to a strong memory model. */ 49 32 #define TCG_GUEST_DEFAULT_MO TCG_MO_ALL 50 33 51 - #define CPUArchState struct CPUHPPAState 52 - 53 - #include "exec/cpu-defs.h" 54 - 55 - #define TARGET_PAGE_BITS 12 56 - 57 34 #define ALIGNED_ONLY 58 - #define NB_MMU_MODES 5 59 35 #define MMU_KERNEL_IDX 0 60 36 #define MMU_USER_IDX 3 61 37 #define MMU_PHYS_IDX 4 ··· 221 197 target_ureg cr_back[2]; /* back of cr17/cr18 */ 222 198 target_ureg shadow[7]; /* shadow registers */ 223 199 224 - /* Those resources are used only in QEMU core */ 225 - CPU_COMMON 226 - 227 200 /* ??? The number of entries isn't specified by the architecture. */ 228 201 /* ??? Implement a unified itlb/dtlb for the moment. */ 229 202 /* ??? We should use a more intelligent data structure. 
*/ ··· 242 215 CPUState parent_obj; 243 216 /*< public >*/ 244 217 218 + CPUNegativeOffsetState neg; 245 219 CPUHPPAState env; 246 220 QEMUTimer *alarm_timer; 247 221 }; 248 222 249 - static inline HPPACPU *hppa_env_get_cpu(CPUHPPAState *env) 250 - { 251 - return container_of(env, HPPACPU, env); 252 - } 253 223 254 - #define ENV_GET_CPU(e) CPU(hppa_env_get_cpu(e)) 255 - #define ENV_OFFSET offsetof(HPPACPU, env) 224 + typedef CPUHPPAState CPUArchState; 225 + typedef HPPACPU ArchCPU; 256 226 257 227 #include "exec/cpu-all.h" 258 228
+1 -2
target/hppa/helper.c
··· 71 71 /* If PSW_P changes, it affects how we translate addresses. */ 72 72 if ((psw ^ old_psw) & PSW_P) { 73 73 #ifndef CONFIG_USER_ONLY 74 - CPUState *src = CPU(hppa_env_get_cpu(env)); 75 - tlb_flush_by_mmuidx(src, 0xf); 74 + tlb_flush_by_mmuidx(env_cpu(env), 0xf); 76 75 #endif 77 76 } 78 77 }
+2 -2
target/hppa/int_helper.c
··· 77 77 { 78 78 env->cr[CR_EIRR] &= ~val; 79 79 qemu_mutex_lock_iothread(); 80 - eval_interrupt(hppa_env_get_cpu(env)); 80 + eval_interrupt(env_archcpu(env)); 81 81 qemu_mutex_unlock_iothread(); 82 82 } 83 83 ··· 85 85 { 86 86 env->cr[CR_EIEM] = val; 87 87 qemu_mutex_lock_iothread(); 88 - eval_interrupt(hppa_env_get_cpu(env)); 88 + eval_interrupt(env_archcpu(env)); 89 89 qemu_mutex_unlock_iothread(); 90 90 } 91 91 #endif /* !CONFIG_USER_ONLY */
+4 -6
target/hppa/mem_helper.c
··· 56 56 57 57 static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent) 58 58 { 59 - CPUState *cs = CPU(hppa_env_get_cpu(env)); 59 + CPUState *cs = env_cpu(env); 60 60 unsigned i, n = 1 << (2 * ent->page_size); 61 61 uint64_t addr = ent->va_b; 62 62 ··· 329 329 330 330 void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr) 331 331 { 332 - CPUState *src = CPU(hppa_env_get_cpu(env)); 332 + CPUState *src = env_cpu(env); 333 333 CPUState *cpu; 334 334 trace_hppa_tlb_ptlb(env); 335 335 run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr); ··· 346 346 number of pages/entries (we choose all), and is local to the cpu. */ 347 347 void HELPER(ptlbe)(CPUHPPAState *env) 348 348 { 349 - CPUState *src = CPU(hppa_env_get_cpu(env)); 350 349 trace_hppa_tlb_ptlbe(env); 351 350 memset(env->tlb, 0, sizeof(env->tlb)); 352 - tlb_flush_by_mmuidx(src, 0xf); 351 + tlb_flush_by_mmuidx(env_cpu(env), 0xf); 353 352 } 354 353 355 354 void cpu_hppa_change_prot_id(CPUHPPAState *env) 356 355 { 357 356 if (env->psw & PSW_P) { 358 - CPUState *src = CPU(hppa_env_get_cpu(env)); 359 - tlb_flush_by_mmuidx(src, 0xf); 357 + tlb_flush_by_mmuidx(env_cpu(env), 0xf); 360 358 } 361 359 } 362 360
+4 -6
target/hppa/op_helper.c
··· 29 29 30 30 void QEMU_NORETURN HELPER(excp)(CPUHPPAState *env, int excp) 31 31 { 32 - HPPACPU *cpu = hppa_env_get_cpu(env); 33 - CPUState *cs = CPU(cpu); 32 + CPUState *cs = env_cpu(env); 34 33 35 34 cs->exception_index = excp; 36 35 cpu_loop_exit(cs); ··· 38 37 39 38 void QEMU_NORETURN hppa_dynamic_excp(CPUHPPAState *env, int excp, uintptr_t ra) 40 39 { 41 - HPPACPU *cpu = hppa_env_get_cpu(env); 42 - CPUState *cs = CPU(cpu); 40 + CPUState *cs = env_cpu(env); 43 41 44 42 cs->exception_index = excp; 45 43 cpu_loop_exit_restore(cs, ra); ··· 77 75 } 78 76 #else 79 77 /* FIXME -- we can do better. */ 80 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); 78 + cpu_loop_exit_atomic(env_cpu(env), ra); 81 79 #endif 82 80 } 83 81 ··· 630 628 #ifndef CONFIG_USER_ONLY 631 629 void HELPER(write_interval_timer)(CPUHPPAState *env, target_ureg val) 632 630 { 633 - HPPACPU *cpu = hppa_env_get_cpu(env); 631 + HPPACPU *cpu = env_archcpu(env); 634 632 uint64_t current = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); 635 633 uint64_t timeout; 636 634
+2 -2
target/i386/bpt_helper.c
··· 53 53 54 54 static int hw_breakpoint_insert(CPUX86State *env, int index) 55 55 { 56 - CPUState *cs = CPU(x86_env_get_cpu(env)); 56 + CPUState *cs = env_cpu(env); 57 57 target_ulong dr7 = env->dr[7]; 58 58 target_ulong drN = env->dr[index]; 59 59 int err = 0; ··· 97 97 98 98 static void hw_breakpoint_remove(CPUX86State *env, int index) 99 99 { 100 - CPUState *cs = CPU(x86_env_get_cpu(env)); 100 + CPUState *cs = env_cpu(env); 101 101 102 102 switch (hw_breakpoint_type(env->dr[7], index)) { 103 103 case DR7_TYPE_BP_INST:
+28
target/i386/cpu-param.h
··· 1 + /* 2 + * i386 cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2003 Fabrice Bellard 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef I386_CPU_PARAM_H 9 + #define I386_CPU_PARAM_H 1 10 + 11 + #ifdef TARGET_X86_64 12 + # define TARGET_LONG_BITS 64 13 + # define TARGET_PHYS_ADDR_SPACE_BITS 52 14 + /* 15 + * ??? This is really 48 bits, sign-extended, but the only thing 16 + * accessible to userland with bit 48 set is the VSYSCALL, and that 17 + * is handled via other mechanisms. 18 + */ 19 + # define TARGET_VIRT_ADDR_SPACE_BITS 47 20 + #else 21 + # define TARGET_LONG_BITS 32 22 + # define TARGET_PHYS_ADDR_SPACE_BITS 36 23 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 24 + #endif 25 + #define TARGET_PAGE_BITS 12 26 + #define NB_MMU_MODES 3 27 + 28 + #endif
+3 -4
target/i386/cpu.c
··· 4222 4222 uint32_t *eax, uint32_t *ebx, 4223 4223 uint32_t *ecx, uint32_t *edx) 4224 4224 { 4225 - X86CPU *cpu = x86_env_get_cpu(env); 4226 - CPUState *cs = CPU(cpu); 4225 + X86CPU *cpu = env_archcpu(env); 4226 + CPUState *cs = env_cpu(env); 4227 4227 uint32_t pkg_offset; 4228 4228 uint32_t limit; 4229 4229 uint32_t signature[3]; ··· 5592 5592 5593 5593 static void x86_cpu_initfn(Object *obj) 5594 5594 { 5595 - CPUState *cs = CPU(obj); 5596 5595 X86CPU *cpu = X86_CPU(obj); 5597 5596 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 5598 5597 CPUX86State *env = &cpu->env; 5599 5598 FeatureWord w; 5600 5599 5601 - cs->env_ptr = env; 5600 + cpu_set_cpustate_pointers(cpu); 5602 5601 5603 5602 object_property_add(obj, "family", "int", 5604 5603 x86_cpuid_version_get_family,
+5 -35
target/i386/cpu.h
··· 1 - 2 1 /* 3 2 * i386 virtual CPU header 4 3 * ··· 24 23 #include "qemu-common.h" 25 24 #include "cpu-qom.h" 26 25 #include "hyperv-proto.h" 27 - 28 - #ifdef TARGET_X86_64 29 - #define TARGET_LONG_BITS 64 30 - #else 31 - #define TARGET_LONG_BITS 32 32 - #endif 33 - 34 26 #include "exec/cpu-defs.h" 35 27 36 28 /* The x86 has a strong memory model with some store-after-load re-ordering */ ··· 50 42 #define I386_ELF_MACHINE EM_386 51 43 #define ELF_MACHINE_UNAME "i686" 52 44 #endif 53 - 54 - #define CPUArchState struct CPUX86State 55 45 56 46 enum { 57 47 R_EAX = 0, ··· 956 946 #define MAX_FIXED_COUNTERS 3 957 947 #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) 958 948 959 - #define NB_MMU_MODES 3 960 949 #define TARGET_INSN_START_EXTRA_WORDS 1 961 950 962 951 #define NB_OPMASK_REGS 8 ··· 1300 1289 /* Fields up to this point are cleared by a CPU reset */ 1301 1290 struct {} end_reset_fields; 1302 1291 1303 - CPU_COMMON 1304 - 1305 - /* Fields after CPU_COMMON are preserved across CPU reset. */ 1292 + /* Fields after this point are preserved across CPU reset. */ 1306 1293 1307 1294 /* processor features (e.g. 
for CPUID insn) */ 1308 1295 /* Minimum level/xlevel/xlevel2, based on CPU model + features */ ··· 1380 1367 CPUState parent_obj; 1381 1368 /*< public >*/ 1382 1369 1370 + CPUNegativeOffsetState neg; 1383 1371 CPUX86State env; 1384 1372 1385 1373 bool hyperv_vapic; ··· 1491 1479 int32_t hv_max_vps; 1492 1480 }; 1493 1481 1494 - static inline X86CPU *x86_env_get_cpu(CPUX86State *env) 1495 - { 1496 - return container_of(env, X86CPU, env); 1497 - } 1498 - 1499 - #define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e)) 1500 - 1501 - #define ENV_OFFSET offsetof(X86CPU, env) 1502 1482 1503 1483 #ifndef CONFIG_USER_ONLY 1504 1484 extern struct VMStateDescription vmstate_x86_cpu; ··· 1695 1675 /* hw/pc.c */ 1696 1676 uint64_t cpu_get_tsc(CPUX86State *env); 1697 1677 1698 - #define TARGET_PAGE_BITS 12 1699 - 1700 - #ifdef TARGET_X86_64 1701 - #define TARGET_PHYS_ADDR_SPACE_BITS 52 1702 - /* ??? This is really 48 bits, sign-extended, but the only thing 1703 - accessible to userland with bit 48 set is the VSYSCALL, and that 1704 - is handled via other mechanisms. */ 1705 - #define TARGET_VIRT_ADDR_SPACE_BITS 47 1706 - #else 1707 - #define TARGET_PHYS_ADDR_SPACE_BITS 36 1708 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 1709 - #endif 1710 - 1711 1678 /* XXX: This value should match the one returned by CPUID 1712 1679 * and in exec.c */ 1713 1680 # if defined(TARGET_X86_64) ··· 1775 1742 1776 1743 /* translate.c */ 1777 1744 void tcg_x86_init(void); 1745 + 1746 + typedef CPUX86State CPUArchState; 1747 + typedef X86CPU ArchCPU; 1778 1748 1779 1749 #include "exec/cpu-all.h" 1780 1750 #include "svm.h"
+1 -1
target/i386/excp_helper.c
··· 90 90 int next_eip_addend, 91 91 uintptr_t retaddr) 92 92 { 93 - CPUState *cs = CPU(x86_env_get_cpu(env)); 93 + CPUState *cs = env_cpu(env); 94 94 95 95 if (!is_int) { 96 96 cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
+1 -1
target/i386/fpu_helper.c
··· 1477 1477 env->pkru = 0; 1478 1478 } 1479 1479 if (env->pkru != old_pkru) { 1480 - CPUState *cs = CPU(x86_env_get_cpu(env)); 1480 + CPUState *cs = env_cpu(env); 1481 1481 tlb_flush(cs); 1482 1482 } 1483 1483 }
+3 -3
target/i386/hax-all.c
··· 67 67 68 68 hax_fd hax_vcpu_get_fd(CPUArchState *env) 69 69 { 70 - struct hax_vcpu_state *vcpu = ENV_GET_CPU(env)->hax_vcpu; 70 + struct hax_vcpu_state *vcpu = env_cpu(env)->hax_vcpu; 71 71 if (!vcpu) { 72 72 return HAX_INVALID_FD; 73 73 } ··· 409 409 410 410 static int hax_vcpu_interrupt(CPUArchState *env) 411 411 { 412 - CPUState *cpu = ENV_GET_CPU(env); 412 + CPUState *cpu = env_cpu(env); 413 413 struct hax_vcpu_state *vcpu = cpu->hax_vcpu; 414 414 struct hax_tunnel *ht = vcpu->tunnel; 415 415 ··· 461 461 static int hax_vcpu_hax_exec(CPUArchState *env) 462 462 { 463 463 int ret = 0; 464 - CPUState *cpu = ENV_GET_CPU(env); 464 + CPUState *cpu = env_cpu(env); 465 465 X86CPU *x86_cpu = X86_CPU(cpu); 466 466 struct hax_vcpu_state *vcpu = cpu->hax_vcpu; 467 467 struct hax_tunnel *ht = vcpu->tunnel;
+6 -10
target/i386/helper.c
··· 622 622 623 623 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) 624 624 { 625 - X86CPU *cpu = x86_env_get_cpu(env); 625 + X86CPU *cpu = env_archcpu(env); 626 626 int pe_state; 627 627 628 628 qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0); ··· 664 664 the PDPT */ 665 665 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3) 666 666 { 667 - X86CPU *cpu = x86_env_get_cpu(env); 668 - 669 667 env->cr[3] = new_cr3; 670 668 if (env->cr[0] & CR0_PG_MASK) { 671 669 qemu_log_mask(CPU_LOG_MMU, 672 670 "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3); 673 - tlb_flush(CPU(cpu)); 671 + tlb_flush(env_cpu(env)); 674 672 } 675 673 } 676 674 677 675 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4) 678 676 { 679 - X86CPU *cpu = x86_env_get_cpu(env); 680 677 uint32_t hflags; 681 678 682 679 #if defined(DEBUG_MMU) ··· 685 682 if ((new_cr4 ^ env->cr[4]) & 686 683 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK | 687 684 CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) { 688 - tlb_flush(CPU(cpu)); 685 + tlb_flush(env_cpu(env)); 689 686 } 690 687 691 688 /* Clear bits we're going to recompute. */ ··· 977 974 978 975 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access) 979 976 { 980 - X86CPU *cpu = x86_env_get_cpu(env); 981 - CPUState *cs = CPU(cpu); 977 + X86CPU *cpu = env_archcpu(env); 978 + CPUState *cs = env_cpu(env); 982 979 983 980 if (kvm_enabled() || whpx_enabled()) { 984 981 env->tpr_access_type = access; ··· 996 993 target_ulong *base, unsigned int *limit, 997 994 unsigned int *flags) 998 995 { 999 - X86CPU *cpu = x86_env_get_cpu(env); 1000 - CPUState *cs = CPU(cpu); 996 + CPUState *cs = env_cpu(env); 1001 997 SegmentCache *dt; 1002 998 target_ulong ptr; 1003 999 uint32_t e1, e2;
+11 -11
target/i386/hvf/x86_decode.c
··· 75 75 VM_PANIC_EX("%s invalid size %d\n", __func__, size); 76 76 break; 77 77 } 78 - target_ulong va = linear_rip(ENV_GET_CPU(env), RIP(env)) + decode->len; 79 - vmx_read_mem(ENV_GET_CPU(env), &val, va, size); 78 + target_ulong va = linear_rip(env_cpu(env), RIP(env)) + decode->len; 79 + vmx_read_mem(env_cpu(env), &val, va, size); 80 80 decode->len += size; 81 81 82 82 return val; ··· 1772 1772 if (4 == decode->modrm.rm) { 1773 1773 ptr += get_sib_val(env, decode, &seg); 1774 1774 } else if (!decode->modrm.mod && 5 == decode->modrm.rm) { 1775 - if (x86_is_long_mode(ENV_GET_CPU(env))) { 1775 + if (x86_is_long_mode(env_cpu(env))) { 1776 1776 ptr += RIP(env) + decode->len; 1777 1777 } else { 1778 1778 ptr = decode->displacement; ··· 1877 1877 decode->addr_size_override = byte; 1878 1878 break; 1879 1879 case PREFIX_REX ... (PREFIX_REX + 0xf): 1880 - if (x86_is_long_mode(ENV_GET_CPU(env))) { 1880 + if (x86_is_long_mode(env_cpu(env))) { 1881 1881 decode->rex.rex = byte; 1882 1882 break; 1883 1883 } ··· 1892 1892 void set_addressing_size(CPUX86State *env, struct x86_decode *decode) 1893 1893 { 1894 1894 decode->addressing_size = -1; 1895 - if (x86_is_real(ENV_GET_CPU(env)) || x86_is_v8086(ENV_GET_CPU(env))) { 1895 + if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) { 1896 1896 if (decode->addr_size_override) { 1897 1897 decode->addressing_size = 4; 1898 1898 } else { 1899 1899 decode->addressing_size = 2; 1900 1900 } 1901 - } else if (!x86_is_long_mode(ENV_GET_CPU(env))) { 1901 + } else if (!x86_is_long_mode(env_cpu(env))) { 1902 1902 /* protected */ 1903 1903 struct vmx_segment cs; 1904 - vmx_read_segment_descriptor(ENV_GET_CPU(env), &cs, R_CS); 1904 + vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS); 1905 1905 /* check db */ 1906 1906 if ((cs.ar >> 14) & 1) { 1907 1907 if (decode->addr_size_override) { ··· 1929 1929 void set_operand_size(CPUX86State *env, struct x86_decode *decode) 1930 1930 { 1931 1931 decode->operand_size = -1; 1932 - if 
(x86_is_real(ENV_GET_CPU(env)) || x86_is_v8086(ENV_GET_CPU(env))) { 1932 + if (x86_is_real(env_cpu(env)) || x86_is_v8086(env_cpu(env))) { 1933 1933 if (decode->op_size_override) { 1934 1934 decode->operand_size = 4; 1935 1935 } else { 1936 1936 decode->operand_size = 2; 1937 1937 } 1938 - } else if (!x86_is_long_mode(ENV_GET_CPU(env))) { 1938 + } else if (!x86_is_long_mode(env_cpu(env))) { 1939 1939 /* protected */ 1940 1940 struct vmx_segment cs; 1941 - vmx_read_segment_descriptor(ENV_GET_CPU(env), &cs, R_CS); 1941 + vmx_read_segment_descriptor(env_cpu(env), &cs, R_CS); 1942 1942 /* check db */ 1943 1943 if ((cs.ar >> 14) & 1) { 1944 1944 if (decode->op_size_override) { ··· 2188 2188 default: 2189 2189 break; 2190 2190 } 2191 - return linear_addr_size(ENV_GET_CPU(env), addr, decode->addressing_size, seg); 2191 + return linear_addr_size(env_cpu(env), addr, decode->addressing_size, seg); 2192 2192 }
+33 -27
target/i386/hvf/x86_emu.c
··· 182 182 write_val_to_reg(ptr, val, size); 183 183 return; 184 184 } 185 - vmx_write_mem(ENV_GET_CPU(env), ptr, &val, size); 185 + vmx_write_mem(env_cpu(env), ptr, &val, size); 186 186 } 187 187 188 188 uint8_t *read_mmio(struct CPUX86State *env, target_ulong ptr, int bytes) 189 189 { 190 - vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, ptr, bytes); 190 + vmx_read_mem(env_cpu(env), env->hvf_emul->mmio_buf, ptr, bytes); 191 191 return env->hvf_emul->mmio_buf; 192 192 } 193 193 ··· 399 399 { 400 400 switch (decode->opcode[0]) { 401 401 case 0xe6: 402 - hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 1, 1, 1); 402 + hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 1, 1, 1); 403 403 break; 404 404 case 0xe7: 405 - hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &RAX(env), 1, 405 + hvf_handle_io(env_cpu(env), decode->op[0].val, &RAX(env), 1, 406 406 decode->operand_size, 1); 407 407 break; 408 408 case 0xee: 409 - hvf_handle_io(ENV_GET_CPU(env), DX(env), &AL(env), 1, 1, 1); 409 + hvf_handle_io(env_cpu(env), DX(env), &AL(env), 1, 1, 1); 410 410 break; 411 411 case 0xef: 412 - hvf_handle_io(ENV_GET_CPU(env), DX(env), &RAX(env), 1, decode->operand_size, 1); 412 + hvf_handle_io(env_cpu(env), DX(env), &RAX(env), 1, 413 + decode->operand_size, 1); 413 414 break; 414 415 default: 415 416 VM_PANIC("Bad out opcode\n"); ··· 423 424 target_ulong val = 0; 424 425 switch (decode->opcode[0]) { 425 426 case 0xe4: 426 - hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &AL(env), 0, 1, 1); 427 + hvf_handle_io(env_cpu(env), decode->op[0].val, &AL(env), 0, 1, 1); 427 428 break; 428 429 case 0xe5: 429 - hvf_handle_io(ENV_GET_CPU(env), decode->op[0].val, &val, 0, decode->operand_size, 1); 430 + hvf_handle_io(env_cpu(env), decode->op[0].val, &val, 0, 431 + decode->operand_size, 1); 430 432 if (decode->operand_size == 2) { 431 433 AX(env) = val; 432 434 } else { ··· 434 436 } 435 437 break; 436 438 case 0xec: 437 - hvf_handle_io(ENV_GET_CPU(env), DX(env), 
&AL(env), 0, 1, 1); 439 + hvf_handle_io(env_cpu(env), DX(env), &AL(env), 0, 1, 1); 438 440 break; 439 441 case 0xed: 440 - hvf_handle_io(ENV_GET_CPU(env), DX(env), &val, 0, decode->operand_size, 1); 442 + hvf_handle_io(env_cpu(env), DX(env), &val, 0, decode->operand_size, 1); 441 443 if (decode->operand_size == 2) { 442 444 AX(env) = val; 443 445 } else { ··· 484 486 485 487 static void exec_ins_single(struct CPUX86State *env, struct x86_decode *decode) 486 488 { 487 - target_ulong addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, 488 - R_ES); 489 + target_ulong addr = linear_addr_size(env_cpu(env), RDI(env), 490 + decode->addressing_size, R_ES); 489 491 490 - hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 0, 492 + hvf_handle_io(env_cpu(env), DX(env), env->hvf_emul->mmio_buf, 0, 491 493 decode->operand_size, 1); 492 - vmx_write_mem(ENV_GET_CPU(env), addr, env->hvf_emul->mmio_buf, decode->operand_size); 494 + vmx_write_mem(env_cpu(env), addr, env->hvf_emul->mmio_buf, 495 + decode->operand_size); 493 496 494 497 string_increment_reg(env, R_EDI, decode); 495 498 } ··· 509 512 { 510 513 target_ulong addr = decode_linear_addr(env, decode, RSI(env), R_DS); 511 514 512 - vmx_read_mem(ENV_GET_CPU(env), env->hvf_emul->mmio_buf, addr, decode->operand_size); 513 - hvf_handle_io(ENV_GET_CPU(env), DX(env), env->hvf_emul->mmio_buf, 1, 515 + vmx_read_mem(env_cpu(env), env->hvf_emul->mmio_buf, addr, 516 + decode->operand_size); 517 + hvf_handle_io(env_cpu(env), DX(env), env->hvf_emul->mmio_buf, 1, 514 518 decode->operand_size, 1); 515 519 516 520 string_increment_reg(env, R_ESI, decode); ··· 534 538 target_ulong val; 535 539 536 540 src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); 537 - dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, 538 - R_ES); 541 + dst_addr = linear_addr_size(env_cpu(env), RDI(env), 542 + decode->addressing_size, R_ES); 539 543 540 544 val = read_val_ext(env, src_addr, 
decode->operand_size); 541 545 write_val_ext(env, dst_addr, val, decode->operand_size); ··· 561 565 target_ulong dst_addr; 562 566 563 567 src_addr = decode_linear_addr(env, decode, RSI(env), R_DS); 564 - dst_addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, 565 - R_ES); 568 + dst_addr = linear_addr_size(env_cpu(env), RDI(env), 569 + decode->addressing_size, R_ES); 566 570 567 571 decode->op[0].type = X86_VAR_IMMEDIATE; 568 572 decode->op[0].val = read_val_ext(env, src_addr, decode->operand_size); ··· 591 595 target_ulong addr; 592 596 target_ulong val; 593 597 594 - addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, R_ES); 598 + addr = linear_addr_size(env_cpu(env), RDI(env), 599 + decode->addressing_size, R_ES); 595 600 val = read_reg(env, R_EAX, decode->operand_size); 596 - vmx_write_mem(ENV_GET_CPU(env), addr, &val, decode->operand_size); 601 + vmx_write_mem(env_cpu(env), addr, &val, decode->operand_size); 597 602 598 603 string_increment_reg(env, R_EDI, decode); 599 604 } ··· 614 619 { 615 620 target_ulong addr; 616 621 617 - addr = linear_addr_size(ENV_GET_CPU(env), RDI(env), decode->addressing_size, R_ES); 622 + addr = linear_addr_size(env_cpu(env), RDI(env), 623 + decode->addressing_size, R_ES); 618 624 decode->op[1].type = X86_VAR_IMMEDIATE; 619 - vmx_read_mem(ENV_GET_CPU(env), &decode->op[1].val, addr, decode->operand_size); 625 + vmx_read_mem(env_cpu(env), &decode->op[1].val, addr, decode->operand_size); 620 626 621 627 EXEC_2OP_FLAGS_CMD(env, decode, -, SET_FLAGS_OSZAPC_SUB, false); 622 628 string_increment_reg(env, R_EDI, decode); ··· 641 647 target_ulong val = 0; 642 648 643 649 addr = decode_linear_addr(env, decode, RSI(env), R_DS); 644 - vmx_read_mem(ENV_GET_CPU(env), &val, addr, decode->operand_size); 650 + vmx_read_mem(env_cpu(env), &val, addr, decode->operand_size); 645 651 write_reg(env, R_EAX, val, decode->operand_size); 646 652 647 653 string_increment_reg(env, R_ESI, decode); ··· 753 759 
754 760 static void exec_rdmsr(struct CPUX86State *env, struct x86_decode *decode) 755 761 { 756 - simulate_rdmsr(ENV_GET_CPU(env)); 762 + simulate_rdmsr(env_cpu(env)); 757 763 RIP(env) += decode->len; 758 764 } 759 765 ··· 851 857 852 858 static void exec_wrmsr(struct CPUX86State *env, struct x86_decode *decode) 853 859 { 854 - simulate_wrmsr(ENV_GET_CPU(env)); 860 + simulate_wrmsr(env_cpu(env)); 855 861 RIP(env) += decode->len; 856 862 } 857 863
+2 -2
target/i386/mem_helper.c
··· 89 89 } 90 90 CC_SRC = eflags; 91 91 #else 92 - cpu_loop_exit_atomic(ENV_GET_CPU(env), GETPC()); 92 + cpu_loop_exit_atomic(env_cpu(env), GETPC()); 93 93 #endif /* CONFIG_ATOMIC64 */ 94 94 } 95 95 ··· 158 158 } 159 159 CC_SRC = eflags; 160 160 } else { 161 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); 161 + cpu_loop_exit_atomic(env_cpu(env), ra); 162 162 } 163 163 } 164 164 #endif
+11 -13
target/i386/misc_helper.c
··· 133 133 break; 134 134 case 8: 135 135 if (!(env->hflags2 & HF2_VINTR_MASK)) { 136 - val = cpu_get_apic_tpr(x86_env_get_cpu(env)->apic_state); 136 + val = cpu_get_apic_tpr(env_archcpu(env)->apic_state); 137 137 } else { 138 138 val = env->v_tpr; 139 139 } ··· 158 158 case 8: 159 159 if (!(env->hflags2 & HF2_VINTR_MASK)) { 160 160 qemu_mutex_lock_iothread(); 161 - cpu_set_apic_tpr(x86_env_get_cpu(env)->apic_state, t0); 161 + cpu_set_apic_tpr(env_archcpu(env)->apic_state, t0); 162 162 qemu_mutex_unlock_iothread(); 163 163 } 164 164 env->v_tpr = t0 & 0x0f; ··· 180 180 181 181 void helper_invlpg(CPUX86State *env, target_ulong addr) 182 182 { 183 - X86CPU *cpu = x86_env_get_cpu(env); 183 + X86CPU *cpu = env_archcpu(env); 184 184 185 185 cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0, GETPC()); 186 186 tlb_flush_page(CPU(cpu), addr); ··· 247 247 env->sysenter_eip = val; 248 248 break; 249 249 case MSR_IA32_APICBASE: 250 - cpu_set_apic_base(x86_env_get_cpu(env)->apic_state, val); 250 + cpu_set_apic_base(env_archcpu(env)->apic_state, val); 251 251 break; 252 252 case MSR_EFER: 253 253 { ··· 404 404 val = env->sysenter_eip; 405 405 break; 406 406 case MSR_IA32_APICBASE: 407 - val = cpu_get_apic_base(x86_env_get_cpu(env)->apic_state); 407 + val = cpu_get_apic_base(env_archcpu(env)->apic_state); 408 408 break; 409 409 case MSR_EFER: 410 410 val = env->efer; ··· 561 561 562 562 void helper_hlt(CPUX86State *env, int next_eip_addend) 563 563 { 564 - X86CPU *cpu = x86_env_get_cpu(env); 564 + X86CPU *cpu = env_archcpu(env); 565 565 566 566 cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0, GETPC()); 567 567 env->eip += next_eip_addend; ··· 580 580 581 581 void helper_mwait(CPUX86State *env, int next_eip_addend) 582 582 { 583 - CPUState *cs; 584 - X86CPU *cpu; 583 + CPUState *cs = env_cpu(env); 584 + X86CPU *cpu = env_archcpu(env); 585 585 586 586 if ((uint32_t)env->regs[R_ECX] != 0) { 587 587 raise_exception_ra(env, EXCP0D_GPF, GETPC()); ··· 589 589 
cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0, GETPC()); 590 590 env->eip += next_eip_addend; 591 591 592 - cpu = x86_env_get_cpu(env); 593 - cs = CPU(cpu); 594 592 /* XXX: not complete but not completely erroneous */ 595 593 if (cs->cpu_index != 0 || CPU_NEXT(cs) != NULL) { 596 594 do_pause(cpu); ··· 601 599 602 600 void helper_pause(CPUX86State *env, int next_eip_addend) 603 601 { 604 - X86CPU *cpu = x86_env_get_cpu(env); 602 + X86CPU *cpu = env_archcpu(env); 605 603 606 604 cpu_svm_check_intercept_param(env, SVM_EXIT_PAUSE, 0, GETPC()); 607 605 env->eip += next_eip_addend; ··· 611 609 612 610 void helper_debug(CPUX86State *env) 613 611 { 614 - CPUState *cs = CPU(x86_env_get_cpu(env)); 612 + CPUState *cs = env_cpu(env); 615 613 616 614 cs->exception_index = EXCP_DEBUG; 617 615 cpu_loop_exit(cs); ··· 631 629 632 630 void helper_wrpkru(CPUX86State *env, uint32_t ecx, uint64_t val) 633 631 { 634 - CPUState *cs = CPU(x86_env_get_cpu(env)); 632 + CPUState *cs = env_cpu(env); 635 633 636 634 if ((env->cr[4] & CR4_PKE_MASK) == 0) { 637 635 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
+7 -7
target/i386/seg_helper.c
··· 137 137 uint32_t *esp_ptr, int dpl, 138 138 uintptr_t retaddr) 139 139 { 140 - X86CPU *cpu = x86_env_get_cpu(env); 140 + X86CPU *cpu = env_archcpu(env); 141 141 int type, index, shift; 142 142 143 143 #if 0 ··· 830 830 831 831 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level) 832 832 { 833 - X86CPU *cpu = x86_env_get_cpu(env); 833 + X86CPU *cpu = env_archcpu(env); 834 834 int index; 835 835 836 836 #if 0 ··· 972 972 #if defined(CONFIG_USER_ONLY) 973 973 void helper_syscall(CPUX86State *env, int next_eip_addend) 974 974 { 975 - CPUState *cs = CPU(x86_env_get_cpu(env)); 975 + CPUState *cs = env_cpu(env); 976 976 977 977 cs->exception_index = EXCP_SYSCALL; 978 978 env->exception_next_eip = env->eip + next_eip_addend; ··· 1172 1172 static void handle_even_inj(CPUX86State *env, int intno, int is_int, 1173 1173 int error_code, int is_hw, int rm) 1174 1174 { 1175 - CPUState *cs = CPU(x86_env_get_cpu(env)); 1175 + CPUState *cs = env_cpu(env); 1176 1176 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, 1177 1177 control.event_inj)); 1178 1178 ··· 1312 1312 1313 1313 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw) 1314 1314 { 1315 - do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw); 1315 + do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw); 1316 1316 } 1317 1317 1318 1318 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) ··· 1763 1763 target_ulong ssp, old_ssp, offset, sp; 1764 1764 1765 1765 LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift); 1766 - LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); 1766 + LOG_PCALL_STATE(env_cpu(env)); 1767 1767 if ((new_cs & 0xfffc) == 0) { 1768 1768 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); 1769 1769 } ··· 2167 2167 } 2168 2168 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n", 2169 2169 new_cs, new_eip, shift, addend); 2170 - LOG_PCALL_STATE(CPU(x86_env_get_cpu(env))); 2170 + 
LOG_PCALL_STATE(env_cpu(env)); 2171 2171 if ((new_cs & 0xfffc) == 0) { 2172 2172 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr); 2173 2173 }
+2 -2
target/i386/smm_helper.c
··· 204 204 205 205 void helper_rsm(CPUX86State *env) 206 206 { 207 - X86CPU *cpu = x86_env_get_cpu(env); 208 - CPUState *cs = CPU(cpu); 207 + X86CPU *cpu = env_archcpu(env); 208 + CPUState *cs = env_cpu(env); 209 209 target_ulong sm_state; 210 210 int i, offset; 211 211 uint32_t val;
+11 -11
target/i386/svm_helper.c
··· 84 84 static inline void svm_save_seg(CPUX86State *env, hwaddr addr, 85 85 const SegmentCache *sc) 86 86 { 87 - CPUState *cs = CPU(x86_env_get_cpu(env)); 87 + CPUState *cs = env_cpu(env); 88 88 89 89 x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector), 90 90 sc->selector); ··· 99 99 static inline void svm_load_seg(CPUX86State *env, hwaddr addr, 100 100 SegmentCache *sc) 101 101 { 102 - CPUState *cs = CPU(x86_env_get_cpu(env)); 102 + CPUState *cs = env_cpu(env); 103 103 unsigned int flags; 104 104 105 105 sc->selector = x86_lduw_phys(cs, ··· 122 122 123 123 void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) 124 124 { 125 - CPUState *cs = CPU(x86_env_get_cpu(env)); 125 + CPUState *cs = env_cpu(env); 126 126 target_ulong addr; 127 127 uint64_t nested_ctl; 128 128 uint32_t event_inj; ··· 314 314 env->hflags2 |= HF2_GIF_MASK; 315 315 316 316 if (int_ctl & V_IRQ_MASK) { 317 - CPUState *cs = CPU(x86_env_get_cpu(env)); 317 + CPUState *cs = env_cpu(env); 318 318 319 319 cs->interrupt_request |= CPU_INTERRUPT_VIRQ; 320 320 } ··· 379 379 380 380 void helper_vmload(CPUX86State *env, int aflag) 381 381 { 382 - CPUState *cs = CPU(x86_env_get_cpu(env)); 382 + CPUState *cs = env_cpu(env); 383 383 target_ulong addr; 384 384 385 385 cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC()); ··· 419 419 420 420 void helper_vmsave(CPUX86State *env, int aflag) 421 421 { 422 - CPUState *cs = CPU(x86_env_get_cpu(env)); 422 + CPUState *cs = env_cpu(env); 423 423 target_ulong addr; 424 424 425 425 cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC()); ··· 482 482 483 483 void helper_invlpga(CPUX86State *env, int aflag) 484 484 { 485 - X86CPU *cpu = x86_env_get_cpu(env); 485 + X86CPU *cpu = env_archcpu(env); 486 486 target_ulong addr; 487 487 488 488 cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC()); ··· 501 501 void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, 502 502 uint64_t param, uintptr_t retaddr) 503 503 { 
504 - CPUState *cs = CPU(x86_env_get_cpu(env)); 504 + CPUState *cs = env_cpu(env); 505 505 506 506 if (likely(!(env->hflags & HF_GUEST_MASK))) { 507 507 return; ··· 583 583 void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, 584 584 uint32_t next_eip_addend) 585 585 { 586 - CPUState *cs = CPU(x86_env_get_cpu(env)); 586 + CPUState *cs = env_cpu(env); 587 587 588 588 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { 589 589 /* FIXME: this should be read in at vmrun (faster this way?) */ ··· 604 604 void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1, 605 605 uintptr_t retaddr) 606 606 { 607 - CPUState *cs = CPU(x86_env_get_cpu(env)); 607 + CPUState *cs = env_cpu(env); 608 608 609 609 cpu_restore_state(cs, retaddr, true); 610 610 ··· 625 625 626 626 void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) 627 627 { 628 - CPUState *cs = CPU(x86_env_get_cpu(env)); 628 + CPUState *cs = env_cpu(env); 629 629 uint32_t int_ctl; 630 630 631 631 if (env->hflags & HF_INHIBIT_IRQ_MASK) {
+17
target/lm32/cpu-param.h
··· 1 + /* 2 + * LatticeMico32 cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2010 Michael Walle <michael@walle.cc> 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef LM32_CPU_PARAM_H 9 + #define LM32_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 12 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 15 + #define NB_MMU_MODES 1 16 + 17 + #endif
+1 -2
target/lm32/cpu.c
··· 142 142 143 143 static void lm32_cpu_initfn(Object *obj) 144 144 { 145 - CPUState *cs = CPU(obj); 146 145 LM32CPU *cpu = LM32_CPU(obj); 147 146 CPULM32State *env = &cpu->env; 148 147 149 - cs->env_ptr = env; 148 + cpu_set_cpustate_pointers(cpu); 150 149 151 150 env->flags = 0; 152 151 }
+5 -20
target/lm32/cpu.h
··· 20 20 #ifndef LM32_CPU_H 21 21 #define LM32_CPU_H 22 22 23 - #define TARGET_LONG_BITS 32 24 - 25 - #define CPUArchState struct CPULM32State 26 - 27 23 #include "qemu-common.h" 28 24 #include "cpu-qom.h" 29 25 #include "exec/cpu-defs.h" 30 - struct CPULM32State; 26 + 31 27 typedef struct CPULM32State CPULM32State; 32 28 33 - #define NB_MMU_MODES 1 34 - #define TARGET_PAGE_BITS 12 35 29 static inline int cpu_mmu_index(CPULM32State *env, bool ifetch) 36 30 { 37 31 return 0; 38 32 } 39 - 40 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 41 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 42 33 43 34 /* Exceptions indices */ 44 35 enum { ··· 168 159 /* Fields up to this point are cleared by a CPU reset */ 169 160 struct {} end_reset_fields; 170 161 171 - CPU_COMMON 172 - 173 162 /* Fields from here on are preserved across CPU reset. */ 174 163 uint32_t eba; /* exception base address */ 175 164 uint32_t deba; /* debug exception base address */ ··· 195 184 CPUState parent_obj; 196 185 /*< public >*/ 197 186 187 + CPUNegativeOffsetState neg; 198 188 CPULM32State env; 199 189 200 190 uint32_t revision; ··· 204 194 uint32_t features; 205 195 }; 206 196 207 - static inline LM32CPU *lm32_env_get_cpu(CPULM32State *env) 208 - { 209 - return container_of(env, LM32CPU, env); 210 - } 211 - 212 - #define ENV_GET_CPU(e) CPU(lm32_env_get_cpu(e)) 213 - 214 - #define ENV_OFFSET offsetof(LM32CPU, env) 215 197 216 198 #ifndef CONFIG_USER_ONLY 217 199 extern const struct VMStateDescription vmstate_lm32_cpu; ··· 264 246 bool lm32_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 265 247 MMUAccessType access_type, int mmu_idx, 266 248 bool probe, uintptr_t retaddr); 249 + 250 + typedef CPULM32State CPUArchState; 251 + typedef LM32CPU ArchCPU; 267 252 268 253 #include "exec/cpu-all.h" 269 254
+6 -13
target/lm32/helper.c
··· 58 58 59 59 void lm32_breakpoint_insert(CPULM32State *env, int idx, target_ulong address) 60 60 { 61 - LM32CPU *cpu = lm32_env_get_cpu(env); 62 - 63 - cpu_breakpoint_insert(CPU(cpu), address, BP_CPU, 61 + cpu_breakpoint_insert(env_cpu(env), address, BP_CPU, 64 62 &env->cpu_breakpoint[idx]); 65 63 } 66 64 67 65 void lm32_breakpoint_remove(CPULM32State *env, int idx) 68 66 { 69 - LM32CPU *cpu = lm32_env_get_cpu(env); 70 - 71 67 if (!env->cpu_breakpoint[idx]) { 72 68 return; 73 69 } 74 70 75 - cpu_breakpoint_remove_by_ref(CPU(cpu), env->cpu_breakpoint[idx]); 71 + cpu_breakpoint_remove_by_ref(env_cpu(env), env->cpu_breakpoint[idx]); 76 72 env->cpu_breakpoint[idx] = NULL; 77 73 } 78 74 79 75 void lm32_watchpoint_insert(CPULM32State *env, int idx, target_ulong address, 80 76 lm32_wp_t wp_type) 81 77 { 82 - LM32CPU *cpu = lm32_env_get_cpu(env); 83 78 int flags = 0; 84 79 85 80 switch (wp_type) { ··· 98 93 } 99 94 100 95 if (flags != 0) { 101 - cpu_watchpoint_insert(CPU(cpu), address, 1, flags, 102 - &env->cpu_watchpoint[idx]); 96 + cpu_watchpoint_insert(env_cpu(env), address, 1, flags, 97 + &env->cpu_watchpoint[idx]); 103 98 } 104 99 } 105 100 106 101 void lm32_watchpoint_remove(CPULM32State *env, int idx) 107 102 { 108 - LM32CPU *cpu = lm32_env_get_cpu(env); 109 - 110 103 if (!env->cpu_watchpoint[idx]) { 111 104 return; 112 105 } 113 106 114 - cpu_watchpoint_remove_by_ref(CPU(cpu), env->cpu_watchpoint[idx]); 107 + cpu_watchpoint_remove_by_ref(env_cpu(env), env->cpu_watchpoint[idx]); 115 108 env->cpu_watchpoint[idx] = NULL; 116 109 } 117 110 118 111 static bool check_watchpoints(CPULM32State *env) 119 112 { 120 - LM32CPU *cpu = lm32_env_get_cpu(env); 113 + LM32CPU *cpu = env_archcpu(env); 121 114 int i; 122 115 123 116 for (i = 0; i < cpu->num_watchpoints; i++) {
+3 -3
target/lm32/op_helper.c
··· 16 16 #if !defined(CONFIG_USER_ONLY) 17 17 void raise_exception(CPULM32State *env, int index) 18 18 { 19 - CPUState *cs = CPU(lm32_env_get_cpu(env)); 19 + CPUState *cs = env_cpu(env); 20 20 21 21 cs->exception_index = index; 22 22 cpu_loop_exit(cs); ··· 29 29 30 30 void HELPER(hlt)(CPULM32State *env) 31 31 { 32 - CPUState *cs = CPU(lm32_env_get_cpu(env)); 32 + CPUState *cs = env_cpu(env); 33 33 34 34 cs->halted = 1; 35 35 cs->exception_index = EXCP_HLT; ··· 39 39 void HELPER(ill)(CPULM32State *env) 40 40 { 41 41 #ifndef CONFIG_USER_ONLY 42 - CPUState *cs = CPU(lm32_env_get_cpu(env)); 42 + CPUState *cs = env_cpu(env); 43 43 fprintf(stderr, "VM paused due to illegal instruction. " 44 44 "Connect a debugger or switch to the monitor console " 45 45 "to find out more.\n");
+1 -1
target/lm32/translate.c
··· 1053 1053 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) 1054 1054 { 1055 1055 CPULM32State *env = cs->env_ptr; 1056 - LM32CPU *cpu = lm32_env_get_cpu(env); 1056 + LM32CPU *cpu = env_archcpu(env); 1057 1057 struct DisasContext ctx, *dc = &ctx; 1058 1058 uint32_t pc_start; 1059 1059 uint32_t page_start;
+22
target/m68k/cpu-param.h
··· 1 + /* 2 + * m68k cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2005-2007 CodeSourcery 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef M68K_CPU_PARAM_H 9 + #define M68K_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + /* 13 + * Coldfire Linux uses 8k pages 14 + * and m68k linux uses 4k pages 15 + * use the smallest one 16 + */ 17 + #define TARGET_PAGE_BITS 12 18 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 19 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 20 + #define NB_MMU_MODES 2 21 + 22 + #endif
+1 -3
target/m68k/cpu.c
··· 238 238 239 239 static void m68k_cpu_initfn(Object *obj) 240 240 { 241 - CPUState *cs = CPU(obj); 242 241 M68kCPU *cpu = M68K_CPU(obj); 243 - CPUM68KState *env = &cpu->env; 244 242 245 - cs->env_ptr = env; 243 + cpu_set_cpustate_pointers(cpu); 246 244 } 247 245 248 246 static const VMStateDescription vmstate_m68k_cpu = {
+4 -24
target/m68k/cpu.h
··· 21 21 #ifndef M68K_CPU_H 22 22 #define M68K_CPU_H 23 23 24 - #define TARGET_LONG_BITS 32 25 - 26 - #define CPUArchState struct CPUM68KState 27 - 28 24 #include "qemu-common.h" 29 25 #include "exec/cpu-defs.h" 30 26 #include "cpu-qom.h" ··· 82 78 #define M68K_MAX_TTR 2 83 79 #define TTR(type, index) ttr[((type & ACCESS_CODE) == ACCESS_CODE) * 2 + index] 84 80 85 - #define NB_MMU_MODES 2 86 81 #define TARGET_INSN_START_EXTRA_WORDS 1 87 82 88 83 typedef CPU_LDoubleU FPReg; ··· 148 143 /* Fields up to this point are cleared by a CPU reset */ 149 144 struct {} end_reset_fields; 150 145 151 - CPU_COMMON 152 - 153 146 /* Fields from here on are preserved across CPU reset. */ 154 147 uint32_t features; 155 148 } CPUM68KState; ··· 165 158 CPUState parent_obj; 166 159 /*< public >*/ 167 160 161 + CPUNegativeOffsetState neg; 168 162 CPUM68KState env; 169 163 }; 170 164 171 - static inline M68kCPU *m68k_env_get_cpu(CPUM68KState *env) 172 - { 173 - return container_of(env, M68kCPU, env); 174 - } 175 - 176 - #define ENV_GET_CPU(e) CPU(m68k_env_get_cpu(e)) 177 - 178 - #define ENV_OFFSET offsetof(M68kCPU, env) 179 165 180 166 void m68k_cpu_do_interrupt(CPUState *cpu); 181 167 bool m68k_cpu_exec_interrupt(CPUState *cpu, int int_req); ··· 502 488 503 489 void register_m68k_insns (CPUM68KState *env); 504 490 505 - /* Coldfire Linux uses 8k pages 506 - * and m68k linux uses 4k pages 507 - * use the smallest one 508 - */ 509 - #define TARGET_PAGE_BITS 12 510 - 511 491 enum { 512 492 /* 1 bit to define user level / supervisor access */ 513 493 ACCESS_SUPER = 0x01, ··· 521 501 ACCESS_CODE = 0x10, /* Code fetch access */ 522 502 ACCESS_DATA = 0x20, /* Data load/store access */ 523 503 }; 524 - 525 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 526 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 527 504 528 505 #define M68K_CPU_TYPE_SUFFIX "-" TYPE_M68K_CPU 529 506 #define M68K_CPU_TYPE_NAME(model) model M68K_CPU_TYPE_SUFFIX ··· 549 526 unsigned size, MMUAccessType access_type, 550 527 int mmu_idx, 
MemTxAttrs attrs, 551 528 MemTxResult response, uintptr_t retaddr); 529 + 530 + typedef CPUM68KState CPUArchState; 531 + typedef M68kCPU ArchCPU; 552 532 553 533 #include "exec/cpu-all.h" 554 534
+12 -21
target/m68k/helper.c
··· 168 168 169 169 void HELPER(cf_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val) 170 170 { 171 - M68kCPU *cpu = m68k_env_get_cpu(env); 172 - 173 171 switch (reg) { 174 172 case M68K_CR_CACR: 175 173 env->cacr = val; ··· 186 184 break; 187 185 /* TODO: Implement control registers. */ 188 186 default: 189 - cpu_abort(CPU(cpu), 187 + cpu_abort(env_cpu(env), 190 188 "Unimplemented control register write 0x%x = 0x%x\n", 191 189 reg, val); 192 190 } ··· 194 192 195 193 void HELPER(m68k_movec_to)(CPUM68KState *env, uint32_t reg, uint32_t val) 196 194 { 197 - M68kCPU *cpu = m68k_env_get_cpu(env); 198 - 199 195 switch (reg) { 200 196 /* MC680[1234]0 */ 201 197 case M68K_CR_SFC: ··· 248 244 env->mmu.ttr[M68K_DTTR1] = val; 249 245 return; 250 246 } 251 - cpu_abort(CPU(cpu), "Unimplemented control register write 0x%x = 0x%x\n", 247 + cpu_abort(env_cpu(env), 248 + "Unimplemented control register write 0x%x = 0x%x\n", 252 249 reg, val); 253 250 } 254 251 255 252 uint32_t HELPER(m68k_movec_from)(CPUM68KState *env, uint32_t reg) 256 253 { 257 - M68kCPU *cpu = m68k_env_get_cpu(env); 258 - 259 254 switch (reg) { 260 255 /* MC680[1234]0 */ 261 256 case M68K_CR_SFC: ··· 292 287 case M68K_CR_DTT1: 293 288 return env->mmu.ttr[M68K_DTTR1]; 294 289 } 295 - cpu_abort(CPU(cpu), "Unimplemented control register read 0x%x\n", 290 + cpu_abort(env_cpu(env), "Unimplemented control register read 0x%x\n", 296 291 reg); 297 292 } 298 293 ··· 388 383 uint32_t last_logical, last_physical; 389 384 int32_t size; 390 385 int last_attr = -1, attr = -1; 391 - M68kCPU *cpu = m68k_env_get_cpu(env); 392 - CPUState *cs = CPU(cpu); 386 + CPUState *cs = env_cpu(env); 393 387 MemTxResult txres; 394 388 395 389 if (env->mmu.tcr & M68K_TCR_PAGE_8K) { ··· 630 624 int *prot, target_ulong address, 631 625 int access_type, target_ulong *page_size) 632 626 { 633 - M68kCPU *cpu = m68k_env_get_cpu(env); 634 - CPUState *cs = CPU(cpu); 627 + CPUState *cs = env_cpu(env); 635 628 uint32_t entry; 636 629 uint32_t 
next; 637 630 target_ulong page_mask; ··· 1175 1168 z = n; \ 1176 1169 break; \ 1177 1170 default: \ 1178 - cpu_abort(CPU(m68k_env_get_cpu(env)), "Bad CC_OP %d", op); \ 1171 + cpu_abort(env_cpu(env), "Bad CC_OP %d", op); \ 1179 1172 } \ 1180 1173 } while (0) 1181 1174 ··· 1358 1351 #if defined(CONFIG_SOFTMMU) 1359 1352 void HELPER(ptest)(CPUM68KState *env, uint32_t addr, uint32_t is_read) 1360 1353 { 1361 - M68kCPU *cpu = m68k_env_get_cpu(env); 1362 - CPUState *cs = CPU(cpu); 1363 1354 hwaddr physical; 1364 1355 int access_type; 1365 1356 int prot; ··· 1384 1375 if (ret == 0) { 1385 1376 addr &= TARGET_PAGE_MASK; 1386 1377 physical += addr & (page_size - 1); 1387 - tlb_set_page(cs, addr, physical, 1378 + tlb_set_page(env_cpu(env), addr, physical, 1388 1379 prot, access_type & ACCESS_SUPER ? 1389 1380 MMU_KERNEL_IDX : MMU_USER_IDX, page_size); 1390 1381 } ··· 1392 1383 1393 1384 void HELPER(pflush)(CPUM68KState *env, uint32_t addr, uint32_t opmode) 1394 1385 { 1395 - M68kCPU *cpu = m68k_env_get_cpu(env); 1386 + CPUState *cs = env_cpu(env); 1396 1387 1397 1388 switch (opmode) { 1398 1389 case 0: /* Flush page entry if not global */ 1399 1390 case 1: /* Flush page entry */ 1400 - tlb_flush_page(CPU(cpu), addr); 1391 + tlb_flush_page(cs, addr); 1401 1392 break; 1402 1393 case 2: /* Flush all except global entries */ 1403 - tlb_flush(CPU(cpu)); 1394 + tlb_flush(cs); 1404 1395 break; 1405 1396 case 3: /* Flush all entries */ 1406 - tlb_flush(CPU(cpu)); 1397 + tlb_flush(cs); 1407 1398 break; 1408 1399 } 1409 1400 }
+2 -2
target/m68k/m68k-semi.c
··· 421 421 case HOSTED_INIT_SIM: 422 422 #if defined(CONFIG_USER_ONLY) 423 423 { 424 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 424 + CPUState *cs = env_cpu(env); 425 425 TaskState *ts = cs->opaque; 426 426 /* Allocate the heap using sbrk. */ 427 427 if (!ts->heap_limit) { ··· 454 454 #endif 455 455 return; 456 456 default: 457 - cpu_abort(CPU(m68k_env_get_cpu(env)), "Unsupported semihosting syscall %d\n", nr); 457 + cpu_abort(env_cpu(env), "Unsupported semihosting syscall %d\n", nr); 458 458 result = 0; 459 459 } 460 460 failed:
+7 -7
target/m68k/op_helper.c
··· 196 196 197 197 static void cf_interrupt_all(CPUM68KState *env, int is_hw) 198 198 { 199 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 199 + CPUState *cs = env_cpu(env); 200 200 uint32_t sp; 201 201 uint32_t sr; 202 202 uint32_t fmt; ··· 274 274 { 275 275 if (m68k_feature(env, M68K_FEATURE_QUAD_MULDIV)) { 276 276 /* all except 68000 */ 277 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 277 + CPUState *cs = env_cpu(env); 278 278 switch (format) { 279 279 case 4: 280 280 *sp -= 4; ··· 299 299 300 300 static void m68k_interrupt_all(CPUM68KState *env, int is_hw) 301 301 { 302 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 302 + CPUState *cs = env_cpu(env); 303 303 uint32_t sp; 304 304 uint32_t retaddr; 305 305 uint32_t vector; ··· 507 507 508 508 static void raise_exception_ra(CPUM68KState *env, int tt, uintptr_t raddr) 509 509 { 510 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 510 + CPUState *cs = env_cpu(env); 511 511 512 512 cs->exception_index = tt; 513 513 cpu_loop_exit_restore(cs, raddr); ··· 781 781 #endif 782 782 { 783 783 /* Tell the main loop we need to serialize this insn. */ 784 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); 784 + cpu_loop_exit_atomic(env_cpu(env), ra); 785 785 } 786 786 } else { 787 787 /* We're executing in a serial context -- no need to be atomic. */ ··· 1037 1037 env->cc_c = 0 <= ub ? val < 0 || val > ub : val > ub && val < 0; 1038 1038 1039 1039 if (val < 0 || val > ub) { 1040 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 1040 + CPUState *cs = env_cpu(env); 1041 1041 1042 1042 /* Recover PC and CC_OP for the beginning of the insn. */ 1043 1043 cpu_restore_state(cs, GETPC(), true); ··· 1068 1068 env->cc_c = lb <= ub ? val < lb || val > ub : val > ub && val < lb; 1069 1069 1070 1070 if (env->cc_c) { 1071 - CPUState *cs = CPU(m68k_env_get_cpu(env)); 1071 + CPUState *cs = env_cpu(env); 1072 1072 1073 1073 /* Recover PC and CC_OP for the beginning of the insn. */ 1074 1074 cpu_restore_state(cs, GETPC(), true);
+1 -3
target/m68k/translate.c
··· 4777 4777 4778 4778 DISAS_INSN(wdebug) 4779 4779 { 4780 - M68kCPU *cpu = m68k_env_get_cpu(env); 4781 - 4782 4780 if (IS_USER(s)) { 4783 4781 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE); 4784 4782 return; 4785 4783 } 4786 4784 /* TODO: Implement wdebug. */ 4787 - cpu_abort(CPU(cpu), "WDEBUG not implemented"); 4785 + cpu_abort(env_cpu(env), "WDEBUG not implemented"); 4788 4786 } 4789 4787 #endif 4790 4788
+18
target/microblaze/cpu-param.h
··· 1 + /* 2 + * MicroBlaze cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2009 Edgar E. Iglesias 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef MICROBLAZE_CPU_PARAM_H 9 + #define MICROBLAZE_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 64 12 + #define TARGET_PHYS_ADDR_SPACE_BITS 64 13 + #define TARGET_VIRT_ADDR_SPACE_BITS 64 14 + /* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */ 15 + #define TARGET_PAGE_BITS 12 16 + #define NB_MMU_MODES 3 17 + 18 + #endif
+1 -2
target/microblaze/cpu.c
··· 221 221 222 222 static void mb_cpu_initfn(Object *obj) 223 223 { 224 - CPUState *cs = CPU(obj); 225 224 MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj); 226 225 CPUMBState *env = &cpu->env; 227 226 228 - cs->env_ptr = env; 227 + cpu_set_cpustate_pointers(cpu); 229 228 230 229 set_float_rounding_mode(float_round_nearest_even, &env->fp_status); 231 230
+22 -41
target/microblaze/cpu.h
··· 22 22 23 23 #include "qemu-common.h" 24 24 #include "cpu-qom.h" 25 - 26 - #define TARGET_LONG_BITS 64 27 - 28 - #define CPUArchState struct CPUMBState 29 - 30 25 #include "exec/cpu-defs.h" 31 26 #include "fpu/softfloat-types.h" 32 - struct CPUMBState; 27 + 33 28 typedef struct CPUMBState CPUMBState; 34 29 #if !defined(CONFIG_USER_ONLY) 35 30 #include "mmu.h" ··· 228 223 #define CC_NE 1 229 224 #define CC_EQ 0 230 225 231 - #define NB_MMU_MODES 3 232 - 233 226 #define STREAM_EXCEPTION (1 << 0) 234 227 #define STREAM_ATOMIC (1 << 1) 235 228 #define STREAM_TEST (1 << 2) ··· 273 266 /* Fields up to this point are cleared by a CPU reset */ 274 267 struct {} end_reset_fields; 275 268 276 - CPU_COMMON 277 - 278 269 /* These fields are preserved on reset. */ 279 270 280 271 struct { ··· 293 284 CPUState parent_obj; 294 285 295 286 /*< public >*/ 287 + 288 + CPUNegativeOffsetState neg; 289 + CPUMBState env; 296 290 297 291 /* Microblaze Configuration Settings */ 298 292 struct { ··· 313 307 char *version; 314 308 uint8_t pvr; 315 309 } cfg; 316 - 317 - CPUMBState env; 318 310 }; 319 311 320 - static inline MicroBlazeCPU *mb_env_get_cpu(CPUMBState *env) 321 - { 322 - return container_of(env, MicroBlazeCPU, env); 323 - } 324 - 325 - #define ENV_GET_CPU(e) CPU(mb_env_get_cpu(e)) 326 - 327 - #define ENV_OFFSET offsetof(MicroBlazeCPU, env) 328 312 329 313 void mb_cpu_do_interrupt(CPUState *cs); 330 314 bool mb_cpu_exec_interrupt(CPUState *cs, int int_req); ··· 340 324 int cpu_mb_signal_handler(int host_signum, void *pinfo, 341 325 void *puc); 342 326 343 - /* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */ 344 - #define TARGET_PAGE_BITS 12 345 - 346 - #define TARGET_PHYS_ADDR_SPACE_BITS 64 347 - #define TARGET_VIRT_ADDR_SPACE_BITS 64 348 - 349 327 #define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU 350 328 351 329 #define cpu_signal_handler cpu_mb_signal_handler ··· 359 337 #define MMU_USER_IDX 2 360 338 /* See NB_MMU_MODES further up the file. 
*/ 361 339 362 - static inline int cpu_mmu_index (CPUMBState *env, bool ifetch) 363 - { 364 - MicroBlazeCPU *cpu = mb_env_get_cpu(env); 365 - 366 - /* Are we in nommu mode?. */ 367 - if (!(env->sregs[SR_MSR] & MSR_VM) || !cpu->cfg.use_mmu) { 368 - return MMU_NOMMU_IDX; 369 - } 370 - 371 - if (env->sregs[SR_MSR] & MSR_UM) { 372 - return MMU_USER_IDX; 373 - } 374 - return MMU_KERNEL_IDX; 375 - } 376 - 377 340 bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size, 378 341 MMUAccessType access_type, int mmu_idx, 379 342 bool probe, uintptr_t retaddr); 343 + 344 + typedef CPUMBState CPUArchState; 345 + typedef MicroBlazeCPU ArchCPU; 380 346 381 347 #include "exec/cpu-all.h" 382 348 ··· 395 361 int mmu_idx, MemTxAttrs attrs, 396 362 MemTxResult response, uintptr_t retaddr); 397 363 #endif 364 + 365 + static inline int cpu_mmu_index(CPUMBState *env, bool ifetch) 366 + { 367 + MicroBlazeCPU *cpu = env_archcpu(env); 368 + 369 + /* Are we in nommu mode?. */ 370 + if (!(env->sregs[SR_MSR] & MSR_VM) || !cpu->cfg.use_mmu) { 371 + return MMU_NOMMU_IDX; 372 + } 373 + 374 + if (env->sregs[SR_MSR] & MSR_UM) { 375 + return MMU_USER_IDX; 376 + } 377 + return MMU_KERNEL_IDX; 378 + } 398 379 399 380 #endif
+2 -3
target/microblaze/mmu.c
··· 34 34 35 35 static void mmu_flush_idx(CPUMBState *env, unsigned int idx) 36 36 { 37 - CPUState *cs = CPU(mb_env_get_cpu(env)); 37 + CPUState *cs = env_cpu(env); 38 38 struct microblaze_mmu *mmu = &env->mmu; 39 39 unsigned int tlb_size; 40 40 uint32_t tlb_tag, end, t; ··· 228 228 229 229 void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v) 230 230 { 231 - MicroBlazeCPU *cpu = mb_env_get_cpu(env); 232 231 uint64_t tmp64; 233 232 unsigned int i; 234 233 qemu_log_mask(CPU_LOG_MMU, ··· 269 268 /* Changes to the zone protection reg flush the QEMU TLB. 270 269 Fortunately, these are very uncommon. */ 271 270 if (v != env->mmu.regs[rn]) { 272 - tlb_flush(CPU(cpu)); 271 + tlb_flush(env_cpu(env)); 273 272 } 274 273 env->mmu.regs[rn] = v; 275 274 break;
+1 -1
target/microblaze/op_helper.c
··· 65 65 66 66 void helper_raise_exception(CPUMBState *env, uint32_t index) 67 67 { 68 - CPUState *cs = CPU(mb_env_get_cpu(env)); 68 + CPUState *cs = env_cpu(env); 69 69 70 70 cs->exception_index = index; 71 71 cpu_loop_exit(cs);
+1 -1
target/microblaze/translate.c
··· 1604 1604 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) 1605 1605 { 1606 1606 CPUMBState *env = cs->env_ptr; 1607 - MicroBlazeCPU *cpu = mb_env_get_cpu(env); 1607 + MicroBlazeCPU *cpu = env_archcpu(env); 1608 1608 uint32_t pc_start; 1609 1609 struct DisasContext ctx; 1610 1610 struct DisasContext *dc = &ctx;
+29
target/mips/cpu-param.h
··· 1 + /* 2 + * MIPS cpu parameters for qemu. 3 + * 4 + * SPDX-License-Identifier: LGPL-2.0+ 5 + */ 6 + 7 + #ifndef MIPS_CPU_PARAM_H 8 + #define MIPS_CPU_PARAM_H 1 9 + 10 + #ifdef TARGET_MIPS64 11 + # define TARGET_LONG_BITS 64 12 + #else 13 + # define TARGET_LONG_BITS 32 14 + #endif 15 + #ifdef TARGET_MIPS64 16 + #define TARGET_PHYS_ADDR_SPACE_BITS 48 17 + #define TARGET_VIRT_ADDR_SPACE_BITS 48 18 + #else 19 + #define TARGET_PHYS_ADDR_SPACE_BITS 40 20 + # ifdef CONFIG_USER_ONLY 21 + # define TARGET_VIRT_ADDR_SPACE_BITS 31 22 + # else 23 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 24 + #endif 25 + #endif 26 + #define TARGET_PAGE_BITS 12 27 + #define NB_MMU_MODES 4 28 + 29 + #endif
+1 -2
target/mips/cpu.c
··· 152 152 153 153 static void mips_cpu_initfn(Object *obj) 154 154 { 155 - CPUState *cs = CPU(obj); 156 155 MIPSCPU *cpu = MIPS_CPU(obj); 157 156 CPUMIPSState *env = &cpu->env; 158 157 MIPSCPUClass *mcc = MIPS_CPU_GET_CLASS(obj); 159 158 160 - cs->env_ptr = env; 159 + cpu_set_cpustate_pointers(cpu); 161 160 env->cpu_model = mcc->cpu_def; 162 161 } 163 162
+5 -16
target/mips/cpu.h
··· 3 3 4 4 #define ALIGNED_ONLY 5 5 6 - #define CPUArchState struct CPUMIPSState 7 - 8 6 #include "qemu-common.h" 9 7 #include "cpu-qom.h" 10 - #include "mips-defs.h" 11 8 #include "exec/cpu-defs.h" 12 9 #include "fpu/softfloat.h" 10 + #include "mips-defs.h" 13 11 14 12 #define TCG_GUEST_DEFAULT_MO (0) 15 - 16 - struct CPUMIPSState; 17 13 18 14 typedef struct CPUMIPSTLBContext CPUMIPSTLBContext; 19 15 ··· 103 99 #define FP_UNIMPLEMENTED 32 104 100 }; 105 101 106 - #define NB_MMU_MODES 4 107 102 #define TARGET_INSN_START_EXTRA_WORDS 2 108 103 109 104 typedef struct CPUMIPSMVPContext CPUMIPSMVPContext; ··· 1046 1041 /* Fields up to this point are cleared by a CPU reset */ 1047 1042 struct {} end_reset_fields; 1048 1043 1049 - CPU_COMMON 1050 - 1051 1044 /* Fields from here on are preserved across CPU reset. */ 1052 1045 CPUMIPSMVPContext *mvp; 1053 1046 #if !defined(CONFIG_USER_ONLY) ··· 1073 1066 CPUState parent_obj; 1074 1067 /*< public >*/ 1075 1068 1069 + CPUNegativeOffsetState neg; 1076 1070 CPUMIPSState env; 1077 1071 }; 1078 1072 1079 - static inline MIPSCPU *mips_env_get_cpu(CPUMIPSState *env) 1080 - { 1081 - return container_of(env, MIPSCPU, env); 1082 - } 1083 - 1084 - #define ENV_GET_CPU(e) CPU(mips_env_get_cpu(e)) 1085 - 1086 - #define ENV_OFFSET offsetof(MIPSCPU, env) 1087 1073 1088 1074 void mips_cpu_list(void); 1089 1075 ··· 1116 1102 { 1117 1103 return hflags_mmu_index(env->hflags); 1118 1104 } 1105 + 1106 + typedef CPUMIPSState CPUArchState; 1107 + typedef MIPSCPU ArchCPU; 1119 1108 1120 1109 #include "exec/cpu-all.h" 1121 1110
+5 -10
target/mips/helper.c
··· 339 339 340 340 void cpu_mips_tlb_flush(CPUMIPSState *env) 341 341 { 342 - MIPSCPU *cpu = mips_env_get_cpu(env); 343 - 344 342 /* Flush qemu's TLB and discard all shadowed entries. */ 345 - tlb_flush(CPU(cpu)); 343 + tlb_flush(env_cpu(env)); 346 344 env->tlb->tlb_in_use = env->tlb->nb_tlb; 347 345 } 348 346 ··· 404 402 #if defined(TARGET_MIPS64) 405 403 if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) { 406 404 /* Access to at least one of the 64-bit segments has been disabled */ 407 - tlb_flush(CPU(mips_env_get_cpu(env))); 405 + tlb_flush(env_cpu(env)); 408 406 } 409 407 #endif 410 408 if (env->CP0_Config3 & (1 << CP0C3_MT)) { ··· 449 447 static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, 450 448 int rw, int tlb_error) 451 449 { 452 - CPUState *cs = CPU(mips_env_get_cpu(env)); 450 + CPUState *cs = env_cpu(env); 453 451 int exception = 0, error_code = 0; 454 452 455 453 if (rw == MMU_INST_FETCH) { ··· 1394 1392 #if !defined(CONFIG_USER_ONLY) 1395 1393 void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra) 1396 1394 { 1397 - MIPSCPU *cpu = mips_env_get_cpu(env); 1398 - CPUState *cs; 1395 + CPUState *cs = env_cpu(env); 1399 1396 r4k_tlb_t *tlb; 1400 1397 target_ulong addr; 1401 1398 target_ulong end; ··· 1421 1418 /* 1k pages are not supported. 
*/ 1422 1419 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); 1423 1420 if (tlb->V0) { 1424 - cs = CPU(cpu); 1425 1421 addr = tlb->VPN & ~mask; 1426 1422 #if defined(TARGET_MIPS64) 1427 1423 if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { ··· 1435 1431 } 1436 1432 } 1437 1433 if (tlb->V1) { 1438 - cs = CPU(cpu); 1439 1434 addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1); 1440 1435 #if defined(TARGET_MIPS64) 1441 1436 if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) { ··· 1456 1451 int error_code, 1457 1452 uintptr_t pc) 1458 1453 { 1459 - CPUState *cs = CPU(mips_env_get_cpu(env)); 1454 + CPUState *cs = env_cpu(env); 1460 1455 1461 1456 qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n", 1462 1457 __func__, exception, error_code);
-15
target/mips/mips-defs.h
··· 5 5 //#define USE_HOST_FLOAT_REGS 6 6 7 7 /* Real pages are variable size... */ 8 - #define TARGET_PAGE_BITS 12 9 8 #define MIPS_TLB_MAX 128 10 - 11 - #if defined(TARGET_MIPS64) 12 - #define TARGET_LONG_BITS 64 13 - #define TARGET_PHYS_ADDR_SPACE_BITS 48 14 - #define TARGET_VIRT_ADDR_SPACE_BITS 48 15 - #else 16 - #define TARGET_LONG_BITS 32 17 - #define TARGET_PHYS_ADDR_SPACE_BITS 40 18 - # ifdef CONFIG_USER_ONLY 19 - # define TARGET_VIRT_ADDR_SPACE_BITS 31 20 - # else 21 - # define TARGET_VIRT_ADDR_SPACE_BITS 32 22 - #endif 23 - #endif 24 9 25 10 /* 26 11 * bit definitions for insn_flags (ISAs/ASEs flags)
+11 -14
target/mips/op_helper.c
··· 350 350 int rw, uintptr_t retaddr) 351 351 { 352 352 hwaddr paddr; 353 - CPUState *cs = CPU(mips_env_get_cpu(env)); 353 + CPUState *cs = env_cpu(env); 354 354 355 355 paddr = cpu_mips_translate_address(env, address, rw); 356 356 ··· 699 699 return env; 700 700 } 701 701 702 - cs = CPU(mips_env_get_cpu(env)); 702 + cs = env_cpu(env); 703 703 vpe_idx = tc_idx / cs->nr_threads; 704 704 *tc = tc_idx % cs->nr_threads; 705 705 other_cs = qemu_get_cpu(vpe_idx); ··· 1298 1298 1299 1299 void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1) 1300 1300 { 1301 - MIPSCPU *cpu = mips_env_get_cpu(env); 1301 + MIPSCPU *cpu = env_archcpu(env); 1302 1302 1303 1303 env->active_tc.CP0_TCHalt = arg1 & 0x1; 1304 1304 ··· 1314 1314 { 1315 1315 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1316 1316 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1317 - MIPSCPU *other_cpu = mips_env_get_cpu(other); 1317 + MIPSCPU *other_cpu = env_archcpu(other); 1318 1318 1319 1319 // TODO: Halt TC / Restart (if allocated+active) TC. 1320 1320 ··· 1427 1427 1428 1428 void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1) 1429 1429 { 1430 - CPUState *cs = CPU(mips_env_get_cpu(env)); 1430 + CPUState *cs = env_cpu(env); 1431 1431 1432 1432 env->CP0_SegCtl0 = arg1 & CP0SC0_MASK; 1433 1433 tlb_flush(cs); ··· 1435 1435 1436 1436 void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1) 1437 1437 { 1438 - CPUState *cs = CPU(mips_env_get_cpu(env)); 1438 + CPUState *cs = env_cpu(env); 1439 1439 1440 1440 env->CP0_SegCtl1 = arg1 & CP0SC1_MASK; 1441 1441 tlb_flush(cs); ··· 1443 1443 1444 1444 void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1) 1445 1445 { 1446 - CPUState *cs = CPU(mips_env_get_cpu(env)); 1446 + CPUState *cs = env_cpu(env); 1447 1447 1448 1448 env->CP0_SegCtl2 = arg1 & CP0SC2_MASK; 1449 1449 tlb_flush(cs); ··· 1666 1666 /* If the ASID changes, flush qemu's TLB. 
*/ 1667 1667 if ((old & env->CP0_EntryHi_ASID_mask) != 1668 1668 (val & env->CP0_EntryHi_ASID_mask)) { 1669 - tlb_flush(CPU(mips_env_get_cpu(env))); 1669 + tlb_flush(env_cpu(env)); 1670 1670 } 1671 1671 } 1672 1672 ··· 1686 1686 1687 1687 void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) 1688 1688 { 1689 - MIPSCPU *cpu = mips_env_get_cpu(env); 1690 1689 uint32_t val, old; 1691 1690 1692 1691 old = env->CP0_Status; ··· 1706 1705 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break; 1707 1706 case MIPS_HFLAG_KM: qemu_log("\n"); break; 1708 1707 default: 1709 - cpu_abort(CPU(cpu), "Invalid MMU mode!\n"); 1708 + cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); 1710 1709 break; 1711 1710 } 1712 1711 } ··· 2485 2484 2486 2485 static void debug_post_eret(CPUMIPSState *env) 2487 2486 { 2488 - MIPSCPU *cpu = mips_env_get_cpu(env); 2489 - 2490 2487 if (qemu_loglevel_mask(CPU_LOG_EXEC)) { 2491 2488 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, 2492 2489 env->active_tc.PC, env->CP0_EPC); ··· 2502 2499 case MIPS_HFLAG_SM: qemu_log(", SM\n"); break; 2503 2500 case MIPS_HFLAG_KM: qemu_log("\n"); break; 2504 2501 default: 2505 - cpu_abort(CPU(cpu), "Invalid MMU mode!\n"); 2502 + cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); 2506 2503 break; 2507 2504 } 2508 2505 } ··· 2633 2630 2634 2631 void helper_wait(CPUMIPSState *env) 2635 2632 { 2636 - CPUState *cs = CPU(mips_env_get_cpu(env)); 2633 + CPUState *cs = env_cpu(env); 2637 2634 2638 2635 cs->halted = 1; 2639 2636 cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
+1 -2
target/mips/translate.c
··· 30119 30119 30120 30120 void cpu_state_reset(CPUMIPSState *env) 30121 30121 { 30122 - MIPSCPU *cpu = mips_env_get_cpu(env); 30123 - CPUState *cs = CPU(cpu); 30122 + CPUState *cs = env_cpu(env); 30124 30123 30125 30124 /* Reset registers to their default values */ 30126 30125 env->CP0_PRid = env->cpu_model->CP0_PRid;
+1 -3
target/mips/translate_init.inc.c
··· 871 871 872 872 static void mmu_init (CPUMIPSState *env, const mips_def_t *def) 873 873 { 874 - MIPSCPU *cpu = mips_env_get_cpu(env); 875 - 876 874 env->tlb = g_malloc0(sizeof(CPUMIPSTLBContext)); 877 875 878 876 switch (def->mmu_type) { ··· 889 887 case MMU_TYPE_R6000: 890 888 case MMU_TYPE_R8000: 891 889 default: 892 - cpu_abort(CPU(cpu), "MMU type not supported\n"); 890 + cpu_abort(env_cpu(env), "MMU type not supported\n"); 893 891 } 894 892 } 895 893 #endif /* CONFIG_USER_ONLY */
+17
target/moxie/cpu-param.h
··· 1 + /* 2 + * Moxie cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2008, 2010, 2013 Anthony Green 5 + * SPDX-License-Identifier: LGPL-2.1+ 6 + */ 7 + 8 + #ifndef MOXIE_CPU_PARAM_H 9 + #define MOXIE_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 12 /* 4k */ 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 15 + #define NB_MMU_MODES 1 16 + 17 + #endif
+1 -2
target/moxie/cpu.c
··· 74 74 75 75 static void moxie_cpu_initfn(Object *obj) 76 76 { 77 - CPUState *cs = CPU(obj); 78 77 MoxieCPU *cpu = MOXIE_CPU(obj); 79 78 80 - cs->env_ptr = &cpu->env; 79 + cpu_set_cpustate_pointers(cpu); 81 80 } 82 81 83 82 static ObjectClass *moxie_cpu_class_by_name(const char *cpu_model)
+5 -24
target/moxie/cpu.h
··· 21 21 #define MOXIE_CPU_H 22 22 23 23 #include "qemu-common.h" 24 - 25 - #define TARGET_LONG_BITS 32 26 - 27 - #define CPUArchState struct CPUMoxieState 24 + #include "exec/cpu-defs.h" 28 25 29 26 #define MOXIE_EX_DIV0 0 30 27 #define MOXIE_EX_BAD 1 ··· 33 30 #define MOXIE_EX_MMU_MISS 4 34 31 #define MOXIE_EX_BREAK 16 35 32 36 - #include "exec/cpu-defs.h" 37 - 38 - #define TARGET_PAGE_BITS 12 /* 4k */ 39 - 40 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 41 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 42 - 43 - #define NB_MMU_MODES 1 44 - 45 33 typedef struct CPUMoxieState { 46 34 47 35 uint32_t flags; /* general execution flags */ ··· 57 45 58 46 /* Fields up to this point are cleared by a CPU reset */ 59 47 struct {} end_reset_fields; 60 - 61 - CPU_COMMON 62 - 63 48 } CPUMoxieState; 64 49 65 50 #include "qom/cpu.h" ··· 99 84 CPUState parent_obj; 100 85 /*< public >*/ 101 86 87 + CPUNegativeOffsetState neg; 102 88 CPUMoxieState env; 103 89 } MoxieCPU; 104 90 105 - static inline MoxieCPU *moxie_env_get_cpu(CPUMoxieState *env) 106 - { 107 - return container_of(env, MoxieCPU, env); 108 - } 109 - 110 - #define ENV_GET_CPU(e) CPU(moxie_env_get_cpu(e)) 111 - 112 - #define ENV_OFFSET offsetof(MoxieCPU, env) 113 91 114 92 void moxie_cpu_do_interrupt(CPUState *cs); 115 93 void moxie_cpu_dump_state(CPUState *cpu, FILE *f, int flags); ··· 128 106 { 129 107 return 0; 130 108 } 109 + 110 + typedef CPUMoxieState CPUArchState; 111 + typedef MoxieCPU ArchCPU; 131 112 132 113 #include "exec/cpu-all.h" 133 114
+2 -2
target/moxie/helper.c
··· 28 28 29 29 void helper_raise_exception(CPUMoxieState *env, int ex) 30 30 { 31 - CPUState *cs = CPU(moxie_env_get_cpu(env)); 31 + CPUState *cs = env_cpu(env); 32 32 33 33 cs->exception_index = ex; 34 34 /* Stash the exception type. */ ··· 65 65 66 66 void helper_debug(CPUMoxieState *env) 67 67 { 68 - CPUState *cs = CPU(moxie_env_get_cpu(env)); 68 + CPUState *cs = env_cpu(env); 69 69 70 70 cs->exception_index = EXCP_DEBUG; 71 71 cpu_loop_exit(cs);
+1 -1
target/moxie/translate.c
··· 816 816 void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) 817 817 { 818 818 CPUMoxieState *env = cs->env_ptr; 819 - MoxieCPU *cpu = moxie_env_get_cpu(env); 819 + MoxieCPU *cpu = env_archcpu(env); 820 820 DisasContext ctx; 821 821 target_ulong pc_start; 822 822 int num_insns;
+21
target/nios2/cpu-param.h
··· 1 + /* 2 + * Altera Nios II cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2012 Chris Wulff <crwulff@gmail.com> 5 + * SPDX-License-Identifier: LGPL-2.1+ 6 + */ 7 + 8 + #ifndef NIOS2_CPU_PARAM_H 9 + #define NIOS2_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 12 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #ifdef CONFIG_USER_ONLY 15 + # define TARGET_VIRT_ADDR_SPACE_BITS 31 16 + #else 17 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 18 + #endif 19 + #define NB_MMU_MODES 2 20 + 21 + #endif
+2 -4
target/nios2/cpu.c
··· 66 66 67 67 static void nios2_cpu_initfn(Object *obj) 68 68 { 69 - CPUState *cs = CPU(obj); 70 69 Nios2CPU *cpu = NIOS2_CPU(obj); 71 - CPUNios2State *env = &cpu->env; 72 70 73 - cs->env_ptr = env; 71 + cpu_set_cpustate_pointers(cpu); 74 72 75 73 #if !defined(CONFIG_USER_ONLY) 76 - mmu_init(env); 74 + mmu_init(&cpu->env); 77 75 #endif 78 76 } 79 77
+6 -27
target/nios2/cpu.h
··· 22 22 #define NIOS2_CPU_H 23 23 24 24 #include "qemu-common.h" 25 - 26 - #define TARGET_LONG_BITS 32 27 - 28 - #define CPUArchState struct CPUNios2State 29 - 30 25 #include "exec/cpu-defs.h" 31 26 #include "qom/cpu.h" 32 - struct CPUNios2State; 27 + 33 28 typedef struct CPUNios2State CPUNios2State; 34 29 #if !defined(CONFIG_USER_ONLY) 35 30 #include "mmu.h" ··· 164 159 165 160 #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 166 161 167 - #define NB_MMU_MODES 2 168 - 169 162 struct CPUNios2State { 170 163 uint32_t regs[NUM_CORE_REGS]; 171 164 ··· 174 167 175 168 uint32_t irq_pending; 176 169 #endif 177 - 178 - CPU_COMMON 179 170 }; 180 171 181 172 /** ··· 189 180 CPUState parent_obj; 190 181 /*< public >*/ 191 182 183 + CPUNegativeOffsetState neg; 192 184 CPUNios2State env; 185 + 193 186 bool mmu_present; 194 187 uint32_t pid_num_bits; 195 188 uint32_t tlb_num_ways; ··· 201 194 uint32_t fast_tlb_miss_addr; 202 195 } Nios2CPU; 203 196 204 - static inline Nios2CPU *nios2_env_get_cpu(CPUNios2State *env) 205 - { 206 - return NIOS2_CPU(container_of(env, Nios2CPU, env)); 207 - } 208 - 209 - #define ENV_GET_CPU(e) CPU(nios2_env_get_cpu(e)) 210 - 211 - #define ENV_OFFSET offsetof(Nios2CPU, env) 212 197 213 198 void nios2_tcg_init(void); 214 199 void nios2_cpu_do_interrupt(CPUState *cs); ··· 225 210 226 211 void do_nios2_semihosting(CPUNios2State *env); 227 212 228 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 229 - #ifdef CONFIG_USER_ONLY 230 - # define TARGET_VIRT_ADDR_SPACE_BITS 31 231 - #else 232 - # define TARGET_VIRT_ADDR_SPACE_BITS 32 233 - #endif 234 - 235 213 #define CPU_RESOLVING_TYPE TYPE_NIOS2_CPU 236 214 237 215 #define cpu_gen_code cpu_nios2_gen_code 238 216 #define cpu_signal_handler cpu_nios2_signal_handler 239 217 240 218 #define CPU_SAVE_VERSION 1 241 - 242 - #define TARGET_PAGE_BITS 12 243 219 244 220 /* MMU modes definitions */ 245 221 #define MMU_MODE0_SUFFIX _kernel ··· 261 237 { 262 238 return env->regs[CR_STATUS] & CR_STATUS_PIE; 263 239 } 240 + 241 
+ typedef CPUNios2State CPUArchState; 242 + typedef Nios2CPU ArchCPU; 264 243 265 244 #include "exec/cpu-all.h" 266 245
+7 -7
target/nios2/mmu.c
··· 61 61 Nios2MMULookup *lu, 62 62 target_ulong vaddr, int rw, int mmu_idx) 63 63 { 64 - Nios2CPU *cpu = nios2_env_get_cpu(env); 64 + Nios2CPU *cpu = env_archcpu(env); 65 65 int pid = (env->mmu.tlbmisc_wr & CR_TLBMISC_PID_MASK) >> 4; 66 66 int vpn = vaddr >> 12; 67 67 ··· 102 102 103 103 static void mmu_flush_pid(CPUNios2State *env, uint32_t pid) 104 104 { 105 - CPUState *cs = ENV_GET_CPU(env); 106 - Nios2CPU *cpu = nios2_env_get_cpu(env); 105 + CPUState *cs = env_cpu(env); 106 + Nios2CPU *cpu = env_archcpu(env); 107 107 int idx; 108 108 MMU_LOG(qemu_log("TLB Flush PID %d\n", pid)); 109 109 ··· 126 126 127 127 void mmu_write(CPUNios2State *env, uint32_t rn, uint32_t v) 128 128 { 129 - CPUState *cs = ENV_GET_CPU(env); 130 - Nios2CPU *cpu = nios2_env_get_cpu(env); 129 + CPUState *cs = env_cpu(env); 130 + Nios2CPU *cpu = env_archcpu(env); 131 131 132 132 MMU_LOG(qemu_log("mmu_write %08X = %08X\n", rn, v)); 133 133 ··· 244 244 245 245 void mmu_init(CPUNios2State *env) 246 246 { 247 - Nios2CPU *cpu = nios2_env_get_cpu(env); 247 + Nios2CPU *cpu = env_archcpu(env); 248 248 Nios2MMU *mmu = &env->mmu; 249 249 250 250 MMU_LOG(qemu_log("mmu_init\n")); ··· 255 255 256 256 void dump_mmu(CPUNios2State *env) 257 257 { 258 - Nios2CPU *cpu = nios2_env_get_cpu(env); 258 + Nios2CPU *cpu = env_archcpu(env); 259 259 int i; 260 260 261 261 qemu_printf("MMU: ways %d, entries %d, pid bits %d\n",
+1 -1
target/nios2/op_helper.c
··· 46 46 47 47 void helper_raise_exception(CPUNios2State *env, uint32_t index) 48 48 { 49 - CPUState *cs = ENV_GET_CPU(env); 49 + CPUState *cs = env_cpu(env); 50 50 cs->exception_index = index; 51 51 cpu_loop_exit(cs); 52 52 }
+17
target/openrisc/cpu-param.h
··· 1 + /* 2 + * OpenRISC cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com> 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef OPENRISC_CPU_PARAM_H 9 + #define OPENRISC_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 13 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 15 + #define NB_MMU_MODES 3 16 + 17 + #endif
+1 -2
target/openrisc/cpu.c
··· 92 92 93 93 static void openrisc_cpu_initfn(Object *obj) 94 94 { 95 - CPUState *cs = CPU(obj); 96 95 OpenRISCCPU *cpu = OPENRISC_CPU(obj); 97 96 98 - cs->env_ptr = &cpu->env; 97 + cpu_set_cpustate_pointers(cpu); 99 98 } 100 99 101 100 /* CPU models */
+7 -24
target/openrisc/cpu.h
··· 20 20 #ifndef OPENRISC_CPU_H 21 21 #define OPENRISC_CPU_H 22 22 23 - #define TARGET_LONG_BITS 32 24 - 25 - #define CPUArchState struct CPUOpenRISCState 26 - 27 - /* cpu_openrisc_map_address_* in CPUOpenRISCTLBContext need this decl. */ 28 - struct OpenRISCCPU; 29 - 30 23 #include "qemu-common.h" 31 24 #include "exec/cpu-defs.h" 32 25 #include "qom/cpu.h" 26 + 27 + /* cpu_openrisc_map_address_* in CPUOpenRISCTLBContext need this decl. */ 28 + struct OpenRISCCPU; 33 29 34 30 #define TYPE_OPENRISC_CPU "or1k-cpu" 35 31 ··· 56 52 void (*parent_reset)(CPUState *cpu); 57 53 } OpenRISCCPUClass; 58 54 59 - #define NB_MMU_MODES 3 60 55 #define TARGET_INSN_START_EXTRA_WORDS 1 61 56 62 57 enum { ··· 64 59 MMU_SUPERVISOR_IDX = 1, 65 60 MMU_USER_IDX = 2, 66 61 }; 67 - 68 - #define TARGET_PAGE_BITS 13 69 - 70 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 71 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 72 62 73 63 #define SET_FP_CAUSE(reg, v) do {\ 74 64 (reg) = ((reg) & ~(0x3f << 12)) | \ ··· 296 286 /* Fields up to this point are cleared by a CPU reset */ 297 287 struct {} end_reset_fields; 298 288 299 - CPU_COMMON 300 - 301 289 /* Fields from here on are preserved across CPU reset. 
*/ 302 290 uint32_t cpucfgr; /* CPU configure register */ 303 291 ··· 323 311 CPUState parent_obj; 324 312 /*< public >*/ 325 313 314 + CPUNegativeOffsetState neg; 326 315 CPUOpenRISCState env; 327 - 328 316 } OpenRISCCPU; 329 317 330 - static inline OpenRISCCPU *openrisc_env_get_cpu(CPUOpenRISCState *env) 331 - { 332 - return container_of(env, OpenRISCCPU, env); 333 - } 334 - 335 - #define ENV_GET_CPU(e) CPU(openrisc_env_get_cpu(e)) 336 - 337 - #define ENV_OFFSET offsetof(OpenRISCCPU, env) 338 318 339 319 void cpu_openrisc_list(void); 340 320 void openrisc_cpu_do_interrupt(CPUState *cpu); ··· 372 352 #define OPENRISC_CPU_TYPE_SUFFIX "-" TYPE_OPENRISC_CPU 373 353 #define OPENRISC_CPU_TYPE_NAME(model) model OPENRISC_CPU_TYPE_SUFFIX 374 354 #define CPU_RESOLVING_TYPE TYPE_OPENRISC_CPU 355 + 356 + typedef CPUOpenRISCState CPUArchState; 357 + typedef OpenRISCCPU ArchCPU; 375 358 376 359 #include "exec/cpu-all.h" 377 360
+2 -3
target/openrisc/exception_helper.c
··· 25 25 26 26 void HELPER(exception)(CPUOpenRISCState *env, uint32_t excp) 27 27 { 28 - OpenRISCCPU *cpu = openrisc_env_get_cpu(env); 28 + OpenRISCCPU *cpu = env_archcpu(env); 29 29 30 30 raise_exception(cpu, excp); 31 31 } 32 32 33 33 static void QEMU_NORETURN do_range(CPUOpenRISCState *env, uintptr_t pc) 34 34 { 35 - OpenRISCCPU *cpu = openrisc_env_get_cpu(env); 36 - CPUState *cs = CPU(cpu); 35 + CPUState *cs = env_cpu(env); 37 36 38 37 cs->exception_index = EXCP_RANGE; 39 38 cpu_loop_exit_restore(cs, pc);
+4 -4
target/openrisc/sys_helper.c
··· 30 30 void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) 31 31 { 32 32 #ifndef CONFIG_USER_ONLY 33 - OpenRISCCPU *cpu = openrisc_env_get_cpu(env); 34 - CPUState *cs = CPU(cpu); 33 + OpenRISCCPU *cpu = env_archcpu(env); 34 + CPUState *cs = env_cpu(env); 35 35 target_ulong mr; 36 36 int idx; 37 37 ··· 194 194 target_ulong spr) 195 195 { 196 196 #ifndef CONFIG_USER_ONLY 197 - OpenRISCCPU *cpu = openrisc_env_get_cpu(env); 198 - CPUState *cs = CPU(cpu); 197 + OpenRISCCPU *cpu = env_archcpu(env); 198 + CPUState *cs = env_cpu(env); 199 199 int idx; 200 200 201 201 switch (spr) {
+37
target/ppc/cpu-param.h
··· 1 + /* 2 + * PowerPC cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2007 Jocelyn Mayer 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef PPC_CPU_PARAM_H 9 + #define PPC_CPU_PARAM_H 1 10 + 11 + #ifdef TARGET_PPC64 12 + # define TARGET_LONG_BITS 64 13 + /* 14 + * Note that the official physical address space bits is 62-M where M 15 + * is implementation dependent. I've not looked up M for the set of 16 + * cpus we emulate at the system level. 17 + */ 18 + #define TARGET_PHYS_ADDR_SPACE_BITS 62 19 + /* 20 + * Note that the PPC environment architecture talks about 80 bit virtual 21 + * addresses, with segmentation. Obviously that's not all visible to a 22 + * single process, which is all we're concerned with here. 23 + */ 24 + # ifdef TARGET_ABI32 25 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 26 + # else 27 + # define TARGET_VIRT_ADDR_SPACE_BITS 64 28 + # endif 29 + #else 30 + # define TARGET_LONG_BITS 32 31 + # define TARGET_PHYS_ADDR_SPACE_BITS 36 32 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 33 + #endif 34 + #define TARGET_PAGE_BITS 12 35 + #define NB_MMU_MODES 10 36 + 37 + #endif
+10 -51
target/ppc/cpu.h
··· 22 22 23 23 #include "qemu-common.h" 24 24 #include "qemu/int128.h" 25 + #include "exec/cpu-defs.h" 26 + #include "cpu-qom.h" 27 + #include "exec/cpu-defs.h" 28 + #include "cpu-qom.h" 25 29 26 30 /* #define PPC_EMULATE_32BITS_HYPV */ 27 31 28 - #if defined(TARGET_PPC64) 29 - /* PowerPC 64 definitions */ 30 - #define TARGET_LONG_BITS 64 31 - #define TARGET_PAGE_BITS 12 32 - 33 32 #define TCG_GUEST_DEFAULT_MO 0 34 33 35 - /* 36 - * Note that the official physical address space bits is 62-M where M 37 - * is implementation dependent. I've not looked up M for the set of 38 - * cpus we emulate at the system level. 39 - */ 40 - #define TARGET_PHYS_ADDR_SPACE_BITS 62 41 - 42 - /* 43 - * Note that the PPC environment architecture talks about 80 bit 44 - * virtual addresses, with segmentation. Obviously that's not all 45 - * visible to a single process, which is all we're concerned with 46 - * here. 47 - */ 48 - #ifdef TARGET_ABI32 49 - # define TARGET_VIRT_ADDR_SPACE_BITS 32 50 - #else 51 - # define TARGET_VIRT_ADDR_SPACE_BITS 64 52 - #endif 53 - 54 34 #define TARGET_PAGE_BITS_64K 16 55 35 #define TARGET_PAGE_BITS_16M 24 56 36 57 - #else /* defined(TARGET_PPC64) */ 58 - /* PowerPC 32 definitions */ 59 - #define TARGET_LONG_BITS 32 60 - #define TARGET_PAGE_BITS 12 61 - 62 - #define TARGET_PHYS_ADDR_SPACE_BITS 36 63 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 64 - 65 - #endif /* defined(TARGET_PPC64) */ 66 - 67 - #define CPUArchState struct CPUPPCState 68 - 69 - #include "exec/cpu-defs.h" 70 - #include "cpu-qom.h" 71 - 72 37 #if defined(TARGET_PPC64) 73 38 #define PPC_ELF_MACHINE EM_PPC64 74 39 #else ··· 974 939 * + real/paged mode combinations. The other two modes are for 975 940 * external PID load/store. 
976 941 */ 977 - #define NB_MMU_MODES 10 978 942 #define MMU_MODE8_SUFFIX _epl 979 943 #define MMU_MODE9_SUFFIX _eps 980 944 #define PPC_TLB_EPID_LOAD 8 ··· 1034 998 1035 999 /* when a memory exception occurs, the access type is stored here */ 1036 1000 int access_type; 1037 - 1038 - CPU_COMMON 1039 1001 1040 1002 /* MMU context - only relevant for full system emulation */ 1041 1003 #if !defined(CONFIG_USER_ONLY) ··· 1220 1182 CPUState parent_obj; 1221 1183 /*< public >*/ 1222 1184 1185 + CPUNegativeOffsetState neg; 1223 1186 CPUPPCState env; 1187 + 1224 1188 int vcpu_id; 1225 1189 uint32_t compat_pvr; 1226 1190 PPCVirtualHypervisor *vhyp; ··· 1239 1203 int32_t mig_slb_nr; 1240 1204 }; 1241 1205 1242 - static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env) 1243 - { 1244 - return container_of(env, PowerPCCPU, env); 1245 - } 1246 - 1247 - #define ENV_GET_CPU(e) CPU(ppc_env_get_cpu(e)) 1248 - 1249 - #define ENV_OFFSET offsetof(PowerPCCPU, env) 1250 1206 1251 1207 PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr); 1252 1208 PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr); ··· 1410 1366 uint32_t *compat_pvr, const char *basedesc, 1411 1367 Error **errp); 1412 1368 #endif /* defined(TARGET_PPC64) */ 1369 + 1370 + typedef CPUPPCState CPUArchState; 1371 + typedef PowerPCCPU ArchCPU; 1413 1372 1414 1373 #include "exec/cpu-all.h" 1415 1374 ··· 2485 2444 } 2486 2445 } 2487 2446 2488 - cpu_abort(CPU(ppc_env_get_cpu(env)), "Unknown TLBe: %d\n", id); 2447 + cpu_abort(env_cpu(env), "Unknown TLBe: %d\n", id); 2489 2448 return 0; 2490 2449 } 2491 2450
+7 -7
target/ppc/excp_helper.c
··· 49 49 50 50 static void ppc_hw_interrupt(CPUPPCState *env) 51 51 { 52 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 52 + CPUState *cs = env_cpu(env); 53 53 54 54 cs->exception_index = POWERPC_EXCP_NONE; 55 55 env->error_code = 0; ··· 792 792 793 793 static void ppc_hw_interrupt(CPUPPCState *env) 794 794 { 795 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 795 + PowerPCCPU *cpu = env_archcpu(env); 796 796 bool async_deliver; 797 797 798 798 /* External reset */ ··· 931 931 * It generally means a discrepancy between the wakup conditions in the 932 932 * processor has_work implementation and the logic in this function. 933 933 */ 934 - cpu_abort(CPU(ppc_env_get_cpu(env)), 934 + cpu_abort(env_cpu(env), 935 935 "Wakeup from PM state but interrupt Undelivered"); 936 936 } 937 937 } ··· 974 974 void raise_exception_err_ra(CPUPPCState *env, uint32_t exception, 975 975 uint32_t error_code, uintptr_t raddr) 976 976 { 977 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 977 + CPUState *cs = env_cpu(env); 978 978 979 979 cs->exception_index = exception; 980 980 env->error_code = error_code; ··· 1015 1015 uint32_t excp = hreg_store_msr(env, val, 0); 1016 1016 1017 1017 if (excp != 0) { 1018 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 1018 + CPUState *cs = env_cpu(env); 1019 1019 cpu_interrupt_exittb(cs); 1020 1020 raise_exception(env, excp); 1021 1021 } ··· 1026 1026 { 1027 1027 CPUState *cs; 1028 1028 1029 - cs = CPU(ppc_env_get_cpu(env)); 1029 + cs = env_cpu(env); 1030 1030 cs->halted = 1; 1031 1031 1032 1032 /* ··· 1043 1043 1044 1044 static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) 1045 1045 { 1046 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 1046 + CPUState *cs = env_cpu(env); 1047 1047 1048 1048 /* MSR:POW cannot be set by any form of rfi */ 1049 1049 msr &= ~(1ULL << MSR_POW);
+7 -7
target/ppc/fpu_helper.c
··· 271 271 env->fpscr |= FP_FX; 272 272 /* We must update the target FPR before raising the exception */ 273 273 if (fpscr_ve != 0) { 274 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 274 + CPUState *cs = env_cpu(env); 275 275 276 276 cs->exception_index = POWERPC_EXCP_PROGRAM; 277 277 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC; ··· 315 315 316 316 static inline void float_overflow_excp(CPUPPCState *env) 317 317 { 318 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 318 + CPUState *cs = env_cpu(env); 319 319 320 320 env->fpscr |= 1 << FPSCR_OX; 321 321 /* Update the floating-point exception summary */ ··· 335 335 336 336 static inline void float_underflow_excp(CPUPPCState *env) 337 337 { 338 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 338 + CPUState *cs = env_cpu(env); 339 339 340 340 env->fpscr |= 1 << FPSCR_UX; 341 341 /* Update the floating-point exception summary */ ··· 352 352 353 353 static inline void float_inexact_excp(CPUPPCState *env) 354 354 { 355 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 355 + CPUState *cs = env_cpu(env); 356 356 357 357 env->fpscr |= 1 << FPSCR_FI; 358 358 env->fpscr |= 1 << FPSCR_XX; ··· 442 442 443 443 void helper_fpscr_setbit(CPUPPCState *env, uint32_t bit) 444 444 { 445 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 445 + CPUState *cs = env_cpu(env); 446 446 int prev; 447 447 448 448 prev = (env->fpscr >> bit) & 1; ··· 574 574 575 575 void helper_store_fpscr(CPUPPCState *env, uint64_t arg, uint32_t mask) 576 576 { 577 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 577 + CPUState *cs = env_cpu(env); 578 578 target_ulong prev, new; 579 579 int i; 580 580 ··· 612 612 613 613 static void do_float_check_status(CPUPPCState *env, uintptr_t raddr) 614 614 { 615 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 615 + CPUState *cs = env_cpu(env); 616 616 int status = get_float_exception_flags(&env->fp_status); 617 617 bool inexact_happened = false; 618 618
+2 -2
target/ppc/helper_regs.h
··· 116 116 { 117 117 int excp; 118 118 #if !defined(CONFIG_USER_ONLY) 119 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 119 + CPUState *cs = env_cpu(env); 120 120 #endif 121 121 122 122 excp = 0; ··· 175 175 #if !defined(CONFIG_USER_ONLY) 176 176 static inline void check_tlb_flush(CPUPPCState *env, bool global) 177 177 { 178 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 178 + CPUState *cs = env_cpu(env); 179 179 180 180 /* Handle global flushes first */ 181 181 if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) {
+2 -3
target/ppc/kvm.c
··· 1991 1991 } 1992 1992 1993 1993 static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo) 1994 - { 1995 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1996 - CPUState *cs = CPU(cpu); 1994 + { 1995 + CPUState *cs = env_cpu(env); 1997 1996 1998 1997 if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) && 1999 1998 !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
+6 -16
target/ppc/misc_helper.c
··· 81 81 82 82 void helper_store_sdr1(CPUPPCState *env, target_ulong val) 83 83 { 84 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 85 - 86 84 if (env->spr[SPR_SDR1] != val) { 87 85 ppc_store_sdr1(env, val); 88 - tlb_flush(CPU(cpu)); 86 + tlb_flush(env_cpu(env)); 89 87 } 90 88 } 91 89 92 90 #if defined(TARGET_PPC64) 93 91 void helper_store_ptcr(CPUPPCState *env, target_ulong val) 94 92 { 95 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 96 - 97 93 if (env->spr[SPR_PTCR] != val) { 98 94 ppc_store_ptcr(env, val); 99 - tlb_flush(CPU(cpu)); 95 + tlb_flush(env_cpu(env)); 100 96 } 101 97 } 102 98 103 99 void helper_store_pcr(CPUPPCState *env, target_ulong value) 104 100 { 105 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 101 + PowerPCCPU *cpu = env_archcpu(env); 106 102 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 107 103 108 104 env->spr[SPR_PCR] = value & pcc->pcr_mask; ··· 111 107 112 108 void helper_store_pidr(CPUPPCState *env, target_ulong val) 113 109 { 114 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 115 - 116 110 env->spr[SPR_BOOKS_PID] = val; 117 - tlb_flush(CPU(cpu)); 111 + tlb_flush(env_cpu(env)); 118 112 } 119 113 120 114 void helper_store_lpidr(CPUPPCState *env, target_ulong val) 121 115 { 122 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 123 - 124 116 env->spr[SPR_LPIDR] = val; 125 117 126 118 /* ··· 129 121 * potentially access and cache entries for the current LPID as 130 122 * well. 131 123 */ 132 - tlb_flush(CPU(cpu)); 124 + tlb_flush(env_cpu(env)); 133 125 } 134 126 135 127 void helper_store_hid0_601(CPUPPCState *env, target_ulong val) ··· 151 143 152 144 void helper_store_403_pbr(CPUPPCState *env, uint32_t num, target_ulong value) 153 145 { 154 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 155 - 156 146 if (likely(env->pb[num] != value)) { 157 147 env->pb[num] = value; 158 148 /* Should be optimized */ 159 - tlb_flush(CPU(cpu)); 149 + tlb_flush(env_cpu(env)); 160 150 } 161 151 } 162 152
+7 -7
target/ppc/mmu-hash64.c
··· 96 96 97 97 void helper_slbia(CPUPPCState *env) 98 98 { 99 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 99 + PowerPCCPU *cpu = env_archcpu(env); 100 100 int n; 101 101 102 102 /* XXX: Warning: slbia never invalidates the first segment */ ··· 118 118 static void __helper_slbie(CPUPPCState *env, target_ulong addr, 119 119 target_ulong global) 120 120 { 121 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 121 + PowerPCCPU *cpu = env_archcpu(env); 122 122 ppc_slb_t *slb; 123 123 124 124 slb = slb_lookup(cpu, addr); ··· 251 251 252 252 void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs) 253 253 { 254 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 254 + PowerPCCPU *cpu = env_archcpu(env); 255 255 256 256 if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) { 257 257 raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM, ··· 261 261 262 262 target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb) 263 263 { 264 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 264 + PowerPCCPU *cpu = env_archcpu(env); 265 265 target_ulong rt = 0; 266 266 267 267 if (ppc_load_slb_esid(cpu, rb, &rt) < 0) { ··· 273 273 274 274 target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb) 275 275 { 276 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 276 + PowerPCCPU *cpu = env_archcpu(env); 277 277 target_ulong rt = 0; 278 278 279 279 if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) { ··· 285 285 286 286 target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb) 287 287 { 288 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 288 + PowerPCCPU *cpu = env_archcpu(env); 289 289 target_ulong rt = 0; 290 290 291 291 if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) { ··· 1163 1163 1164 1164 void helper_store_lpcr(CPUPPCState *env, target_ulong val) 1165 1165 { 1166 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1166 + PowerPCCPU *cpu = env_archcpu(env); 1167 1167 1168 1168 ppc_store_lpcr(cpu, val); 1169 1169 }
+48 -69
target/ppc/mmu_helper.c
··· 239 239 240 240 static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) 241 241 { 242 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 243 242 ppc6xx_tlb_t *tlb; 244 243 int nr, max; 245 244 ··· 253 252 tlb = &env->tlb.tlb6[nr]; 254 253 pte_invalidate(&tlb->pte0); 255 254 } 256 - tlb_flush(CPU(cpu)); 255 + tlb_flush(env_cpu(env)); 257 256 } 258 257 259 258 static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env, ··· 261 260 int is_code, int match_epn) 262 261 { 263 262 #if !defined(FLUSH_ALL_TLBS) 264 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 263 + CPUState *cs = env_cpu(env); 265 264 ppc6xx_tlb_t *tlb; 266 265 int way, nr; 267 266 ··· 474 473 static inline int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, 475 474 target_ulong eaddr, int rw, int type) 476 475 { 477 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 476 + PowerPCCPU *cpu = env_archcpu(env); 478 477 hwaddr hash; 479 478 target_ulong vsid; 480 479 int ds, pr, target_page_bits; ··· 522 521 ret = ppc6xx_tlb_check(env, ctx, eaddr, rw, type); 523 522 #if defined(DUMP_PAGE_TABLES) 524 523 if (qemu_loglevel_mask(CPU_LOG_MMU)) { 525 - CPUState *cs = ENV_GET_CPU(env); 524 + CPUState *cs = env_cpu(env); 526 525 hwaddr curaddr; 527 526 uint32_t a0, a1, a2, a3; 528 527 ··· 670 669 /* Helpers specific to PowerPC 40x implementations */ 671 670 static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env) 672 671 { 673 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 674 672 ppcemb_tlb_t *tlb; 675 673 int i; 676 674 ··· 678 676 tlb = &env->tlb.tlbe[i]; 679 677 tlb->prot &= ~PAGE_VALID; 680 678 } 681 - tlb_flush(CPU(cpu)); 679 + tlb_flush(env_cpu(env)); 682 680 } 683 681 684 682 static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, ··· 749 747 750 748 void store_40x_sler(CPUPPCState *env, uint32_t val) 751 749 { 752 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 753 - 754 750 /* XXX: TO BE FIXED */ 755 751 if (val != 0x00000000) { 756 - cpu_abort(CPU(cpu), "Little-endian regions are not 
supported by now\n"); 752 + cpu_abort(env_cpu(env), 753 + "Little-endian regions are not supported by now\n"); 757 754 } 758 755 env->spr[SPR_405_SLER] = val; 759 756 } ··· 863 860 static void booke206_flush_tlb(CPUPPCState *env, int flags, 864 861 const int check_iprot) 865 862 { 866 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 867 863 int tlb_size; 868 864 int i, j; 869 865 ppcmas_tlb_t *tlb = env->tlb.tlbm; ··· 880 876 tlb += booke206_tlb_size(env, i); 881 877 } 882 878 883 - tlb_flush(CPU(cpu)); 879 + tlb_flush(env_cpu(env)); 884 880 } 885 881 886 882 static hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ··· 1275 1271 1276 1272 static void mmu6xx_dump_mmu(CPUPPCState *env) 1277 1273 { 1278 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1274 + PowerPCCPU *cpu = env_archcpu(env); 1279 1275 ppc6xx_tlb_t *tlb; 1280 1276 target_ulong sr; 1281 1277 int type, way, entry, i; ··· 1347 1343 case POWERPC_MMU_2_03: 1348 1344 case POWERPC_MMU_2_06: 1349 1345 case POWERPC_MMU_2_07: 1350 - dump_slb(ppc_env_get_cpu(env)); 1346 + dump_slb(env_archcpu(env)); 1351 1347 break; 1352 1348 case POWERPC_MMU_3_00: 1353 - if (ppc64_v3_radix(ppc_env_get_cpu(env))) { 1349 + if (ppc64_v3_radix(env_archcpu(env))) { 1354 1350 /* TODO - Unsupported */ 1355 1351 } else { 1356 - dump_slb(ppc_env_get_cpu(env)); 1352 + dump_slb(env_archcpu(env)); 1357 1353 break; 1358 1354 } 1359 1355 #endif ··· 1419 1415 target_ulong eaddr, int rw, int access_type, 1420 1416 int mmu_idx) 1421 1417 { 1422 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1423 1418 int ret = -1; 1424 1419 bool real_mode = (access_type == ACCESS_CODE && msr_ir == 0) 1425 1420 || (access_type != ACCESS_CODE && msr_dr == 0); ··· 1460 1455 break; 1461 1456 case POWERPC_MMU_MPC8xx: 1462 1457 /* XXX: TODO */ 1463 - cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n"); 1458 + cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 1464 1459 break; 1465 1460 case POWERPC_MMU_REAL: 1466 1461 if (real_mode) { 1467 1462 ret = 
check_physical(env, ctx, eaddr, rw); 1468 1463 } else { 1469 - cpu_abort(CPU(cpu), 1464 + cpu_abort(env_cpu(env), 1470 1465 "PowerPC in real mode do not do any translation\n"); 1471 1466 } 1472 1467 return -1; 1473 1468 default: 1474 - cpu_abort(CPU(cpu), "Unknown or invalid MMU model\n"); 1469 + cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); 1475 1470 return -1; 1476 1471 } 1477 1472 ··· 1583 1578 static int cpu_ppc_handle_mmu_fault(CPUPPCState *env, target_ulong address, 1584 1579 int rw, int mmu_idx) 1585 1580 { 1586 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 1581 + CPUState *cs = env_cpu(env); 1587 1582 PowerPCCPU *cpu = POWERPC_CPU(cs); 1588 1583 mmu_ctx_t ctx; 1589 1584 int access_type; ··· 1815 1810 static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu, 1816 1811 target_ulong mask) 1817 1812 { 1818 - CPUState *cs = CPU(ppc_env_get_cpu(env)); 1813 + CPUState *cs = env_cpu(env); 1819 1814 target_ulong base, end, page; 1820 1815 1821 1816 base = BATu & ~0x0001FFFF; ··· 1847 1842 { 1848 1843 target_ulong mask; 1849 1844 #if defined(FLUSH_ALL_TLBS) 1850 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1845 + PowerPCCPU *cpu = env_archcpu(env); 1851 1846 #endif 1852 1847 1853 1848 dump_store_bat(env, 'I', 0, nr, value); ··· 1868 1863 #if !defined(FLUSH_ALL_TLBS) 1869 1864 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1870 1865 #else 1871 - tlb_flush(CPU(cpu)); 1866 + tlb_flush(env_cpu(env)); 1872 1867 #endif 1873 1868 } 1874 1869 } ··· 1883 1878 { 1884 1879 target_ulong mask; 1885 1880 #if defined(FLUSH_ALL_TLBS) 1886 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1881 + PowerPCCPU *cpu = env_archcpu(env); 1887 1882 #endif 1888 1883 1889 1884 dump_store_bat(env, 'D', 0, nr, value); ··· 1904 1899 #if !defined(FLUSH_ALL_TLBS) 1905 1900 do_invalidate_BAT(env, env->DBAT[0][nr], mask); 1906 1901 #else 1907 - tlb_flush(CPU(cpu)); 1902 + tlb_flush(env_cpu(env)); 1908 1903 #endif 1909 1904 } 1910 1905 } ··· 1919 1914 { 1920 1915 target_ulong mask; 
1921 1916 #if defined(FLUSH_ALL_TLBS) 1922 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1917 + PowerPCCPU *cpu = env_archcpu(env); 1923 1918 int do_inval; 1924 1919 #endif 1925 1920 ··· 1953 1948 } 1954 1949 #if defined(FLUSH_ALL_TLBS) 1955 1950 if (do_inval) { 1956 - tlb_flush(CPU(cpu)); 1951 + tlb_flush(env_cpu(env)); 1957 1952 } 1958 1953 #endif 1959 1954 } ··· 1964 1959 #if !defined(FLUSH_ALL_TLBS) 1965 1960 target_ulong mask; 1966 1961 #else 1967 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 1962 + PowerPCCPU *cpu = env_archcpu(env); 1968 1963 int do_inval; 1969 1964 #endif 1970 1965 ··· 1993 1988 env->DBAT[1][nr] = value; 1994 1989 #if defined(FLUSH_ALL_TLBS) 1995 1990 if (do_inval) { 1996 - tlb_flush(CPU(cpu)); 1991 + tlb_flush(env_cpu(env)); 1997 1992 } 1998 1993 #endif 1999 1994 } ··· 2003 1998 /* TLB management */ 2004 1999 void ppc_tlb_invalidate_all(CPUPPCState *env) 2005 2000 { 2006 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2007 - 2008 2001 #if defined(TARGET_PPC64) 2009 2002 if (env->mmu_model & POWERPC_MMU_64) { 2010 2003 env->tlb_need_flush = 0; 2011 - tlb_flush(CPU(cpu)); 2004 + tlb_flush(env_cpu(env)); 2012 2005 } else 2013 2006 #endif /* defined(TARGET_PPC64) */ 2014 2007 switch (env->mmu_model) { ··· 2021 2014 ppc4xx_tlb_invalidate_all(env); 2022 2015 break; 2023 2016 case POWERPC_MMU_REAL: 2024 - cpu_abort(CPU(cpu), "No TLB for PowerPC 4xx in real mode\n"); 2017 + cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n"); 2025 2018 break; 2026 2019 case POWERPC_MMU_MPC8xx: 2027 2020 /* XXX: TODO */ 2028 - cpu_abort(CPU(cpu), "MPC8xx MMU model is not implemented\n"); 2021 + cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); 2029 2022 break; 2030 2023 case POWERPC_MMU_BOOKE: 2031 - tlb_flush(CPU(cpu)); 2024 + tlb_flush(env_cpu(env)); 2032 2025 break; 2033 2026 case POWERPC_MMU_BOOKE206: 2034 2027 booke206_flush_tlb(env, -1, 0); ··· 2036 2029 case POWERPC_MMU_32B: 2037 2030 case POWERPC_MMU_601: 2038 2031 env->tlb_need_flush = 0; 
2039 - tlb_flush(CPU(cpu)); 2032 + tlb_flush(env_cpu(env)); 2040 2033 break; 2041 2034 default: 2042 2035 /* XXX: TODO */ 2043 - cpu_abort(CPU(cpu), "Unknown MMU model %x\n", env->mmu_model); 2036 + cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model); 2044 2037 break; 2045 2038 } 2046 2039 } ··· 2091 2084 /* Special registers manipulation */ 2092 2085 void ppc_store_sdr1(CPUPPCState *env, target_ulong value) 2093 2086 { 2094 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2087 + PowerPCCPU *cpu = env_archcpu(env); 2095 2088 qemu_log_mask(CPU_LOG_MMU, "%s: " TARGET_FMT_lx "\n", __func__, value); 2096 2089 assert(!cpu->vhyp); 2097 2090 #if defined(TARGET_PPC64) ··· 2118 2111 #if defined(TARGET_PPC64) 2119 2112 void ppc_store_ptcr(CPUPPCState *env, target_ulong value) 2120 2113 { 2121 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2114 + PowerPCCPU *cpu = env_archcpu(env); 2122 2115 target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS; 2123 2116 target_ulong patbsize = value & PTCR_PATS; 2124 2117 ··· 2163 2156 (int)srnum, value, env->sr[srnum]); 2164 2157 #if defined(TARGET_PPC64) 2165 2158 if (env->mmu_model & POWERPC_MMU_64) { 2166 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2159 + PowerPCCPU *cpu = env_archcpu(env); 2167 2160 uint64_t esid, vsid; 2168 2161 2169 2162 /* ESID = srnum */ ··· 2190 2183 page = (16 << 20) * srnum; 2191 2184 end = page + (16 << 20); 2192 2185 for (; page != end; page += TARGET_PAGE_SIZE) { 2193 - tlb_flush_page(CPU(cpu), page); 2186 + tlb_flush_page(env_cpu(env), page); 2194 2187 } 2195 2188 } 2196 2189 #else ··· 2212 2205 2213 2206 void helper_tlbiva(CPUPPCState *env, target_ulong addr) 2214 2207 { 2215 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2216 - 2217 2208 /* tlbiva instruction only exists on BookE */ 2218 2209 assert(env->mmu_model == POWERPC_MMU_BOOKE); 2219 2210 /* XXX: TODO */ 2220 - cpu_abort(CPU(cpu), "BookE MMU model is not implemented\n"); 2211 + cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n"); 2221 2212 } 2222 
2213 2223 2214 /* Software driven TLBs management */ ··· 2433 2424 void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry, 2434 2425 target_ulong val) 2435 2426 { 2436 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2437 - CPUState *cs = CPU(cpu); 2427 + CPUState *cs = env_cpu(env); 2438 2428 ppcemb_tlb_t *tlb; 2439 2429 target_ulong page, end; 2440 2430 ··· 2529 2519 void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry, 2530 2520 target_ulong value) 2531 2521 { 2532 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2533 2522 ppcemb_tlb_t *tlb; 2534 2523 target_ulong EPN, RPN, size; 2535 2524 int do_flush_tlbs; ··· 2565 2554 } 2566 2555 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF; 2567 2556 if (do_flush_tlbs) { 2568 - tlb_flush(CPU(cpu)); 2557 + tlb_flush(env_cpu(env)); 2569 2558 } 2570 2559 break; 2571 2560 case 1: 2572 2561 RPN = value & 0xFFFFFC0F; 2573 2562 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN) { 2574 - tlb_flush(CPU(cpu)); 2563 + tlb_flush(env_cpu(env)); 2575 2564 } 2576 2565 tlb->RPN = RPN; 2577 2566 break; ··· 2665 2654 2666 2655 static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env) 2667 2656 { 2668 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2669 2657 uint32_t tlbncfg = 0; 2670 2658 int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT; 2671 2659 int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK); ··· 2675 2663 tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb]; 2676 2664 2677 2665 if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) { 2678 - cpu_abort(CPU(cpu), "we don't support HES yet\n"); 2666 + cpu_abort(env_cpu(env), "we don't support HES yet\n"); 2679 2667 } 2680 2668 2681 2669 return booke206_get_tlbm(env, tlb, ea, esel); ··· 2683 2671 2684 2672 void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid) 2685 2673 { 2686 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2687 - 2688 2674 env->spr[pidn] = pid; 2689 2675 /* changing PIDs mean we're in a different address space now */ 
2690 - tlb_flush(CPU(cpu)); 2676 + tlb_flush(env_cpu(env)); 2691 2677 } 2692 2678 2693 2679 void helper_booke_set_eplc(CPUPPCState *env, target_ulong val) 2694 2680 { 2695 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2696 2681 env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK; 2697 - tlb_flush_by_mmuidx(CPU(cpu), 1 << PPC_TLB_EPID_LOAD); 2682 + tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD); 2698 2683 } 2699 2684 void helper_booke_set_epsc(CPUPPCState *env, target_ulong val) 2700 2685 { 2701 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2702 2686 env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK; 2703 - tlb_flush_by_mmuidx(CPU(cpu), 1 << PPC_TLB_EPID_STORE); 2687 + tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE); 2704 2688 } 2705 2689 2706 2690 static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb) 2707 2691 { 2708 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2709 - 2710 2692 if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) { 2711 - tlb_flush_page(CPU(cpu), tlb->mas2 & MAS2_EPN_MASK); 2693 + tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK); 2712 2694 } else { 2713 - tlb_flush(CPU(cpu)); 2695 + tlb_flush(env_cpu(env)); 2714 2696 } 2715 2697 } 2716 2698 2717 2699 void helper_booke206_tlbwe(CPUPPCState *env) 2718 2700 { 2719 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2720 2701 uint32_t tlbncfg, tlbn; 2721 2702 ppcmas_tlb_t *tlb; 2722 2703 uint32_t size_tlb, size_ps; ··· 2770 2751 } 2771 2752 2772 2753 if (msr_gs) { 2773 - cpu_abort(CPU(cpu), "missing HV implementation\n"); 2754 + cpu_abort(env_cpu(env), "missing HV implementation\n"); 2774 2755 } 2775 2756 2776 2757 if (tlb->mas1 & MAS1_VALID) { ··· 2968 2949 2969 2950 void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address) 2970 2951 { 2971 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2972 2952 int i, j; 2973 2953 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); 2974 2954 ppcmas_tlb_t *tlb = env->tlb.tlbm; ··· 2985 2965 } 2986 2966 tlb += booke206_tlb_size(env, i); 2987 2967 } 
2988 - tlb_flush(CPU(cpu)); 2968 + tlb_flush(env_cpu(env)); 2989 2969 } 2990 2970 2991 2971 void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address) 2992 2972 { 2993 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 2994 2973 int i, j; 2995 2974 ppcmas_tlb_t *tlb; 2996 2975 int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID); ··· 3026 3005 tlb->mas1 &= ~MAS1_VALID; 3027 3006 } 3028 3007 } 3029 - tlb_flush(CPU(cpu)); 3008 + tlb_flush(env_cpu(env)); 3030 3009 } 3031 3010 3032 3011 void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
+43 -45
target/ppc/translate_init.inc.c
··· 3432 3432 env->dcache_line_size = 32; 3433 3433 env->icache_line_size = 32; 3434 3434 /* Allocate hardware IRQ controller */ 3435 - ppc40x_irq_init(ppc_env_get_cpu(env)); 3435 + ppc40x_irq_init(env_archcpu(env)); 3436 3436 3437 3437 SET_FIT_PERIOD(12, 16, 20, 24); 3438 3438 SET_WDT_PERIOD(16, 20, 24, 28); ··· 3486 3486 env->dcache_line_size = 32; 3487 3487 env->icache_line_size = 32; 3488 3488 /* Allocate hardware IRQ controller */ 3489 - ppc40x_irq_init(ppc_env_get_cpu(env)); 3489 + ppc40x_irq_init(env_archcpu(env)); 3490 3490 3491 3491 SET_FIT_PERIOD(12, 16, 20, 24); 3492 3492 SET_WDT_PERIOD(16, 20, 24, 28); ··· 3538 3538 env->dcache_line_size = 32; 3539 3539 env->icache_line_size = 32; 3540 3540 /* Allocate hardware IRQ controller */ 3541 - ppc40x_irq_init(ppc_env_get_cpu(env)); 3541 + ppc40x_irq_init(env_archcpu(env)); 3542 3542 3543 3543 SET_FIT_PERIOD(12, 16, 20, 24); 3544 3544 SET_WDT_PERIOD(16, 20, 24, 28); ··· 3597 3597 env->dcache_line_size = 32; 3598 3598 env->icache_line_size = 32; 3599 3599 /* Allocate hardware IRQ controller */ 3600 - ppc40x_irq_init(ppc_env_get_cpu(env)); 3600 + ppc40x_irq_init(env_archcpu(env)); 3601 3601 3602 3602 SET_FIT_PERIOD(8, 12, 16, 20); 3603 3603 SET_WDT_PERIOD(16, 20, 24, 28); ··· 3648 3648 env->dcache_line_size = 32; 3649 3649 env->icache_line_size = 32; 3650 3650 /* Allocate hardware IRQ controller */ 3651 - ppc40x_irq_init(ppc_env_get_cpu(env)); 3651 + ppc40x_irq_init(env_archcpu(env)); 3652 3652 3653 3653 SET_FIT_PERIOD(8, 12, 16, 20); 3654 3654 SET_WDT_PERIOD(16, 20, 24, 28); ··· 3714 3714 env->dcache_line_size = 32; 3715 3715 env->icache_line_size = 32; 3716 3716 /* Allocate hardware IRQ controller */ 3717 - ppc40x_irq_init(ppc_env_get_cpu(env)); 3717 + ppc40x_irq_init(env_archcpu(env)); 3718 3718 3719 3719 SET_FIT_PERIOD(8, 12, 16, 20); 3720 3720 SET_WDT_PERIOD(16, 20, 24, 28); ··· 3780 3780 env->dcache_line_size = 32; 3781 3781 env->icache_line_size = 32; 3782 3782 /* Allocate hardware IRQ controller */ 3783 - 
ppc40x_irq_init(ppc_env_get_cpu(env)); 3783 + ppc40x_irq_init(env_archcpu(env)); 3784 3784 3785 3785 SET_FIT_PERIOD(8, 12, 16, 20); 3786 3786 SET_WDT_PERIOD(16, 20, 24, 28); ··· 3878 3878 init_excp_BookE(env); 3879 3879 env->dcache_line_size = 32; 3880 3880 env->icache_line_size = 32; 3881 - ppc40x_irq_init(ppc_env_get_cpu(env)); 3881 + ppc40x_irq_init(env_archcpu(env)); 3882 3882 3883 3883 SET_FIT_PERIOD(12, 16, 20, 24); 3884 3884 SET_WDT_PERIOD(20, 24, 28, 32); ··· 4186 4186 init_excp_BookE(env); 4187 4187 env->dcache_line_size = 32; 4188 4188 env->icache_line_size = 32; 4189 - ppc40x_irq_init(ppc_env_get_cpu(env)); 4189 + ppc40x_irq_init(env_archcpu(env)); 4190 4190 4191 4191 SET_FIT_PERIOD(12, 16, 20, 24); 4192 4192 SET_WDT_PERIOD(20, 24, 28, 32); ··· 4392 4392 env->dcache_line_size = 32; 4393 4393 env->icache_line_size = 32; 4394 4394 /* Allocate hardware IRQ controller */ 4395 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 4395 + ppc6xx_irq_init(env_archcpu(env)); 4396 4396 } 4397 4397 4398 4398 POWERPC_FAMILY(G2)(ObjectClass *oc, void *data) ··· 4472 4472 env->dcache_line_size = 32; 4473 4473 env->icache_line_size = 32; 4474 4474 /* Allocate hardware IRQ controller */ 4475 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 4475 + ppc6xx_irq_init(env_archcpu(env)); 4476 4476 } 4477 4477 4478 4478 POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data) ··· 4727 4727 env->dcache_line_size = 32; 4728 4728 env->icache_line_size = 32; 4729 4729 /* Allocate hardware IRQ controller */ 4730 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 4730 + ppc6xx_irq_init(env_archcpu(env)); 4731 4731 } 4732 4732 4733 4733 POWERPC_FAMILY(e300)(ObjectClass *oc, void *data) ··· 4805 4805 4806 4806 static void init_proc_e500(CPUPPCState *env, int version) 4807 4807 { 4808 - PowerPCCPU *cpu = ppc_env_get_cpu(env); 4809 4808 uint32_t tlbncfg[2]; 4810 4809 uint64_t ivor_mask; 4811 4810 uint64_t ivpr_mask = 0xFFFF0000ULL; ··· 4877 4876 tlbncfg[1] = 0x40028040; 4878 4877 break; 4879 4878 default: 4880 - 
cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", 4879 + cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n", 4881 4880 env->spr[SPR_PVR]); 4882 4881 } 4883 4882 #endif ··· 4902 4901 l1cfg1 |= 0x0B83820; 4903 4902 break; 4904 4903 default: 4905 - cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", 4904 + cpu_abort(env_cpu(env), "Unknown CPU: " TARGET_FMT_lx "\n", 4906 4905 env->spr[SPR_PVR]); 4907 4906 } 4908 4907 gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg); ··· 5018 5017 5019 5018 init_excp_e200(env, ivpr_mask); 5020 5019 /* Allocate hardware IRQ controller */ 5021 - ppce500_irq_init(ppc_env_get_cpu(env)); 5020 + ppce500_irq_init(env_archcpu(env)); 5022 5021 } 5023 5022 5024 5023 static void init_proc_e500v1(CPUPPCState *env) ··· 5291 5290 env->dcache_line_size = 32; 5292 5291 env->icache_line_size = 64; 5293 5292 /* Allocate hardware IRQ controller */ 5294 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5293 + ppc6xx_irq_init(env_archcpu(env)); 5295 5294 } 5296 5295 5297 5296 POWERPC_FAMILY(601)(ObjectClass *oc, void *data) ··· 5396 5395 env->dcache_line_size = 32; 5397 5396 env->icache_line_size = 32; 5398 5397 /* Allocate hardware IRQ controller */ 5399 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5398 + ppc6xx_irq_init(env_archcpu(env)); 5400 5399 } 5401 5400 5402 5401 POWERPC_FAMILY(602)(ObjectClass *oc, void *data) ··· 5466 5465 env->dcache_line_size = 32; 5467 5466 env->icache_line_size = 32; 5468 5467 /* Allocate hardware IRQ controller */ 5469 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5468 + ppc6xx_irq_init(env_archcpu(env)); 5470 5469 } 5471 5470 5472 5471 POWERPC_FAMILY(603)(ObjectClass *oc, void *data) ··· 5533 5532 env->dcache_line_size = 32; 5534 5533 env->icache_line_size = 32; 5535 5534 /* Allocate hardware IRQ controller */ 5536 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5535 + ppc6xx_irq_init(env_archcpu(env)); 5537 5536 } 5538 5537 5539 5538 POWERPC_FAMILY(603E)(ObjectClass *oc, void *data) ··· 5594 5593 env->dcache_line_size = 32; 
5595 5594 env->icache_line_size = 32; 5596 5595 /* Allocate hardware IRQ controller */ 5597 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5596 + ppc6xx_irq_init(env_archcpu(env)); 5598 5597 } 5599 5598 5600 5599 POWERPC_FAMILY(604)(ObjectClass *oc, void *data) ··· 5678 5677 env->dcache_line_size = 32; 5679 5678 env->icache_line_size = 32; 5680 5679 /* Allocate hardware IRQ controller */ 5681 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5680 + ppc6xx_irq_init(env_archcpu(env)); 5682 5681 } 5683 5682 5684 5683 POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) ··· 5749 5748 env->dcache_line_size = 32; 5750 5749 env->icache_line_size = 32; 5751 5750 /* Allocate hardware IRQ controller */ 5752 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5751 + ppc6xx_irq_init(env_archcpu(env)); 5753 5752 } 5754 5753 5755 5754 POWERPC_FAMILY(740)(ObjectClass *oc, void *data) ··· 5829 5828 env->dcache_line_size = 32; 5830 5829 env->icache_line_size = 32; 5831 5830 /* Allocate hardware IRQ controller */ 5832 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5831 + ppc6xx_irq_init(env_archcpu(env)); 5833 5832 } 5834 5833 5835 5834 POWERPC_FAMILY(750)(ObjectClass *oc, void *data) ··· 5993 5992 env->dcache_line_size = 32; 5994 5993 env->icache_line_size = 32; 5995 5994 /* Allocate hardware IRQ controller */ 5996 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 5995 + ppc6xx_irq_init(env_archcpu(env)); 5997 5996 } 5998 5997 5999 5998 POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data) ··· 6115 6114 env->dcache_line_size = 32; 6116 6115 env->icache_line_size = 32; 6117 6116 /* Allocate hardware IRQ controller */ 6118 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6117 + ppc6xx_irq_init(env_archcpu(env)); 6119 6118 } 6120 6119 6121 6120 POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data) ··· 6203 6202 env->dcache_line_size = 32; 6204 6203 env->icache_line_size = 32; 6205 6204 /* Allocate hardware IRQ controller */ 6206 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6205 + ppc6xx_irq_init(env_archcpu(env)); 6207 6206 } 6208 6207 6209 6208 
POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data) ··· 6291 6290 env->dcache_line_size = 32; 6292 6291 env->icache_line_size = 32; 6293 6292 /* Allocate hardware IRQ controller */ 6294 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6293 + ppc6xx_irq_init(env_archcpu(env)); 6295 6294 } 6296 6295 6297 6296 POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data) ··· 6370 6369 env->dcache_line_size = 32; 6371 6370 env->icache_line_size = 32; 6372 6371 /* Allocate hardware IRQ controller */ 6373 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6372 + ppc6xx_irq_init(env_archcpu(env)); 6374 6373 } 6375 6374 6376 6375 POWERPC_FAMILY(745)(ObjectClass *oc, void *data) ··· 6457 6456 env->dcache_line_size = 32; 6458 6457 env->icache_line_size = 32; 6459 6458 /* Allocate hardware IRQ controller */ 6460 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6459 + ppc6xx_irq_init(env_archcpu(env)); 6461 6460 } 6462 6461 6463 6462 POWERPC_FAMILY(755)(ObjectClass *oc, void *data) ··· 6527 6526 env->dcache_line_size = 32; 6528 6527 env->icache_line_size = 32; 6529 6528 /* Allocate hardware IRQ controller */ 6530 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6529 + ppc6xx_irq_init(env_archcpu(env)); 6531 6530 } 6532 6531 6533 6532 POWERPC_FAMILY(7400)(ObjectClass *oc, void *data) ··· 6612 6611 env->dcache_line_size = 32; 6613 6612 env->icache_line_size = 32; 6614 6613 /* Allocate hardware IRQ controller */ 6615 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6614 + ppc6xx_irq_init(env_archcpu(env)); 6616 6615 } 6617 6616 6618 6617 POWERPC_FAMILY(7410)(ObjectClass *oc, void *data) ··· 6723 6722 env->dcache_line_size = 32; 6724 6723 env->icache_line_size = 32; 6725 6724 /* Allocate hardware IRQ controller */ 6726 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6725 + ppc6xx_irq_init(env_archcpu(env)); 6727 6726 } 6728 6727 6729 6728 POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) ··· 6857 6856 env->dcache_line_size = 32; 6858 6857 env->icache_line_size = 32; 6859 6858 /* Allocate hardware IRQ controller */ 6860 - 
ppc6xx_irq_init(ppc_env_get_cpu(env)); 6859 + ppc6xx_irq_init(env_archcpu(env)); 6861 6860 } 6862 6861 6863 6862 POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) ··· 6994 6993 env->dcache_line_size = 32; 6995 6994 env->icache_line_size = 32; 6996 6995 /* Allocate hardware IRQ controller */ 6997 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 6996 + ppc6xx_irq_init(env_archcpu(env)); 6998 6997 } 6999 6998 7000 6999 POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) ··· 7133 7132 env->dcache_line_size = 32; 7134 7133 env->icache_line_size = 32; 7135 7134 /* Allocate hardware IRQ controller */ 7136 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 7135 + ppc6xx_irq_init(env_archcpu(env)); 7137 7136 } 7138 7137 7139 7138 POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) ··· 7296 7295 env->dcache_line_size = 32; 7297 7296 env->icache_line_size = 32; 7298 7297 /* Allocate hardware IRQ controller */ 7299 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 7298 + ppc6xx_irq_init(env_archcpu(env)); 7300 7299 } 7301 7300 7302 7301 POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) ··· 7434 7433 env->dcache_line_size = 32; 7435 7434 env->icache_line_size = 32; 7436 7435 /* Allocate hardware IRQ controller */ 7437 - ppc6xx_irq_init(ppc_env_get_cpu(env)); 7436 + ppc6xx_irq_init(env_archcpu(env)); 7438 7437 } 7439 7438 7440 7439 POWERPC_FAMILY(e600)(ObjectClass *oc, void *data) ··· 8298 8297 8299 8298 /* Allocate hardware IRQ controller */ 8300 8299 init_excp_970(env); 8301 - ppc970_irq_init(ppc_env_get_cpu(env)); 8300 + ppc970_irq_init(env_archcpu(env)); 8302 8301 } 8303 8302 8304 8303 POWERPC_FAMILY(970)(ObjectClass *oc, void *data) ··· 8372 8371 8373 8372 /* Allocate hardware IRQ controller */ 8374 8373 init_excp_970(env); 8375 - ppc970_irq_init(ppc_env_get_cpu(env)); 8374 + ppc970_irq_init(env_archcpu(env)); 8376 8375 } 8377 8376 8378 8377 POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data) ··· 8487 8486 8488 8487 /* Allocate hardware IRQ controller */ 8489 8488 init_excp_POWER7(env); 8490 - 
ppcPOWER7_irq_init(ppc_env_get_cpu(env)); 8489 + ppcPOWER7_irq_init(env_archcpu(env)); 8491 8490 } 8492 8491 8493 8492 static bool ppc_pvr_match_power7(PowerPCCPUClass *pcc, uint32_t pvr) ··· 8639 8638 8640 8639 /* Allocate hardware IRQ controller */ 8641 8640 init_excp_POWER8(env); 8642 - ppcPOWER7_irq_init(ppc_env_get_cpu(env)); 8641 + ppcPOWER7_irq_init(env_archcpu(env)); 8643 8642 } 8644 8643 8645 8644 static bool ppc_pvr_match_power8(PowerPCCPUClass *pcc, uint32_t pvr) ··· 8838 8837 8839 8838 /* Allocate hardware IRQ controller */ 8840 8839 init_excp_POWER9(env); 8841 - ppcPOWER9_irq_init(ppc_env_get_cpu(env)); 8840 + ppcPOWER9_irq_init(env_archcpu(env)); 8842 8841 } 8843 8842 8844 8843 static bool ppc_pvr_match_power9(PowerPCCPUClass *pcc, uint32_t pvr) ··· 10474 10473 10475 10474 static void ppc_cpu_instance_init(Object *obj) 10476 10475 { 10477 - CPUState *cs = CPU(obj); 10478 10476 PowerPCCPU *cpu = POWERPC_CPU(obj); 10479 10477 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 10480 10478 CPUPPCState *env = &cpu->env; 10481 10479 10482 - cs->env_ptr = env; 10480 + cpu_set_cpustate_pointers(cpu); 10483 10481 cpu->vcpu_id = UNASSIGNED_CPU_INDEX; 10484 10482 10485 10483 env->msr_mask = pcc->msr_mask;
+23
target/riscv/cpu-param.h
··· 1 + /* 2 + * RISC-V cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2017-2018 SiFive, Inc. 5 + * SPDX-License-Identifier: GPL-2.0+ 6 + */ 7 + 8 + #ifndef RISCV_CPU_PARAM_H 9 + #define RISCV_CPU_PARAM_H 1 10 + 11 + #if defined(TARGET_RISCV64) 12 + # define TARGET_LONG_BITS 64 13 + # define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */ 14 + # define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */ 15 + #elif defined(TARGET_RISCV32) 16 + # define TARGET_LONG_BITS 32 17 + # define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */ 18 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */ 19 + #endif 20 + #define TARGET_PAGE_BITS 12 /* 4 KiB Pages */ 21 + #define NB_MMU_MODES 4 22 + 23 + #endif
+1 -2
target/riscv/cpu.c
··· 367 367 368 368 static void riscv_cpu_init(Object *obj) 369 369 { 370 - CPUState *cs = CPU(obj); 371 370 RISCVCPU *cpu = RISCV_CPU(obj); 372 371 373 - cs->env_ptr = &cpu->env; 372 + cpu_set_cpustate_pointers(cpu); 374 373 } 375 374 376 375 static const VMStateDescription vmstate_riscv_cpu = {
+6 -28
target/riscv/cpu.h
··· 20 20 #ifndef RISCV_CPU_H 21 21 #define RISCV_CPU_H 22 22 23 - /* QEMU addressing/paging config */ 24 - #define TARGET_PAGE_BITS 12 /* 4 KiB Pages */ 25 - #if defined(TARGET_RISCV64) 26 - #define TARGET_LONG_BITS 64 27 - #define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */ 28 - #define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */ 29 - #elif defined(TARGET_RISCV32) 30 - #define TARGET_LONG_BITS 32 31 - #define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */ 32 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */ 33 - #endif 34 - 35 - #define TCG_GUEST_DEFAULT_MO 0 36 - 37 - #define CPUArchState struct CPURISCVState 38 - 39 23 #include "qemu-common.h" 40 24 #include "qom/cpu.h" 41 25 #include "exec/cpu-defs.h" 42 26 #include "fpu/softfloat.h" 43 27 28 + #define TCG_GUEST_DEFAULT_MO 0 29 + 44 30 #define TYPE_RISCV_CPU "riscv-cpu" 45 31 46 32 #define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU ··· 98 84 99 85 #define TRANSLATE_FAIL 1 100 86 #define TRANSLATE_SUCCESS 0 101 - #define NB_MMU_MODES 4 102 87 #define MMU_USER_IDX 3 103 88 104 89 #define MAX_RISCV_PMPS (16) ··· 185 170 #endif 186 171 187 172 float_status fp_status; 188 - 189 - /* QEMU */ 190 - CPU_COMMON 191 173 192 174 /* Fields from here on are preserved across CPU reset. 
*/ 193 175 QEMUTimer *timer; /* Internal timer */ ··· 225 207 /*< private >*/ 226 208 CPUState parent_obj; 227 209 /*< public >*/ 210 + CPUNegativeOffsetState neg; 228 211 CPURISCVState env; 229 212 230 213 /* Configuration Settings */ ··· 236 219 } cfg; 237 220 } RISCVCPU; 238 221 239 - static inline RISCVCPU *riscv_env_get_cpu(CPURISCVState *env) 240 - { 241 - return container_of(env, RISCVCPU, env); 242 - } 243 - 244 222 static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) 245 223 { 246 224 return (env->misa & ext) != 0; ··· 258 236 extern const char * const riscv_fpr_regnames[]; 259 237 extern const char * const riscv_excp_names[]; 260 238 extern const char * const riscv_intr_names[]; 261 - 262 - #define ENV_GET_CPU(e) CPU(riscv_env_get_cpu(e)) 263 - #define ENV_OFFSET offsetof(RISCVCPU, env) 264 239 265 240 void riscv_cpu_do_interrupt(CPUState *cpu); 266 241 int riscv_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); ··· 348 323 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops); 349 324 350 325 void riscv_cpu_register_gdb_regs_for_features(CPUState *cs); 326 + 327 + typedef CPURISCVState CPUArchState; 328 + typedef RISCVCPU ArchCPU; 351 329 352 330 #include "exec/cpu-all.h" 353 331
+4 -6
target/riscv/cpu_helper.c
··· 89 89 static void riscv_cpu_update_mip_irqs_async(CPUState *target_cpu_state, 90 90 run_on_cpu_data data) 91 91 { 92 - CPURISCVState *env = &RISCV_CPU(target_cpu_state)->env; 93 - RISCVCPU *cpu = riscv_env_get_cpu(env); 94 92 struct CpuAsyncInfo *info = (struct CpuAsyncInfo *) data.host_ptr; 95 93 96 94 if (info->new_mip) { 97 - cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); 95 + cpu_interrupt(target_cpu_state, CPU_INTERRUPT_HARD); 98 96 } else { 99 - cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD); 97 + cpu_reset_interrupt(target_cpu_state, CPU_INTERRUPT_HARD); 100 98 } 101 99 102 100 g_free(info); ··· 212 210 } 213 211 } 214 212 215 - CPUState *cs = CPU(riscv_env_get_cpu(env)); 213 + CPUState *cs = env_cpu(env); 216 214 int va_bits = PGSHIFT + levels * ptidxbits; 217 215 target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; 218 216 target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask; ··· 341 339 static void raise_mmu_exception(CPURISCVState *env, target_ulong address, 342 340 MMUAccessType access_type) 343 341 { 344 - CPUState *cs = CPU(riscv_env_get_cpu(env)); 342 + CPUState *cs = env_cpu(env); 345 343 int page_fault_exceptions = 346 344 (env->priv_ver >= PRIV_VERSION_1_10_0) && 347 345 get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
+6 -6
target/riscv/csr.c
··· 296 296 if (env->priv_ver <= PRIV_VERSION_1_09_1) { 297 297 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | 298 298 MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) { 299 - tlb_flush(CPU(riscv_env_get_cpu(env))); 299 + tlb_flush(env_cpu(env)); 300 300 } 301 301 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | 302 302 MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM | ··· 307 307 if (env->priv_ver >= PRIV_VERSION_1_10_0) { 308 308 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV | 309 309 MSTATUS_MPRV | MSTATUS_SUM)) { 310 - tlb_flush(CPU(riscv_env_get_cpu(env))); 310 + tlb_flush(env_cpu(env)); 311 311 } 312 312 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE | 313 313 MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM | ··· 382 382 383 383 /* flush translation cache */ 384 384 if (val != env->misa) { 385 - tb_flush(CPU(riscv_env_get_cpu(env))); 385 + tb_flush(env_cpu(env)); 386 386 } 387 387 388 388 env->misa = val; ··· 549 549 static int rmw_mip(CPURISCVState *env, int csrno, target_ulong *ret_value, 550 550 target_ulong new_value, target_ulong write_mask) 551 551 { 552 - RISCVCPU *cpu = riscv_env_get_cpu(env); 552 + RISCVCPU *cpu = env_archcpu(env); 553 553 /* Allow software control of delegable interrupts not claimed by hardware */ 554 554 target_ulong mask = write_mask & delegable_ints & ~env->miclaim; 555 555 uint32_t old_mip; ··· 712 712 return 0; 713 713 } 714 714 if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val ^ env->sptbr)) { 715 - tlb_flush(CPU(riscv_env_get_cpu(env))); 715 + tlb_flush(env_cpu(env)); 716 716 env->sptbr = val & (((target_ulong) 717 717 1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1); 718 718 } ··· 724 724 return -1; 725 725 } else { 726 726 if((val ^ env->satp) & SATP_ASID) { 727 - tlb_flush(CPU(riscv_env_get_cpu(env))); 727 + tlb_flush(env_cpu(env)); 728 728 } 729 729 env->satp = val; 730 730 }
+3 -4
target/riscv/op_helper.c
··· 28 28 void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, 29 29 uint32_t exception, uintptr_t pc) 30 30 { 31 - CPUState *cs = CPU(riscv_env_get_cpu(env)); 31 + CPUState *cs = env_cpu(env); 32 32 qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception); 33 33 cs->exception_index = exception; 34 34 cpu_loop_exit_restore(cs, pc); ··· 128 128 129 129 void helper_wfi(CPURISCVState *env) 130 130 { 131 - CPUState *cs = CPU(riscv_env_get_cpu(env)); 131 + CPUState *cs = env_cpu(env); 132 132 133 133 if (env->priv == PRV_S && 134 134 env->priv_ver >= PRIV_VERSION_1_10_0 && ··· 143 143 144 144 void helper_tlb_flush(CPURISCVState *env) 145 145 { 146 - RISCVCPU *cpu = riscv_env_get_cpu(env); 147 - CPUState *cs = CPU(cpu); 146 + CPUState *cs = env_cpu(env); 148 147 if (!(env->priv >= PRV_S) || 149 148 (env->priv == PRV_S && 150 149 env->priv_ver >= PRIV_VERSION_1_10_0 &&
+2 -3
target/s390x/cc_helper.c
··· 419 419 static uint32_t do_calc_cc(CPUS390XState *env, uint32_t cc_op, 420 420 uint64_t src, uint64_t dst, uint64_t vr) 421 421 { 422 - S390CPU *cpu = s390_env_get_cpu(env); 423 422 uint32_t r = 0; 424 423 425 424 switch (cc_op) { ··· 543 542 break; 544 543 545 544 default: 546 - cpu_abort(CPU(cpu), "Unknown CC operation: %s\n", cc_name(cc_op)); 545 + cpu_abort(env_cpu(env), "Unknown CC operation: %s\n", cc_name(cc_op)); 547 546 } 548 547 549 548 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __func__, ··· 567 566 void HELPER(load_psw)(CPUS390XState *env, uint64_t mask, uint64_t addr) 568 567 { 569 568 load_psw(env, mask, addr); 570 - cpu_loop_exit(CPU(s390_env_get_cpu(env))); 569 + cpu_loop_exit(env_cpu(env)); 571 570 } 572 571 573 572 void HELPER(sacf)(CPUS390XState *env, uint64_t a1)
+17
target/s390x/cpu-param.h
··· 1 + /* 2 + * S/390 cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2009 Ulrich Hecht 5 + * SPDX-License-Identifier: GPL-2.0+ 6 + */ 7 + 8 + #ifndef S390_CPU_PARAM_H 9 + #define S390_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 64 12 + #define TARGET_PAGE_BITS 12 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 64 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 64 15 + #define NB_MMU_MODES 4 16 + 17 + #endif
+5 -4
target/s390x/cpu.c
··· 285 285 { 286 286 CPUState *cs = CPU(obj); 287 287 S390CPU *cpu = S390_CPU(obj); 288 - CPUS390XState *env = &cpu->env; 289 288 290 - cs->env_ptr = env; 289 + cpu_set_cpustate_pointers(cpu); 291 290 cs->halted = 1; 292 291 cs->exception_index = EXCP_HLT; 293 292 object_property_add(obj, "crash-information", "GuestPanicInformation", 294 293 s390_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL); 295 294 s390_cpu_model_register_props(obj); 296 295 #if !defined(CONFIG_USER_ONLY) 297 - env->tod_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); 298 - env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); 296 + cpu->env.tod_timer = 297 + timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu); 298 + cpu->env.cpu_timer = 299 + timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu); 299 300 s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu); 300 301 #endif 301 302 }
+7 -24
target/s390x/cpu.h
··· 24 24 #include "qemu-common.h" 25 25 #include "cpu-qom.h" 26 26 #include "cpu_models.h" 27 - 28 - #define TARGET_LONG_BITS 64 27 + #include "exec/cpu-defs.h" 29 28 30 29 #define ELF_MACHINE_UNAME "S390X" 31 30 32 - #define CPUArchState struct CPUS390XState 33 - 34 - #include "exec/cpu-defs.h" 35 - 36 31 /* The z/Architecture has a strong memory model with some store-after-load re-ordering */ 37 32 #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) 38 33 39 - #define TARGET_PAGE_BITS 12 40 - 41 - #define TARGET_PHYS_ADDR_SPACE_BITS 64 42 - #define TARGET_VIRT_ADDR_SPACE_BITS 64 43 - 44 - #include "exec/cpu-all.h" 45 - 46 - #define NB_MMU_MODES 4 47 34 #define TARGET_INSN_START_EXTRA_WORDS 1 48 35 49 36 #define MMU_MODE0_SUFFIX _primary ··· 127 114 /* Fields up to this point are cleared by a CPU reset */ 128 115 struct {} end_reset_fields; 129 116 130 - CPU_COMMON 131 - 132 117 #if !defined(CONFIG_USER_ONLY) 133 118 uint32_t core_id; /* PoP "CPU address", same as cpu_index */ 134 119 uint64_t cpuid; ··· 169 154 CPUState parent_obj; 170 155 /*< public >*/ 171 156 157 + CPUNegativeOffsetState neg; 172 158 CPUS390XState env; 173 159 S390CPUModel *model; 174 160 /* needed for live migration */ ··· 176 162 uint32_t irqstate_saved_size; 177 163 }; 178 164 179 - static inline S390CPU *s390_env_get_cpu(CPUS390XState *env) 180 - { 181 - return container_of(env, S390CPU, env); 182 - } 183 - 184 - #define ENV_GET_CPU(e) CPU(s390_env_get_cpu(e)) 185 - 186 - #define ENV_OFFSET offsetof(S390CPU, env) 187 165 188 166 #ifndef CONFIG_USER_ONLY 189 167 extern const struct VMStateDescription vmstate_s390_cpu; ··· 806 784 807 785 /* outside of target/s390x/ */ 808 786 S390CPU *s390_cpu_addr2state(uint16_t cpu_addr); 787 + 788 + typedef CPUS390XState CPUArchState; 789 + typedef S390CPU ArchCPU; 790 + 791 + #include "exec/cpu-all.h" 809 792 810 793 #endif
+1 -1
target/s390x/diag.c
··· 55 55 56 56 void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3, uintptr_t ra) 57 57 { 58 - CPUState *cs = CPU(s390_env_get_cpu(env)); 58 + CPUState *cs = env_cpu(env); 59 59 uint64_t addr = env->regs[r1]; 60 60 uint64_t subcode = env->regs[r3]; 61 61 IplParameterBlock *iplb;
+4 -4
target/s390x/excp_helper.c
··· 36 36 void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code, 37 37 int ilen, uintptr_t ra) 38 38 { 39 - CPUState *cs = CPU(s390_env_get_cpu(env)); 39 + CPUState *cs = env_cpu(env); 40 40 41 41 cpu_restore_state(cs, ra, true); 42 42 qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n", ··· 51 51 g_assert(dxc <= 0xff); 52 52 #if !defined(CONFIG_USER_ONLY) 53 53 /* Store the DXC into the lowcore */ 54 - stl_phys(CPU(s390_env_get_cpu(env))->as, 54 + stl_phys(env_cpu(env)->as, 55 55 env->psa + offsetof(LowCore, data_exc_code), dxc); 56 56 #endif 57 57 ··· 68 68 g_assert(vxc <= 0xff); 69 69 #if !defined(CONFIG_USER_ONLY) 70 70 /* Always store the VXC into the lowcore, without AFP it is undefined */ 71 - stl_phys(CPU(s390_env_get_cpu(env))->as, 71 + stl_phys(env_cpu(env)->as, 72 72 env->psa + offsetof(LowCore, data_exc_code), vxc); 73 73 #endif 74 74 ··· 297 297 static void do_ext_interrupt(CPUS390XState *env) 298 298 { 299 299 QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic()); 300 - S390CPU *cpu = s390_env_get_cpu(env); 300 + S390CPU *cpu = env_archcpu(env); 301 301 uint64_t mask, addr; 302 302 uint16_t cpu_addr; 303 303 LowCore *lowcore;
+1 -3
target/s390x/fpu_helper.c
··· 114 114 115 115 int float_comp_to_cc(CPUS390XState *env, int float_compare) 116 116 { 117 - S390CPU *cpu = s390_env_get_cpu(env); 118 - 119 117 switch (float_compare) { 120 118 case float_relation_equal: 121 119 return 0; ··· 126 124 case float_relation_unordered: 127 125 return 3; 128 126 default: 129 - cpu_abort(CPU(cpu), "unknown return value for float compare\n"); 127 + cpu_abort(env_cpu(env), "unknown return value for float compare\n"); 130 128 } 131 129 } 132 130
+12 -12
target/s390x/gdbstub.c
··· 96 96 switch (n) { 97 97 case S390_A0_REGNUM ... S390_A15_REGNUM: 98 98 env->aregs[n] = ldl_p(mem_buf); 99 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 99 + cpu_synchronize_post_init(env_cpu(env)); 100 100 return 4; 101 101 default: 102 102 return 0; ··· 201 201 case S390_C0_REGNUM ... S390_C15_REGNUM: 202 202 env->cregs[n] = ldtul_p(mem_buf); 203 203 if (tcg_enabled()) { 204 - tlb_flush(ENV_GET_CPU(env)); 204 + tlb_flush(env_cpu(env)); 205 205 } 206 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 206 + cpu_synchronize_post_init(env_cpu(env)); 207 207 return 8; 208 208 default: 209 209 return 0; ··· 251 251 switch (n) { 252 252 case S390_VIRT_CKC_REGNUM: 253 253 env->ckc = ldtul_p(mem_buf); 254 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 254 + cpu_synchronize_post_init(env_cpu(env)); 255 255 return 8; 256 256 case S390_VIRT_CPUTM_REGNUM: 257 257 env->cputm = ldtul_p(mem_buf); 258 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 258 + cpu_synchronize_post_init(env_cpu(env)); 259 259 return 8; 260 260 case S390_VIRT_BEA_REGNUM: 261 261 env->gbea = ldtul_p(mem_buf); 262 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 262 + cpu_synchronize_post_init(env_cpu(env)); 263 263 return 8; 264 264 case S390_VIRT_PREFIX_REGNUM: 265 265 env->psa = ldtul_p(mem_buf); 266 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 266 + cpu_synchronize_post_init(env_cpu(env)); 267 267 return 8; 268 268 case S390_VIRT_PP_REGNUM: 269 269 env->pp = ldtul_p(mem_buf); 270 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 270 + cpu_synchronize_post_init(env_cpu(env)); 271 271 return 8; 272 272 case S390_VIRT_PFT_REGNUM: 273 273 env->pfault_token = ldtul_p(mem_buf); 274 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 274 + cpu_synchronize_post_init(env_cpu(env)); 275 275 return 8; 276 276 case S390_VIRT_PFS_REGNUM: 277 277 env->pfault_select = ldtul_p(mem_buf); 278 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 278 + cpu_synchronize_post_init(env_cpu(env)); 279 279 return 8; 280 280 case 
S390_VIRT_PFC_REGNUM: 281 281 env->pfault_compare = ldtul_p(mem_buf); 282 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 282 + cpu_synchronize_post_init(env_cpu(env)); 283 283 return 8; 284 284 default: 285 285 return 0; ··· 303 303 static int cpu_write_gs_reg(CPUS390XState *env, uint8_t *mem_buf, int n) 304 304 { 305 305 env->gscb[n] = ldtul_p(mem_buf); 306 - cpu_synchronize_post_init(ENV_GET_CPU(env)); 306 + cpu_synchronize_post_init(env_cpu(env)); 307 307 return 8; 308 308 } 309 309
+3 -4
target/s390x/helper.c
··· 111 111 env->cc_op = (mask >> 44) & 3; 112 112 113 113 if ((old_mask ^ mask) & PSW_MASK_PER) { 114 - s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env))); 114 + s390_cpu_recompute_watchpoints(env_cpu(env)); 115 115 } 116 116 117 117 if (mask & PSW_MASK_WAIT) { 118 - s390_handle_wait(s390_env_get_cpu(env)); 118 + s390_handle_wait(env_archcpu(env)); 119 119 } 120 120 } 121 121 ··· 137 137 138 138 LowCore *cpu_map_lowcore(CPUS390XState *env) 139 139 { 140 - S390CPU *cpu = s390_env_get_cpu(env); 141 140 LowCore *lowcore; 142 141 hwaddr len = sizeof(LowCore); 143 142 144 143 lowcore = cpu_physical_memory_map(env->psa, &len, 1); 145 144 146 145 if (len < sizeof(LowCore)) { 147 - cpu_abort(CPU(cpu), "Could not map lowcore\n"); 146 + cpu_abort(env_cpu(env), "Could not map lowcore\n"); 148 147 } 149 148 150 149 return lowcore;
+1 -2
target/s390x/int_helper.c
··· 109 109 s390_program_interrupt(env, PGM_FIXPT_DIVIDE, ILEN_AUTO, GETPC()); 110 110 } 111 111 #else 112 - S390CPU *cpu = s390_env_get_cpu(env); 113 112 /* 32-bit hosts would need special wrapper functionality - just abort if 114 113 we encounter such a case; it's very unlikely anyways. */ 115 - cpu_abort(CPU(cpu), "128 -> 64/64 division not implemented\n"); 114 + cpu_abort(env_cpu(env), "128 -> 64/64 division not implemented\n"); 116 115 #endif 117 116 } 118 117 return ret;
+2 -4
target/s390x/interrupt.c
··· 23 23 /* Ensure to exit the TB after this call! */ 24 24 void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen) 25 25 { 26 - CPUState *cs = CPU(s390_env_get_cpu(env)); 26 + CPUState *cs = env_cpu(env); 27 27 28 28 cs->exception_index = EXCP_PGM; 29 29 env->int_pgm_code = code; ··· 33 33 void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen, 34 34 uintptr_t ra) 35 35 { 36 - S390CPU *cpu = s390_env_get_cpu(env); 37 - 38 36 if (kvm_enabled()) { 39 - kvm_s390_program_interrupt(cpu, code); 37 + kvm_s390_program_interrupt(env_archcpu(env), code); 40 38 } else if (tcg_enabled()) { 41 39 tcg_s390_program_interrupt(env, code, ilen, ra); 42 40 } else {
+12 -18
target/s390x/mem_helper.c
··· 1461 1461 #endif 1462 1462 if ((HAVE_CMPXCHG128 ? 0 : fc + 2 > max) || 1463 1463 (HAVE_ATOMIC128 ? 0 : sc > max)) { 1464 - cpu_loop_exit_atomic(ENV_GET_CPU(env), ra); 1464 + cpu_loop_exit_atomic(env_cpu(env), ra); 1465 1465 } 1466 1466 } 1467 1467 ··· 1617 1617 void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 1618 1618 { 1619 1619 uintptr_t ra = GETPC(); 1620 - S390CPU *cpu = s390_env_get_cpu(env); 1621 1620 bool PERchanged = false; 1622 1621 uint64_t src = a2; 1623 1622 uint32_t i; ··· 1642 1641 } 1643 1642 1644 1643 if (PERchanged && env->psw.mask & PSW_MASK_PER) { 1645 - s390_cpu_recompute_watchpoints(CPU(cpu)); 1644 + s390_cpu_recompute_watchpoints(env_cpu(env)); 1646 1645 } 1647 1646 1648 - tlb_flush(CPU(cpu)); 1647 + tlb_flush(env_cpu(env)); 1649 1648 } 1650 1649 1651 1650 void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) 1652 1651 { 1653 1652 uintptr_t ra = GETPC(); 1654 - S390CPU *cpu = s390_env_get_cpu(env); 1655 1653 bool PERchanged = false; 1656 1654 uint64_t src = a2; 1657 1655 uint32_t i; ··· 1675 1673 } 1676 1674 1677 1675 if (PERchanged && env->psw.mask & PSW_MASK_PER) { 1678 - s390_cpu_recompute_watchpoints(CPU(cpu)); 1676 + s390_cpu_recompute_watchpoints(env_cpu(env)); 1679 1677 } 1680 1678 1681 - tlb_flush(CPU(cpu)); 1679 + tlb_flush(env_cpu(env)); 1682 1680 } 1683 1681 1684 1682 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3) ··· 1737 1735 1738 1736 uint32_t HELPER(tprot)(CPUS390XState *env, uint64_t a1, uint64_t a2) 1739 1737 { 1740 - S390CPU *cpu = s390_env_get_cpu(env); 1741 - CPUState *cs = CPU(cpu); 1738 + S390CPU *cpu = env_archcpu(env); 1739 + CPUState *cs = env_cpu(env); 1742 1740 1743 1741 /* 1744 1742 * TODO: we currently don't handle all access protection types ··· 1906 1904 1907 1905 void HELPER(idte)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint32_t m4) 1908 1906 { 1909 - CPUState *cs = CPU(s390_env_get_cpu(env)); 1907 + CPUState *cs = 
env_cpu(env); 1910 1908 const uintptr_t ra = GETPC(); 1911 1909 uint64_t table, entry, raddr; 1912 1910 uint16_t entries, i, index = 0; ··· 1958 1956 void HELPER(ipte)(CPUS390XState *env, uint64_t pto, uint64_t vaddr, 1959 1957 uint32_t m4) 1960 1958 { 1961 - CPUState *cs = CPU(s390_env_get_cpu(env)); 1959 + CPUState *cs = env_cpu(env); 1962 1960 const uintptr_t ra = GETPC(); 1963 1961 uint64_t page = vaddr & TARGET_PAGE_MASK; 1964 1962 uint64_t pte_addr, pte; ··· 1998 1996 /* flush local tlb */ 1999 1997 void HELPER(ptlb)(CPUS390XState *env) 2000 1998 { 2001 - S390CPU *cpu = s390_env_get_cpu(env); 2002 - 2003 - tlb_flush(CPU(cpu)); 1999 + tlb_flush(env_cpu(env)); 2004 2000 } 2005 2001 2006 2002 /* flush global tlb */ 2007 2003 void HELPER(purge)(CPUS390XState *env) 2008 2004 { 2009 - S390CPU *cpu = s390_env_get_cpu(env); 2010 - 2011 - tlb_flush_all_cpus_synced(CPU(cpu)); 2005 + tlb_flush_all_cpus_synced(env_cpu(env)); 2012 2006 } 2013 2007 2014 2008 /* load using real address */ ··· 2052 2046 /* load real address */ 2053 2047 uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr) 2054 2048 { 2055 - CPUState *cs = CPU(s390_env_get_cpu(env)); 2049 + CPUState *cs = env_cpu(env); 2056 2050 uint32_t cc = 0; 2057 2051 uint64_t asc = env->psw.mask & PSW_MASK_ASC; 2058 2052 uint64_t ret;
+25 -25
target/s390x/misc_helper.c
··· 55 55 /* Raise an exception statically from a TB. */ 56 56 void HELPER(exception)(CPUS390XState *env, uint32_t excp) 57 57 { 58 - CPUState *cs = CPU(s390_env_get_cpu(env)); 58 + CPUState *cs = env_cpu(env); 59 59 60 60 HELPER_LOG("%s: exception %d\n", __func__, excp); 61 61 cs->exception_index = excp; ··· 150 150 /* Set Prefix */ 151 151 void HELPER(spx)(CPUS390XState *env, uint64_t a1) 152 152 { 153 - CPUState *cs = CPU(s390_env_get_cpu(env)); 153 + CPUState *cs = env_cpu(env); 154 154 uint32_t prefix = a1 & 0x7fffe000; 155 155 156 156 env->psa = prefix; ··· 256 256 const uint32_t sel2 = r1 & STSI_R1_SEL2_MASK; 257 257 const MachineState *ms = MACHINE(qdev_get_machine()); 258 258 uint16_t total_cpus = 0, conf_cpus = 0, reserved_cpus = 0; 259 - S390CPU *cpu = s390_env_get_cpu(env); 259 + S390CPU *cpu = env_archcpu(env); 260 260 SysIB sysib = { }; 261 261 int i, cc = 0; 262 262 ··· 411 411 #ifndef CONFIG_USER_ONLY 412 412 void HELPER(xsch)(CPUS390XState *env, uint64_t r1) 413 413 { 414 - S390CPU *cpu = s390_env_get_cpu(env); 414 + S390CPU *cpu = env_archcpu(env); 415 415 qemu_mutex_lock_iothread(); 416 416 ioinst_handle_xsch(cpu, r1, GETPC()); 417 417 qemu_mutex_unlock_iothread(); ··· 419 419 420 420 void HELPER(csch)(CPUS390XState *env, uint64_t r1) 421 421 { 422 - S390CPU *cpu = s390_env_get_cpu(env); 422 + S390CPU *cpu = env_archcpu(env); 423 423 qemu_mutex_lock_iothread(); 424 424 ioinst_handle_csch(cpu, r1, GETPC()); 425 425 qemu_mutex_unlock_iothread(); ··· 427 427 428 428 void HELPER(hsch)(CPUS390XState *env, uint64_t r1) 429 429 { 430 - S390CPU *cpu = s390_env_get_cpu(env); 430 + S390CPU *cpu = env_archcpu(env); 431 431 qemu_mutex_lock_iothread(); 432 432 ioinst_handle_hsch(cpu, r1, GETPC()); 433 433 qemu_mutex_unlock_iothread(); ··· 435 435 436 436 void HELPER(msch)(CPUS390XState *env, uint64_t r1, uint64_t inst) 437 437 { 438 - S390CPU *cpu = s390_env_get_cpu(env); 438 + S390CPU *cpu = env_archcpu(env); 439 439 qemu_mutex_lock_iothread(); 440 440 
ioinst_handle_msch(cpu, r1, inst >> 16, GETPC()); 441 441 qemu_mutex_unlock_iothread(); ··· 443 443 444 444 void HELPER(rchp)(CPUS390XState *env, uint64_t r1) 445 445 { 446 - S390CPU *cpu = s390_env_get_cpu(env); 446 + S390CPU *cpu = env_archcpu(env); 447 447 qemu_mutex_lock_iothread(); 448 448 ioinst_handle_rchp(cpu, r1, GETPC()); 449 449 qemu_mutex_unlock_iothread(); ··· 451 451 452 452 void HELPER(rsch)(CPUS390XState *env, uint64_t r1) 453 453 { 454 - S390CPU *cpu = s390_env_get_cpu(env); 454 + S390CPU *cpu = env_archcpu(env); 455 455 qemu_mutex_lock_iothread(); 456 456 ioinst_handle_rsch(cpu, r1, GETPC()); 457 457 qemu_mutex_unlock_iothread(); ··· 459 459 460 460 void HELPER(sal)(CPUS390XState *env, uint64_t r1) 461 461 { 462 - S390CPU *cpu = s390_env_get_cpu(env); 462 + S390CPU *cpu = env_archcpu(env); 463 463 464 464 qemu_mutex_lock_iothread(); 465 465 ioinst_handle_sal(cpu, r1, GETPC()); ··· 468 468 469 469 void HELPER(schm)(CPUS390XState *env, uint64_t r1, uint64_t r2, uint64_t inst) 470 470 { 471 - S390CPU *cpu = s390_env_get_cpu(env); 471 + S390CPU *cpu = env_archcpu(env); 472 472 473 473 qemu_mutex_lock_iothread(); 474 474 ioinst_handle_schm(cpu, r1, r2, inst >> 16, GETPC()); ··· 477 477 478 478 void HELPER(ssch)(CPUS390XState *env, uint64_t r1, uint64_t inst) 479 479 { 480 - S390CPU *cpu = s390_env_get_cpu(env); 480 + S390CPU *cpu = env_archcpu(env); 481 481 qemu_mutex_lock_iothread(); 482 482 ioinst_handle_ssch(cpu, r1, inst >> 16, GETPC()); 483 483 qemu_mutex_unlock_iothread(); ··· 485 485 486 486 void HELPER(stcrw)(CPUS390XState *env, uint64_t inst) 487 487 { 488 - S390CPU *cpu = s390_env_get_cpu(env); 488 + S390CPU *cpu = env_archcpu(env); 489 489 490 490 qemu_mutex_lock_iothread(); 491 491 ioinst_handle_stcrw(cpu, inst >> 16, GETPC()); ··· 494 494 495 495 void HELPER(stsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) 496 496 { 497 - S390CPU *cpu = s390_env_get_cpu(env); 497 + S390CPU *cpu = env_archcpu(env); 498 498 qemu_mutex_lock_iothread(); 
499 499 ioinst_handle_stsch(cpu, r1, inst >> 16, GETPC()); 500 500 qemu_mutex_unlock_iothread(); ··· 503 503 uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) 504 504 { 505 505 const uintptr_t ra = GETPC(); 506 - S390CPU *cpu = s390_env_get_cpu(env); 506 + S390CPU *cpu = env_archcpu(env); 507 507 QEMUS390FLICState *flic = s390_get_qemu_flic(s390_get_flic()); 508 508 QEMUS390FlicIO *io = NULL; 509 509 LowCore *lowcore; ··· 555 555 556 556 void HELPER(tsch)(CPUS390XState *env, uint64_t r1, uint64_t inst) 557 557 { 558 - S390CPU *cpu = s390_env_get_cpu(env); 558 + S390CPU *cpu = env_archcpu(env); 559 559 qemu_mutex_lock_iothread(); 560 560 ioinst_handle_tsch(cpu, r1, inst >> 16, GETPC()); 561 561 qemu_mutex_unlock_iothread(); ··· 563 563 564 564 void HELPER(chsc)(CPUS390XState *env, uint64_t inst) 565 565 { 566 - S390CPU *cpu = s390_env_get_cpu(env); 566 + S390CPU *cpu = env_archcpu(env); 567 567 qemu_mutex_lock_iothread(); 568 568 ioinst_handle_chsc(cpu, inst >> 16, GETPC()); 569 569 qemu_mutex_unlock_iothread(); ··· 618 618 /* If the instruction has to be nullified, trigger the 619 619 exception immediately. 
*/ 620 620 if (env->cregs[9] & PER_CR9_EVENT_NULLIFICATION) { 621 - CPUState *cs = CPU(s390_env_get_cpu(env)); 621 + CPUState *cs = env_cpu(env); 622 622 623 623 env->per_perc_atmid |= PER_CODE_EVENT_NULLIFICATION; 624 624 env->int_pgm_code = PGM_PER; ··· 702 702 */ 703 703 void HELPER(clp)(CPUS390XState *env, uint32_t r2) 704 704 { 705 - S390CPU *cpu = s390_env_get_cpu(env); 705 + S390CPU *cpu = env_archcpu(env); 706 706 707 707 qemu_mutex_lock_iothread(); 708 708 clp_service_call(cpu, r2, GETPC()); ··· 711 711 712 712 void HELPER(pcilg)(CPUS390XState *env, uint32_t r1, uint32_t r2) 713 713 { 714 - S390CPU *cpu = s390_env_get_cpu(env); 714 + S390CPU *cpu = env_archcpu(env); 715 715 716 716 qemu_mutex_lock_iothread(); 717 717 pcilg_service_call(cpu, r1, r2, GETPC()); ··· 720 720 721 721 void HELPER(pcistg)(CPUS390XState *env, uint32_t r1, uint32_t r2) 722 722 { 723 - S390CPU *cpu = s390_env_get_cpu(env); 723 + S390CPU *cpu = env_archcpu(env); 724 724 725 725 qemu_mutex_lock_iothread(); 726 726 pcistg_service_call(cpu, r1, r2, GETPC()); ··· 730 730 void HELPER(stpcifc)(CPUS390XState *env, uint32_t r1, uint64_t fiba, 731 731 uint32_t ar) 732 732 { 733 - S390CPU *cpu = s390_env_get_cpu(env); 733 + S390CPU *cpu = env_archcpu(env); 734 734 735 735 qemu_mutex_lock_iothread(); 736 736 stpcifc_service_call(cpu, r1, fiba, ar, GETPC()); ··· 752 752 753 753 void HELPER(rpcit)(CPUS390XState *env, uint32_t r1, uint32_t r2) 754 754 { 755 - S390CPU *cpu = s390_env_get_cpu(env); 755 + S390CPU *cpu = env_archcpu(env); 756 756 757 757 qemu_mutex_lock_iothread(); 758 758 rpcit_service_call(cpu, r1, r2, GETPC()); ··· 762 762 void HELPER(pcistb)(CPUS390XState *env, uint32_t r1, uint32_t r3, 763 763 uint64_t gaddr, uint32_t ar) 764 764 { 765 - S390CPU *cpu = s390_env_get_cpu(env); 765 + S390CPU *cpu = env_archcpu(env); 766 766 767 767 qemu_mutex_lock_iothread(); 768 768 pcistb_service_call(cpu, r1, r3, gaddr, ar, GETPC()); ··· 772 772 void HELPER(mpcifc)(CPUS390XState *env, uint32_t r1, 
uint64_t fiba, 773 773 uint32_t ar) 774 774 { 775 - S390CPU *cpu = s390_env_get_cpu(env); 775 + S390CPU *cpu = env_archcpu(env); 776 776 777 777 qemu_mutex_lock_iothread(); 778 778 mpcifc_service_call(cpu, r1, fiba, ar, GETPC());
+4 -4
target/s390x/mmu_helper.c
··· 58 58 static void trigger_access_exception(CPUS390XState *env, uint32_t type, 59 59 uint32_t ilen, uint64_t tec) 60 60 { 61 - S390CPU *cpu = s390_env_get_cpu(env); 61 + S390CPU *cpu = env_archcpu(env); 62 62 63 63 if (kvm_enabled()) { 64 64 kvm_s390_access_exception(cpu, type, tec); 65 65 } else { 66 - CPUState *cs = CPU(cpu); 66 + CPUState *cs = env_cpu(env); 67 67 if (type != PGM_ADDRESSING) { 68 68 stq_phys(cs->as, env->psa + offsetof(LowCore, trans_exc_code), tec); 69 69 } ··· 185 185 target_ulong *raddr, int *flags, int rw, 186 186 bool exc) 187 187 { 188 - CPUState *cs = CPU(s390_env_get_cpu(env)); 188 + CPUState *cs = env_cpu(env); 189 189 uint64_t origin, offs, pt_entry; 190 190 191 191 if (st_entry & SEGMENT_ENTRY_RO) { ··· 214 214 target_ulong *raddr, int *flags, int rw, 215 215 bool exc) 216 216 { 217 - CPUState *cs = CPU(s390_env_get_cpu(env)); 217 + CPUState *cs = env_cpu(env); 218 218 uint64_t origin, offs, new_entry; 219 219 const int pchks[4] = { 220 220 PGM_SEGMENT_TRANS, PGM_REG_THIRD_TRANS,
+2 -2
target/s390x/sigp.c
··· 454 454 { 455 455 uint64_t *status_reg = &env->regs[r1]; 456 456 uint64_t param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1]; 457 - S390CPU *cpu = s390_env_get_cpu(env); 457 + S390CPU *cpu = env_archcpu(env); 458 458 S390CPU *dst_cpu = NULL; 459 459 int ret; 460 460 ··· 492 492 493 493 void do_stop_interrupt(CPUS390XState *env) 494 494 { 495 - S390CPU *cpu = s390_env_get_cpu(env); 495 + S390CPU *cpu = env_archcpu(env); 496 496 497 497 if (s390_cpu_set_state(S390_CPU_STATE_STOPPED, cpu) == 0) { 498 498 qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+21
target/sh4/cpu-param.h
··· 1 + /* 2 + * SH4 cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2005 Samuel Tardieu 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef SH4_CPU_PARAM_H 9 + #define SH4_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 12 /* 4k */ 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #ifdef CONFIG_USER_ONLY 15 + # define TARGET_VIRT_ADDR_SPACE_BITS 31 16 + #else 17 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 18 + #endif 19 + #define NB_MMU_MODES 2 20 + 21 + #endif
+1 -2
target/sh4/cpu.c
··· 194 194 195 195 static void superh_cpu_initfn(Object *obj) 196 196 { 197 - CPUState *cs = CPU(obj); 198 197 SuperHCPU *cpu = SUPERH_CPU(obj); 199 198 CPUSH4State *env = &cpu->env; 200 199 201 - cs->env_ptr = env; 200 + cpu_set_cpustate_pointers(cpu); 202 201 203 202 env->movcal_backup_tail = &(env->movcal_backup); 204 203 }
+5 -25
target/sh4/cpu.h
··· 22 22 23 23 #include "qemu-common.h" 24 24 #include "cpu-qom.h" 25 + #include "exec/cpu-defs.h" 25 26 26 - #define TARGET_LONG_BITS 32 27 27 #define ALIGNED_ONLY 28 28 29 29 /* CPU Subtypes */ ··· 35 35 #define SH_CPU_SH7785 (1 << 5) 36 36 #define SH_CPU_SH7750_ALL (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7750R) 37 37 #define SH_CPU_SH7751_ALL (SH_CPU_SH7751 | SH_CPU_SH7751R) 38 - 39 - #define CPUArchState struct CPUSH4State 40 - 41 - #include "exec/cpu-defs.h" 42 - 43 - #define TARGET_PAGE_BITS 12 /* 4k XXXXX */ 44 - 45 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 46 - #ifdef CONFIG_USER_ONLY 47 - # define TARGET_VIRT_ADDR_SPACE_BITS 31 48 - #else 49 - # define TARGET_VIRT_ADDR_SPACE_BITS 32 50 - #endif 51 38 52 39 #define SR_MD 30 53 40 #define SR_RB 29 ··· 132 119 #define UTLB_SIZE 64 133 120 #define ITLB_SIZE 4 134 121 135 - #define NB_MMU_MODES 2 136 122 #define TARGET_INSN_START_EXTRA_WORDS 1 137 123 138 124 enum sh_features { ··· 193 179 /* Fields up to this point are cleared by a CPU reset */ 194 180 struct {} end_reset_fields; 195 181 196 - CPU_COMMON 197 - 198 182 /* Fields from here on are preserved over CPU reset. */ 199 183 int id; /* CPU model */ 200 184 ··· 218 202 CPUState parent_obj; 219 203 /*< public >*/ 220 204 205 + CPUNegativeOffsetState neg; 221 206 CPUSH4State env; 222 207 }; 223 208 224 - static inline SuperHCPU *sh_env_get_cpu(CPUSH4State *env) 225 - { 226 - return container_of(env, SuperHCPU, env); 227 - } 228 - 229 - #define ENV_GET_CPU(e) CPU(sh_env_get_cpu(e)) 230 - 231 - #define ENV_OFFSET offsetof(SuperHCPU, env) 232 209 233 210 void superh_cpu_do_interrupt(CPUState *cpu); 234 211 bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req); ··· 293 270 return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0; 294 271 } 295 272 } 273 + 274 + typedef CPUSH4State CPUArchState; 275 + typedef SuperHCPU ArchCPU; 296 276 297 277 #include "exec/cpu-all.h" 298 278
+12 -14
target/sh4/helper.c
··· 216 216 217 217 static int itlb_replacement(CPUSH4State * env) 218 218 { 219 - SuperHCPU *cpu = sh_env_get_cpu(env); 220 - 221 219 if ((env->mmucr & 0xe0000000) == 0xe0000000) { 222 220 return 0; 223 221 } ··· 230 228 if ((env->mmucr & 0x2c000000) == 0x00000000) { 231 229 return 3; 232 230 } 233 - cpu_abort(CPU(cpu), "Unhandled itlb_replacement"); 231 + cpu_abort(env_cpu(env), "Unhandled itlb_replacement"); 234 232 } 235 233 236 234 /* Find the corresponding entry in the right TLB ··· 286 284 itlb = itlb_replacement(env); 287 285 ientry = &env->itlb[itlb]; 288 286 if (ientry->v) { 289 - tlb_flush_page(CPU(sh_env_get_cpu(env)), ientry->vpn << 10); 287 + tlb_flush_page(env_cpu(env), ientry->vpn << 10); 290 288 } 291 289 *ientry = env->utlb[utlb]; 292 290 update_itlb_use(env, itlb); ··· 448 446 449 447 void cpu_load_tlb(CPUSH4State * env) 450 448 { 451 - SuperHCPU *cpu = sh_env_get_cpu(env); 449 + CPUState *cs = env_cpu(env); 452 450 int n = cpu_mmucr_urc(env->mmucr); 453 451 tlb_t * entry = &env->utlb[n]; 454 452 455 453 if (entry->v) { 456 454 /* Overwriting valid entry in utlb. */ 457 455 target_ulong address = entry->vpn << 10; 458 - tlb_flush_page(CPU(cpu), address); 456 + tlb_flush_page(cs, address); 459 457 } 460 458 461 459 /* Take values into cpu status from registers. */ ··· 478 476 entry->size = 1024 * 1024; /* 1M */ 479 477 break; 480 478 default: 481 - cpu_abort(CPU(cpu), "Unhandled load_tlb"); 479 + cpu_abort(cs, "Unhandled load_tlb"); 482 480 break; 483 481 } 484 482 entry->sh = (uint8_t)cpu_ptel_sh(env->ptel); ··· 505 503 entry->v = 0; 506 504 } 507 505 508 - tlb_flush(CPU(sh_env_get_cpu(s))); 506 + tlb_flush(env_cpu(s)); 509 507 } 510 508 511 509 uint32_t cpu_sh4_read_mmaped_itlb_addr(CPUSH4State *s, ··· 531 529 if (entry->v) { 532 530 /* Overwriting valid entry in itlb. 
*/ 533 531 target_ulong address = entry->vpn << 10; 534 - tlb_flush_page(CPU(sh_env_get_cpu(s)), address); 532 + tlb_flush_page(env_cpu(s), address); 535 533 } 536 534 entry->asid = asid; 537 535 entry->vpn = vpn; ··· 573 571 if (entry->v) { 574 572 /* Overwriting valid entry in utlb. */ 575 573 target_ulong address = entry->vpn << 10; 576 - tlb_flush_page(CPU(sh_env_get_cpu(s)), address); 574 + tlb_flush_page(env_cpu(s), address); 577 575 } 578 576 entry->ppn = (mem_value & 0x1ffffc00) >> 10; 579 577 entry->v = (mem_value & 0x00000100) >> 8; ··· 626 624 if (entry->vpn == vpn 627 625 && (!use_asid || entry->asid == asid || entry->sh)) { 628 626 if (utlb_match_entry) { 629 - CPUState *cs = CPU(sh_env_get_cpu(s)); 627 + CPUState *cs = env_cpu(s); 630 628 631 629 /* Multiple TLB Exception */ 632 630 cs->exception_index = 0x140; ··· 658 656 } 659 657 660 658 if (needs_tlb_flush) { 661 - tlb_flush_page(CPU(sh_env_get_cpu(s)), vpn << 10); 659 + tlb_flush_page(env_cpu(s), vpn << 10); 662 660 } 663 661 } else { 664 662 int index = (addr & 0x00003f00) >> 8; 665 663 tlb_t * entry = &s->utlb[index]; 666 664 if (entry->v) { 667 - CPUState *cs = CPU(sh_env_get_cpu(s)); 665 + CPUState *cs = env_cpu(s); 668 666 669 667 /* Overwriting valid entry in utlb. */ 670 668 target_ulong address = entry->vpn << 10; ··· 719 717 if (entry->v) { 720 718 /* Overwriting valid entry in utlb. */ 721 719 target_ulong address = entry->vpn << 10; 722 - tlb_flush_page(CPU(sh_env_get_cpu(s)), address); 720 + tlb_flush_page(env_cpu(s), address); 723 721 } 724 722 entry->ppn = (mem_value & 0x1ffffc00) >> 10; 725 723 entry->v = (mem_value & 0x00000100) >> 8;
+4 -7
target/sh4/op_helper.c
··· 46 46 void helper_ldtlb(CPUSH4State *env) 47 47 { 48 48 #ifdef CONFIG_USER_ONLY 49 - SuperHCPU *cpu = sh_env_get_cpu(env); 50 - 51 - /* XXXXX */ 52 - cpu_abort(CPU(cpu), "Unhandled ldtlb"); 49 + cpu_abort(env_cpu(env), "Unhandled ldtlb"); 53 50 #else 54 51 cpu_load_tlb(env); 55 52 #endif ··· 58 55 static inline void QEMU_NORETURN raise_exception(CPUSH4State *env, int index, 59 56 uintptr_t retaddr) 60 57 { 61 - CPUState *cs = CPU(sh_env_get_cpu(env)); 58 + CPUState *cs = env_cpu(env); 62 59 63 60 cs->exception_index = index; 64 61 cpu_loop_exit_restore(cs, retaddr); ··· 91 88 92 89 void helper_sleep(CPUSH4State *env) 93 90 { 94 - CPUState *cs = CPU(sh_env_get_cpu(env)); 91 + CPUState *cs = env_cpu(env); 95 92 96 93 cs->halted = 1; 97 94 env->in_sleep = 1; ··· 107 104 void helper_exclusive(CPUSH4State *env) 108 105 { 109 106 /* We do not want cpu_restore_state to run. */ 110 - cpu_loop_exit_atomic(ENV_GET_CPU(env), 0); 107 + cpu_loop_exit_atomic(env_cpu(env), 0); 111 108 } 112 109 113 110 void helper_movcal(CPUSH4State *env, uint32_t address, uint32_t value)
+28
target/sparc/cpu-param.h
··· 1 + /* 2 + * Sparc cpu parameters for qemu. 3 + * 4 + * SPDX-License-Identifier: LGPL-2.0+ 5 + */ 6 + 7 + #ifndef SPARC_CPU_PARAM_H 8 + #define SPARC_CPU_PARAM_H 1 9 + 10 + #ifdef TARGET_SPARC64 11 + # define TARGET_LONG_BITS 64 12 + # define TARGET_PAGE_BITS 13 /* 8k */ 13 + # define TARGET_PHYS_ADDR_SPACE_BITS 41 14 + # ifdef TARGET_ABI32 15 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 16 + # else 17 + # define TARGET_VIRT_ADDR_SPACE_BITS 44 18 + # endif 19 + # define NB_MMU_MODES 6 20 + #else 21 + # define TARGET_LONG_BITS 32 22 + # define TARGET_PAGE_BITS 12 /* 4k */ 23 + # define TARGET_PHYS_ADDR_SPACE_BITS 36 24 + # define TARGET_VIRT_ADDR_SPACE_BITS 32 25 + # define NB_MMU_MODES 3 26 + #endif 27 + 28 + #endif
+1 -2
target/sparc/cpu.c
··· 774 774 775 775 static void sparc_cpu_initfn(Object *obj) 776 776 { 777 - CPUState *cs = CPU(obj); 778 777 SPARCCPU *cpu = SPARC_CPU(obj); 779 778 SPARCCPUClass *scc = SPARC_CPU_GET_CLASS(obj); 780 779 CPUSPARCState *env = &cpu->env; 781 780 782 - cs->env_ptr = env; 781 + cpu_set_cpustate_pointers(cpu); 783 782 784 783 if (scc->cpu_def) { 785 784 env->def = *scc->cpu_def;
+6 -30
target/sparc/cpu.h
··· 4 4 #include "qemu-common.h" 5 5 #include "qemu/bswap.h" 6 6 #include "cpu-qom.h" 7 + #include "exec/cpu-defs.h" 7 8 8 9 #define ALIGNED_ONLY 9 10 10 11 #if !defined(TARGET_SPARC64) 11 - #define TARGET_LONG_BITS 32 12 12 #define TARGET_DPREGS 16 13 - #define TARGET_PAGE_BITS 12 /* 4k */ 14 - #define TARGET_PHYS_ADDR_SPACE_BITS 36 15 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 16 13 #else 17 - #define TARGET_LONG_BITS 64 18 14 #define TARGET_DPREGS 32 19 - #define TARGET_PAGE_BITS 13 /* 8k */ 20 - #define TARGET_PHYS_ADDR_SPACE_BITS 41 21 - # ifdef TARGET_ABI32 22 - # define TARGET_VIRT_ADDR_SPACE_BITS 32 23 - # else 24 - # define TARGET_VIRT_ADDR_SPACE_BITS 44 25 - # endif 26 15 #endif 27 - 28 - #define CPUArchState struct CPUSPARCState 29 - 30 - #include "exec/cpu-defs.h" 31 16 32 17 /*#define EXCP_INTERRUPT 0x100*/ 33 18 ··· 225 210 #define MIN_NWINDOWS 3 226 211 #define MAX_NWINDOWS 32 227 212 228 - #if !defined(TARGET_SPARC64) 229 - #define NB_MMU_MODES 3 230 - #else 231 - #define NB_MMU_MODES 6 213 + #ifdef TARGET_SPARC64 232 214 typedef struct trap_state { 233 215 uint64_t tpc; 234 216 uint64_t tnpc; ··· 464 446 /* Fields up to this point are cleared by a CPU reset */ 465 447 struct {} end_reset_fields; 466 448 467 - CPU_COMMON 468 - 469 449 /* Fields from here on are preserved across CPU reset. 
*/ 470 450 target_ulong version; 471 451 uint32_t nwindows; ··· 547 527 CPUState parent_obj; 548 528 /*< public >*/ 549 529 530 + CPUNegativeOffsetState neg; 550 531 CPUSPARCState env; 551 532 }; 552 533 553 - static inline SPARCCPU *sparc_env_get_cpu(CPUSPARCState *env) 554 - { 555 - return container_of(env, SPARCCPU, env); 556 - } 557 - 558 - #define ENV_GET_CPU(e) CPU(sparc_env_get_cpu(e)) 559 - 560 - #define ENV_OFFSET offsetof(SPARCCPU, env) 561 534 562 535 #ifndef CONFIG_USER_ONLY 563 536 extern const struct VMStateDescription vmstate_sparc_cpu; ··· 746 719 return pil > env1->psrpil; 747 720 #endif 748 721 } 722 + 723 + typedef CPUSPARCState CPUArchState; 724 + typedef SPARCCPU ArchCPU; 749 725 750 726 #include "exec/cpu-all.h" 751 727
+1 -1
target/sparc/fop_helper.c
··· 53 53 } 54 54 55 55 if ((fsr & FSR_CEXC_MASK) & ((fsr & FSR_TEM_MASK) >> 23)) { 56 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 56 + CPUState *cs = env_cpu(env); 57 57 58 58 /* Unmasked exception, generate a trap. Note that while 59 59 the helper is marked as NO_WG, we can get away with
+4 -4
target/sparc/helper.c
··· 26 26 27 27 void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra) 28 28 { 29 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 29 + CPUState *cs = env_cpu(env); 30 30 31 31 cs->exception_index = tt; 32 32 cpu_loop_exit_restore(cs, ra); ··· 34 34 35 35 void helper_raise_exception(CPUSPARCState *env, int tt) 36 36 { 37 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 37 + CPUState *cs = env_cpu(env); 38 38 39 39 cs->exception_index = tt; 40 40 cpu_loop_exit(cs); ··· 42 42 43 43 void helper_debug(CPUSPARCState *env) 44 44 { 45 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 45 + CPUState *cs = env_cpu(env); 46 46 47 47 cs->exception_index = EXCP_DEBUG; 48 48 cpu_loop_exit(cs); ··· 243 243 #ifndef TARGET_SPARC64 244 244 void helper_power_down(CPUSPARCState *env) 245 245 { 246 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 246 + CPUState *cs = env_cpu(env); 247 247 248 248 cs->halted = 1; 249 249 cs->exception_index = EXCP_HLT;
+15 -18
target/sparc/ldst_helper.c
··· 122 122 123 123 static void replace_tlb_entry(SparcTLBEntry *tlb, 124 124 uint64_t tlb_tag, uint64_t tlb_tte, 125 - CPUSPARCState *env1) 125 + CPUSPARCState *env) 126 126 { 127 127 target_ulong mask, size, va, offset; 128 128 129 129 /* flush page range if translation is valid */ 130 130 if (TTE_IS_VALID(tlb->tte)) { 131 - CPUState *cs = CPU(sparc_env_get_cpu(env1)); 131 + CPUState *cs = env_cpu(env); 132 132 133 133 size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte); 134 134 mask = 1ULL + ~size; ··· 499 499 { 500 500 int size = 1 << (memop & MO_SIZE); 501 501 int sign = memop & MO_SIGN; 502 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 502 + CPUState *cs = env_cpu(env); 503 503 uint64_t ret = 0; 504 504 #if defined(DEBUG_MXCC) || defined(DEBUG_ASI) 505 505 uint32_t last_addr = addr; ··· 725 725 int asi, uint32_t memop) 726 726 { 727 727 int size = 1 << (memop & MO_SIZE); 728 - SPARCCPU *cpu = sparc_env_get_cpu(env); 729 - CPUState *cs = CPU(cpu); 728 + CPUState *cs = env_cpu(env); 730 729 731 730 do_check_align(env, addr, size - 1, GETPC()); 732 731 switch (asi) { ··· 874 873 DPRINTF_MMU("mmu flush level %d\n", mmulev); 875 874 switch (mmulev) { 876 875 case 0: /* flush page */ 877 - tlb_flush_page(CPU(cpu), addr & 0xfffff000); 876 + tlb_flush_page(cs, addr & 0xfffff000); 878 877 break; 879 878 case 1: /* flush segment (256k) */ 880 879 case 2: /* flush region (16M) */ 881 880 case 3: /* flush context (4G) */ 882 881 case 4: /* flush entire */ 883 - tlb_flush(CPU(cpu)); 882 + tlb_flush(cs); 884 883 break; 885 884 default: 886 885 break; ··· 905 904 are invalid in normal mode. 
*/ 906 905 if ((oldreg ^ env->mmuregs[reg]) 907 906 & (MMU_NF | env->def.mmu_bm)) { 908 - tlb_flush(CPU(cpu)); 907 + tlb_flush(cs); 909 908 } 910 909 break; 911 910 case 1: /* Context Table Pointer Register */ ··· 916 915 if (oldreg != env->mmuregs[reg]) { 917 916 /* we flush when the MMU context changes because 918 917 QEMU has no MMU context support */ 919 - tlb_flush(CPU(cpu)); 918 + tlb_flush(cs); 920 919 } 921 920 break; 922 921 case 3: /* Synchronous Fault Status Register with Clear */ ··· 1027 1026 case ASI_USERTXT: /* User code access, XXX */ 1028 1027 case ASI_KERNELTXT: /* Supervisor code access, XXX */ 1029 1028 default: 1030 - cpu_unassigned_access(CPU(sparc_env_get_cpu(env)), 1031 - addr, true, false, asi, size); 1029 + cpu_unassigned_access(cs, addr, true, false, asi, size); 1032 1030 break; 1033 1031 1034 1032 case ASI_USERDATA: /* User data access */ ··· 1175 1173 { 1176 1174 int size = 1 << (memop & MO_SIZE); 1177 1175 int sign = memop & MO_SIGN; 1178 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 1176 + CPUState *cs = env_cpu(env); 1179 1177 uint64_t ret = 0; 1180 1178 #if defined(DEBUG_ASI) 1181 1179 target_ulong last_addr = addr; ··· 1481 1479 int asi, uint32_t memop) 1482 1480 { 1483 1481 int size = 1 << (memop & MO_SIZE); 1484 - SPARCCPU *cpu = sparc_env_get_cpu(env); 1485 - CPUState *cs = CPU(cpu); 1482 + CPUState *cs = env_cpu(env); 1486 1483 1487 1484 #ifdef DEBUG_ASI 1488 1485 dump_asi("write", addr, asi, size, val); ··· 1686 1683 env->dmmu.mmu_primary_context = val; 1687 1684 /* can be optimized to only flush MMU_USER_IDX 1688 1685 and MMU_KERNEL_IDX entries */ 1689 - tlb_flush(CPU(cpu)); 1686 + tlb_flush(cs); 1690 1687 break; 1691 1688 case 2: /* Secondary context */ 1692 1689 env->dmmu.mmu_secondary_context = val; 1693 1690 /* can be optimized to only flush MMU_USER_SECONDARY_IDX 1694 1691 and MMU_KERNEL_SECONDARY_IDX entries */ 1695 - tlb_flush(CPU(cpu)); 1692 + tlb_flush(cs); 1696 1693 break; 1697 1694 case 5: /* TSB access */ 1698 
1695 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016" ··· 1768 1765 case 1: 1769 1766 env->dmmu.mmu_primary_context = val; 1770 1767 env->immu.mmu_primary_context = val; 1771 - tlb_flush_by_mmuidx(CPU(cpu), 1768 + tlb_flush_by_mmuidx(cs, 1772 1769 (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX)); 1773 1770 break; 1774 1771 case 2: 1775 1772 env->dmmu.mmu_secondary_context = val; 1776 1773 env->immu.mmu_secondary_context = val; 1777 - tlb_flush_by_mmuidx(CPU(cpu), 1774 + tlb_flush_by_mmuidx(cs, 1778 1775 (1 << MMU_USER_SECONDARY_IDX) | 1779 1776 (1 << MMU_KERNEL_SECONDARY_IDX)); 1780 1777 break;
+5 -5
target/sparc/mmu_helper.c
··· 97 97 uint32_t pde; 98 98 int error_code = 0, is_dirty, is_user; 99 99 unsigned long page_offset; 100 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 100 + CPUState *cs = env_cpu(env); 101 101 102 102 is_user = mmu_idx == MMU_USER_IDX; 103 103 ··· 268 268 269 269 target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev) 270 270 { 271 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 271 + CPUState *cs = env_cpu(env); 272 272 hwaddr pde_ptr; 273 273 uint32_t pde; 274 274 ··· 335 335 336 336 void dump_mmu(CPUSPARCState *env) 337 337 { 338 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 338 + CPUState *cs = env_cpu(env); 339 339 target_ulong va, va1, va2; 340 340 unsigned int n, m, o; 341 341 hwaddr pde_ptr, pa; ··· 494 494 hwaddr *physical, int *prot, 495 495 target_ulong address, int rw, int mmu_idx) 496 496 { 497 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 497 + CPUState *cs = env_cpu(env); 498 498 unsigned int i; 499 499 uint64_t context; 500 500 uint64_t sfsr = 0; ··· 612 612 hwaddr *physical, int *prot, 613 613 target_ulong address, int mmu_idx) 614 614 { 615 - CPUState *cs = CPU(sparc_env_get_cpu(env)); 615 + CPUState *cs = env_cpu(env); 616 616 unsigned int i; 617 617 uint64_t context; 618 618 bool is_user = false;
+17
target/tilegx/cpu-param.h
··· 1 + /* 2 + * TILE-Gx cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2015 Chen Gang 5 + * SPDX-License-Identifier: LGPL-2.0+ 6 + */ 7 + 8 + #ifndef TILEGX_CPU_PARAM_H 9 + #define TILEGX_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 64 12 + #define TARGET_PAGE_BITS 16 /* TILE-Gx uses 64KB page size */ 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 42 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 64 15 + #define NB_MMU_MODES 1 16 + 17 + #endif
+1 -3
target/tilegx/cpu.c
··· 100 100 101 101 static void tilegx_cpu_initfn(Object *obj) 102 102 { 103 - CPUState *cs = CPU(obj); 104 103 TileGXCPU *cpu = TILEGX_CPU(obj); 105 - CPUTLGState *env = &cpu->env; 106 104 107 - cs->env_ptr = env; 105 + cpu_set_cpustate_pointers(cpu); 108 106 } 109 107 110 108 static void tilegx_cpu_do_interrupt(CPUState *cs)
+4 -19
target/tilegx/cpu.h
··· 21 21 #define TILEGX_CPU_H 22 22 23 23 #include "qemu-common.h" 24 - 25 - #define TARGET_LONG_BITS 64 26 - 27 - #define CPUArchState struct CPUTLGState 28 - 29 24 #include "exec/cpu-defs.h" 30 - 31 25 32 26 /* TILE-Gx common register alias */ 33 27 #define TILEGX_R_RE 0 /* 0 register, for function/syscall return value */ ··· 99 93 100 94 /* Fields up to this point are cleared by a CPU reset */ 101 95 struct {} end_reset_fields; 102 - 103 - CPU_COMMON 104 96 } CPUTLGState; 105 97 106 98 #include "qom/cpu.h" ··· 141 133 CPUState parent_obj; 142 134 /*< public >*/ 143 135 136 + CPUNegativeOffsetState neg; 144 137 CPUTLGState env; 145 138 } TileGXCPU; 146 139 147 - static inline TileGXCPU *tilegx_env_get_cpu(CPUTLGState *env) 148 - { 149 - return container_of(env, TileGXCPU, env); 150 - } 151 - 152 - #define ENV_GET_CPU(e) CPU(tilegx_env_get_cpu(e)) 153 - 154 - #define ENV_OFFSET offsetof(TileGXCPU, env) 155 140 156 141 /* TILE-Gx memory attributes */ 157 - #define TARGET_PAGE_BITS 16 /* TILE-Gx uses 64KB page size */ 158 - #define TARGET_PHYS_ADDR_SPACE_BITS 42 159 - #define TARGET_VIRT_ADDR_SPACE_BITS 64 160 142 #define MMU_USER_IDX 0 /* Current memory operation is in user mode */ 143 + 144 + typedef CPUTLGState CPUArchState; 145 + typedef TileGXCPU ArchCPU; 161 146 162 147 #include "exec/cpu-all.h" 163 148
+1 -1
target/tilegx/helper.c
··· 28 28 29 29 void helper_exception(CPUTLGState *env, uint32_t excp) 30 30 { 31 - CPUState *cs = CPU(tilegx_env_get_cpu(env)); 31 + CPUState *cs = env_cpu(env); 32 32 33 33 cs->exception_index = excp; 34 34 cpu_loop_exit(cs);
+17
target/tricore/cpu-param.h
··· 1 + /* 2 + * TriCore cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn 5 + * SPDX-License-Identifier: LGPL-2.1+ 6 + */ 7 + 8 + #ifndef TRICORE_CPU_PARAM_H 9 + #define TRICORE_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 14 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 15 + #define NB_MMU_MODES 3 16 + 17 + #endif
+1 -3
target/tricore/cpu.c
··· 104 104 105 105 static void tricore_cpu_initfn(Object *obj) 106 106 { 107 - CPUState *cs = CPU(obj); 108 107 TriCoreCPU *cpu = TRICORE_CPU(obj); 109 - CPUTriCoreState *env = &cpu->env; 110 108 111 - cs->env_ptr = env; 109 + cpu_set_cpustate_pointers(cpu); 112 110 } 113 111 114 112 static ObjectClass *tricore_cpu_class_by_name(const char *cpu_model)
+4 -18
target/tricore/cpu.h
··· 20 20 #ifndef TRICORE_CPU_H 21 21 #define TRICORE_CPU_H 22 22 23 - #include "tricore-defs.h" 24 23 #include "qemu-common.h" 25 24 #include "cpu-qom.h" 26 25 #include "exec/cpu-defs.h" 27 - 28 - #define CPUArchState struct CPUTriCoreState 29 - 30 - struct CPUTriCoreState; 26 + #include "tricore-defs.h" 31 27 32 28 struct tricore_boot_info; 33 - 34 - #define NB_MMU_MODES 3 35 29 36 30 typedef struct tricore_def_t tricore_def_t; 37 31 ··· 190 184 int error_code; 191 185 uint32_t hflags; /* CPU State */ 192 186 193 - CPU_COMMON 194 - 195 187 /* Internal CPU feature flags. */ 196 188 uint64_t features; 197 189 ··· 211 203 CPUState parent_obj; 212 204 /*< public >*/ 213 205 206 + CPUNegativeOffsetState neg; 214 207 CPUTriCoreState env; 215 208 }; 216 209 217 - static inline TriCoreCPU *tricore_env_get_cpu(CPUTriCoreState *env) 218 - { 219 - return TRICORE_CPU(container_of(env, TriCoreCPU, env)); 220 - } 221 - 222 - #define ENV_GET_CPU(e) CPU(tricore_env_get_cpu(e)) 223 - 224 - #define ENV_OFFSET offsetof(TriCoreCPU, env) 225 210 226 211 hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); 227 212 void tricore_cpu_dump_state(CPUState *cpu, FILE *f, int flags); ··· 384 369 return 0; 385 370 } 386 371 387 - 372 + typedef CPUTriCoreState CPUArchState; 373 + typedef TriCoreCPU ArchCPU; 388 374 389 375 #include "exec/cpu-all.h" 390 376
+1 -1
target/tricore/op_helper.c
··· 29 29 raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin, 30 30 uintptr_t pc, uint32_t fcd_pc) 31 31 { 32 - CPUState *cs = CPU(tricore_env_get_cpu(env)); 32 + CPUState *cs = env_cpu(env); 33 33 /* in case we come from a helper-call we need to restore the PC */ 34 34 cpu_restore_state(cs, pc, true); 35 35
-5
target/tricore/tricore-defs.h
··· 18 18 #ifndef QEMU_TRICORE_DEFS_H 19 19 #define QEMU_TRICORE_DEFS_H 20 20 21 - #define TARGET_PAGE_BITS 14 22 - #define TARGET_LONG_BITS 32 23 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 24 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 25 - 26 21 #define TRICORE_TLB_MAX 128 27 22 28 23 #endif /* QEMU_TRICORE_DEFS_H */
+17
target/unicore32/cpu-param.h
··· 1 + /* 2 + * UniCore32 cpu parameters for qemu. 3 + * 4 + * Copyright (C) 2010-2012 Guan Xuetao 5 + * SPDX-License-Identifier: GPL-2.0+ 6 + */ 7 + 8 + #ifndef UNICORE32_CPU_PARAM_H 9 + #define UNICORE32_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 12 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 15 + #define NB_MMU_MODES 2 16 + 17 + #endif
+1 -2
target/unicore32/cpu.c
··· 103 103 104 104 static void uc32_cpu_initfn(Object *obj) 105 105 { 106 - CPUState *cs = CPU(obj); 107 106 UniCore32CPU *cpu = UNICORE32_CPU(obj); 108 107 CPUUniCore32State *env = &cpu->env; 109 108 110 - cs->env_ptr = env; 109 + cpu_set_cpustate_pointers(cpu); 111 110 112 111 #ifdef CONFIG_USER_ONLY 113 112 env->uncached_asr = ASR_MODE_USER;
+4 -20
target/unicore32/cpu.h
··· 12 12 #ifndef UNICORE32_CPU_H 13 13 #define UNICORE32_CPU_H 14 14 15 - #define TARGET_LONG_BITS 32 16 - #define TARGET_PAGE_BITS 12 17 - 18 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 19 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 20 - 21 - #define CPUArchState struct CPUUniCore32State 22 - 23 15 #include "qemu-common.h" 24 16 #include "cpu-qom.h" 25 17 #include "exec/cpu-defs.h" 26 - 27 - #define NB_MMU_MODES 2 28 18 29 19 typedef struct CPUUniCore32State { 30 20 /* Regs for current mode. */ ··· 64 54 uint32_t xregs[32]; 65 55 float_status fp_status; 66 56 } ucf64; 67 - 68 - CPU_COMMON 69 57 70 58 /* Internal CPU feature flags. */ 71 59 uint32_t features; ··· 83 71 CPUState parent_obj; 84 72 /*< public >*/ 85 73 74 + CPUNegativeOffsetState neg; 86 75 CPUUniCore32State env; 87 76 }; 88 77 89 - static inline UniCore32CPU *uc32_env_get_cpu(CPUUniCore32State *env) 90 - { 91 - return container_of(env, UniCore32CPU, env); 92 - } 93 - 94 - #define ENV_GET_CPU(e) CPU(uc32_env_get_cpu(e)) 95 - 96 - #define ENV_OFFSET offsetof(UniCore32CPU, env) 97 78 98 79 void uc32_cpu_do_interrupt(CPUState *cpu); 99 80 bool uc32_cpu_exec_interrupt(CPUState *cpu, int int_req); ··· 160 141 { 161 142 return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0; 162 143 } 144 + 145 + typedef CPUUniCore32State CPUArchState; 146 + typedef UniCore32CPU ArchCPU; 163 147 164 148 #include "exec/cpu-all.h" 165 149
+1 -3
target/unicore32/helper.c
··· 31 31 void helper_cp0_set(CPUUniCore32State *env, uint32_t val, uint32_t creg, 32 32 uint32_t cop) 33 33 { 34 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 35 - 36 34 /* 37 35 * movc pp.nn, rn, #imm9 38 36 * rn: UCOP_REG_D ··· 101 99 case 6: 102 100 if ((cop <= 6) && (cop >= 2)) { 103 101 /* invalid all tlb */ 104 - tlb_flush(CPU(cpu)); 102 + tlb_flush(env_cpu(env)); 105 103 return; 106 104 } 107 105 break;
+1 -1
target/unicore32/op_helper.c
··· 19 19 20 20 void HELPER(exception)(CPUUniCore32State *env, uint32_t excp) 21 21 { 22 - CPUState *cs = CPU(uc32_env_get_cpu(env)); 22 + CPUState *cs = env_cpu(env); 23 23 24 24 cs->exception_index = excp; 25 25 cpu_loop_exit(cs);
+4 -7
target/unicore32/softmmu.c
··· 36 36 /* Map CPU modes onto saved register banks. */ 37 37 static inline int bank_number(CPUUniCore32State *env, int mode) 38 38 { 39 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 40 - 41 39 switch (mode) { 42 40 case ASR_MODE_USER: 43 41 case ASR_MODE_SUSR: ··· 51 49 case ASR_MODE_INTR: 52 50 return 4; 53 51 } 54 - cpu_abort(CPU(cpu), "Bad mode %x\n", mode); 52 + cpu_abort(env_cpu(env), "Bad mode %x\n", mode); 55 53 return -1; 56 54 } 57 55 ··· 126 124 int access_type, int is_user, uint32_t *phys_ptr, int *prot, 127 125 target_ulong *page_size) 128 126 { 129 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 130 - CPUState *cs = CPU(cpu); 127 + CPUState *cs = env_cpu(env); 131 128 int code; 132 129 uint32_t table; 133 130 uint32_t desc; ··· 174 171 *page_size = TARGET_PAGE_SIZE; 175 172 break; 176 173 default: 177 - cpu_abort(CPU(cpu), "wrong page type!"); 174 + cpu_abort(cs, "wrong page type!"); 178 175 } 179 176 break; 180 177 default: 181 - cpu_abort(CPU(cpu), "wrong page type!"); 178 + cpu_abort(cs, "wrong page type!"); 182 179 } 183 180 184 181 *phys_ptr = phys_addr;
+2 -24
target/unicore32/translate.c
··· 180 180 #define UCOP_SET_L UCOP_SET(24) 181 181 #define UCOP_SET_S UCOP_SET(24) 182 182 183 - #define ILLEGAL cpu_abort(CPU(cpu), \ 183 + #define ILLEGAL cpu_abort(env_cpu(env), \ 184 184 "Illegal UniCore32 instruction %x at line %d!", \ 185 185 insn, __LINE__) 186 186 ··· 188 188 static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s, 189 189 uint32_t insn) 190 190 { 191 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 192 191 TCGv tmp, tmp2, tmp3; 193 192 if ((insn & 0xfe000000) == 0xe0000000) { 194 193 tmp2 = new_tmp(); ··· 214 213 static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s, 215 214 uint32_t insn) 216 215 { 217 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 218 216 TCGv tmp; 219 217 220 218 if ((insn & 0xff003fff) == 0xe1000400) { ··· 682 680 /* UniCore-F64 single load/store I_offset */ 683 681 static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 684 682 { 685 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 686 683 int offset; 687 684 TCGv tmp; 688 685 TCGv addr; ··· 729 726 /* UniCore-F64 load/store multiple words */ 730 727 static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 731 728 { 732 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 733 729 unsigned int i; 734 730 int j, n, freg; 735 731 TCGv tmp; ··· 815 811 /* UniCore-F64 mrc/mcr */ 816 812 static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 817 813 { 818 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 819 814 TCGv tmp; 820 815 821 816 if ((insn & 0xfe0003ff) == 0xe2000000) { ··· 880 875 /* UniCore-F64 convert instructions */ 881 876 static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 882 877 { 883 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 884 - 885 878 if (UCOP_UCF64_FMT == 3) { 886 879 ILLEGAL; 887 880 } ··· 948 941 /* UniCore-F64 compare instructions */ 949 942 static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 950 943 { 
951 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 952 - 953 944 if (UCOP_SET(25)) { 954 945 ILLEGAL; 955 946 } ··· 1028 1019 /* UniCore-F64 data processing */ 1029 1020 static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1030 1021 { 1031 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1032 - 1033 1022 if (UCOP_UCF64_FMT == 3) { 1034 1023 ILLEGAL; 1035 1024 } ··· 1063 1052 /* Disassemble an F64 instruction */ 1064 1053 static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1065 1054 { 1066 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1067 - 1068 1055 if (!UCOP_SET(29)) { 1069 1056 if (UCOP_SET(26)) { 1070 1057 do_ucf64_ldst_m(env, s, insn); ··· 1162 1149 static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s, 1163 1150 uint32_t insn) 1164 1151 { 1165 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1166 - 1167 1152 switch (UCOP_CPNUM) { 1168 1153 #ifndef CONFIG_USER_ONLY 1169 1154 case 0: ··· 1178 1163 break; 1179 1164 default: 1180 1165 /* Unknown coprocessor. 
*/ 1181 - cpu_abort(CPU(cpu), "Unknown coprocessor!"); 1166 + cpu_abort(env_cpu(env), "Unknown coprocessor!"); 1182 1167 } 1183 1168 } 1184 1169 1185 1170 /* data processing instructions */ 1186 1171 static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1187 1172 { 1188 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1189 1173 TCGv tmp; 1190 1174 TCGv tmp2; 1191 1175 int logic_cc; ··· 1419 1403 /* miscellaneous instructions */ 1420 1404 static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1421 1405 { 1422 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1423 1406 unsigned int val; 1424 1407 TCGv tmp; 1425 1408 ··· 1545 1528 /* SWP instruction */ 1546 1529 static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1547 1530 { 1548 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1549 1531 TCGv addr; 1550 1532 TCGv tmp; 1551 1533 TCGv tmp2; ··· 1573 1555 /* load/store hw/sb */ 1574 1556 static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1575 1557 { 1576 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1577 1558 TCGv addr; 1578 1559 TCGv tmp; 1579 1560 ··· 1626 1607 /* load/store multiple words */ 1627 1608 static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1628 1609 { 1629 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1630 1610 unsigned int val, i, mmu_idx; 1631 1611 int j, n, reg, user, loaded_base; 1632 1612 TCGv tmp; ··· 1768 1748 /* branch (and link) */ 1769 1749 static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn) 1770 1750 { 1771 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1772 1751 unsigned int val; 1773 1752 int32_t offset; 1774 1753 TCGv tmp; ··· 1798 1777 1799 1778 static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s) 1800 1779 { 1801 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 1802 1780 unsigned int insn; 1803 1781 1804 1782 insn = cpu_ldl_code(env, s->pc);
+1 -1
target/unicore32/ucf64_helper.c
··· 78 78 79 79 void HELPER(ucf64_set_fpscr)(CPUUniCore32State *env, uint32_t val) 80 80 { 81 - UniCore32CPU *cpu = uc32_env_get_cpu(env); 81 + UniCore32CPU *cpu = env_archcpu(env); 82 82 int i; 83 83 uint32_t changed; 84 84
+21
target/xtensa/cpu-param.h
··· 1 + /* 2 + * Xtensa cpu parameters for qemu. 3 + * 4 + * Copyright (c) 2011, Max Filippov, Open Source and Linux Lab. 5 + * SPDX-License-Identifier: BSD-3-Clause 6 + */ 7 + 8 + #ifndef XTENSA_CPU_PARAM_H 9 + #define XTENSA_CPU_PARAM_H 1 10 + 11 + #define TARGET_LONG_BITS 32 12 + #define TARGET_PAGE_BITS 12 13 + #define TARGET_PHYS_ADDR_SPACE_BITS 32 14 + #ifdef CONFIG_USER_ONLY 15 + #define TARGET_VIRT_ADDR_SPACE_BITS 30 16 + #else 17 + #define TARGET_VIRT_ADDR_SPACE_BITS 32 18 + #endif 19 + #define NB_MMU_MODES 4 20 + 21 + #endif
+1 -2
target/xtensa/cpu.c
··· 138 138 139 139 static void xtensa_cpu_initfn(Object *obj) 140 140 { 141 - CPUState *cs = CPU(obj); 142 141 XtensaCPU *cpu = XTENSA_CPU(obj); 143 142 XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj); 144 143 CPUXtensaState *env = &cpu->env; 145 144 146 - cs->env_ptr = env; 145 + cpu_set_cpustate_pointers(cpu); 147 146 env->config = xcc->config; 148 147 149 148 #ifndef CONFIG_USER_ONLY
+10 -30
target/xtensa/cpu.h
··· 28 28 #ifndef XTENSA_CPU_H 29 29 #define XTENSA_CPU_H 30 30 31 - #define ALIGNED_ONLY 32 - #define TARGET_LONG_BITS 32 33 - 34 - /* Xtensa processors have a weak memory model */ 35 - #define TCG_GUEST_DEFAULT_MO (0) 36 - 37 - #define CPUArchState struct CPUXtensaState 38 - 39 31 #include "qemu-common.h" 40 32 #include "cpu-qom.h" 41 33 #include "exec/cpu-defs.h" 42 34 #include "xtensa-isa.h" 43 35 44 - #define NB_MMU_MODES 4 36 + #define ALIGNED_ONLY 45 37 46 - #define TARGET_PHYS_ADDR_SPACE_BITS 32 47 - #ifdef CONFIG_USER_ONLY 48 - #define TARGET_VIRT_ADDR_SPACE_BITS 30 49 - #else 50 - #define TARGET_VIRT_ADDR_SPACE_BITS 32 51 - #endif 52 - #define TARGET_PAGE_BITS 12 38 + /* Xtensa processors have a weak memory model */ 39 + #define TCG_GUEST_DEFAULT_MO (0) 53 40 54 41 enum { 55 42 /* Additional instructions */ ··· 554 541 555 542 /* Watchpoints for DBREAK registers */ 556 543 struct CPUWatchpoint *cpu_watchpoint[MAX_NDBREAK]; 557 - 558 - CPU_COMMON 559 544 } CPUXtensaState; 560 545 561 546 /** ··· 569 554 CPUState parent_obj; 570 555 /*< public >*/ 571 556 557 + CPUNegativeOffsetState neg; 572 558 CPUXtensaState env; 573 559 }; 574 - 575 - static inline XtensaCPU *xtensa_env_get_cpu(const CPUXtensaState *env) 576 - { 577 - return container_of(env, XtensaCPU, env); 578 - } 579 - 580 - #define ENV_GET_CPU(e) CPU(xtensa_env_get_cpu(e)) 581 - 582 - #define ENV_OFFSET offsetof(XtensaCPU, env) 583 560 584 561 585 562 bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, ··· 739 716 #define XTENSA_CSBASE_LBEG_OFF_MASK 0x00ff0000 740 717 #define XTENSA_CSBASE_LBEG_OFF_SHIFT 16 741 718 719 + typedef CPUXtensaState CPUArchState; 720 + typedef XtensaCPU ArchCPU; 721 + 722 + #include "exec/cpu-all.h" 723 + 742 724 static inline void cpu_get_tb_cpu_state(CPUXtensaState *env, target_ulong *pc, 743 725 target_ulong *cs_base, uint32_t *flags) 744 726 { 745 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 727 + CPUState *cs = env_cpu(env); 746 728 747 729 *pc = 
env->pc; 748 730 *cs_base = 0; ··· 811 793 *flags |= XTENSA_TBFLAG_YIELD; 812 794 } 813 795 } 814 - 815 - #include "exec/cpu-all.h" 816 796 817 797 #endif
+2 -2
target/xtensa/dbg_helper.c
··· 71 71 static void set_dbreak(CPUXtensaState *env, unsigned i, uint32_t dbreaka, 72 72 uint32_t dbreakc) 73 73 { 74 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 74 + CPUState *cs = env_cpu(env); 75 75 int flags = BP_CPU | BP_STOP_BEFORE_ACCESS; 76 76 uint32_t mask = dbreakc | ~DBREAKC_MASK; 77 77 ··· 118 118 set_dbreak(env, i, env->sregs[DBREAKA + i], v); 119 119 } else { 120 120 if (env->cpu_watchpoint[i]) { 121 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 121 + CPUState *cs = env_cpu(env); 122 122 123 123 cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[i]); 124 124 env->cpu_watchpoint[i] = NULL;
+4 -5
target/xtensa/exc_helper.c
··· 34 34 35 35 void HELPER(exception)(CPUXtensaState *env, uint32_t excp) 36 36 { 37 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 37 + CPUState *cs = env_cpu(env); 38 38 39 39 cs->exception_index = excp; 40 40 if (excp == EXCP_YIELD) { ··· 100 100 101 101 void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel) 102 102 { 103 - CPUState *cpu; 103 + CPUState *cpu = env_cpu(env); 104 104 105 105 env->pc = pc; 106 106 env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | ··· 111 111 qemu_mutex_unlock_iothread(); 112 112 113 113 if (env->pending_irq_level) { 114 - cpu_loop_exit(CPU(xtensa_env_get_cpu(env))); 114 + cpu_loop_exit(cpu); 115 115 return; 116 116 } 117 117 118 - cpu = CPU(xtensa_env_get_cpu(env)); 119 118 cpu->halted = 1; 120 119 HELPER(exception)(env, EXCP_HLT); 121 120 } ··· 165 164 (env->config->level_mask[level] & 166 165 env->sregs[INTSET] & 167 166 env->sregs[INTENABLE])) { 168 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 167 + CPUState *cs = env_cpu(env); 169 168 170 169 if (level > 1) { 171 170 env->sregs[EPC1 + level - 1] = env->pc;
+1 -1
target/xtensa/helper.c
··· 324 324 325 325 void xtensa_runstall(CPUXtensaState *env, bool runstall) 326 326 { 327 - CPUState *cpu = CPU(xtensa_env_get_cpu(env)); 327 + CPUState *cpu = env_cpu(env); 328 328 329 329 env->runstall = runstall; 330 330 cpu->halted = runstall;
+6 -11
target/xtensa/mmu_helper.c
··· 71 71 72 72 void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v) 73 73 { 74 - XtensaCPU *cpu = xtensa_env_get_cpu(env); 75 - 76 74 v = (v & 0xffffff00) | 0x1; 77 75 if (v != env->sregs[RASID]) { 78 76 env->sregs[RASID] = v; 79 - tlb_flush(CPU(cpu)); 77 + tlb_flush(env_cpu(env)); 80 78 } 81 79 } 82 80 ··· 276 274 unsigned wi, unsigned ei, 277 275 uint32_t vpn, uint32_t pte) 278 276 { 279 - XtensaCPU *cpu = xtensa_env_get_cpu(env); 280 - CPUState *cs = CPU(cpu); 277 + CPUState *cs = env_cpu(env); 281 278 xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei); 282 279 283 280 if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { ··· 503 500 uint32_t wi; 504 501 xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi); 505 502 if (entry->variable && entry->asid) { 506 - tlb_flush_page(CPU(xtensa_env_get_cpu(env)), entry->vaddr); 503 + tlb_flush_page(env_cpu(env), entry->vaddr); 507 504 entry->asid = 0; 508 505 } 509 506 } ··· 844 841 845 842 static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte) 846 843 { 847 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 844 + CPUState *cs = env_cpu(env); 848 845 uint32_t paddr; 849 846 uint32_t page_size; 850 847 unsigned access; ··· 924 921 925 922 void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v) 926 923 { 927 - XtensaCPU *cpu = xtensa_env_get_cpu(env); 928 - 929 924 v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1; 930 925 931 926 if (v != env->sregs[MPUENB]) { 932 927 env->sregs[MPUENB] = v; 933 - tlb_flush(CPU(cpu)); 928 + tlb_flush(env_cpu(env)); 934 929 } 935 930 } 936 931 ··· 942 937 env->mpu_fg[segment].vaddr = v & -env->config->mpu_align; 943 938 env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK; 944 939 env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v); 945 - tlb_flush(CPU(xtensa_env_get_cpu(env))); 940 + tlb_flush(env_cpu(env)); 946 941 } 947 942 } 948 943
+1 -1
target/xtensa/xtensa-semi.c
··· 197 197 198 198 void HELPER(simcall)(CPUXtensaState *env) 199 199 { 200 - CPUState *cs = CPU(xtensa_env_get_cpu(env)); 200 + CPUState *cs = env_cpu(env); 201 201 uint32_t *regs = env->regs; 202 202 203 203 switch (regs[2]) {
+10 -30
tcg/aarch64/tcg-target.inc.c
··· 1637 1637 label->label_ptr[0] = label_ptr; 1638 1638 } 1639 1639 1640 - /* We expect tlb_mask to be before tlb_table. */ 1641 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) < 1642 - offsetof(CPUArchState, tlb_mask)); 1640 + /* We expect to use a 7-bit scaled negative offset from ENV. */ 1641 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1642 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512); 1643 1643 1644 - /* We expect to use a 24-bit unsigned offset from ENV. */ 1645 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1]) 1646 - > 0xffffff); 1644 + /* These offsets are built into the LDP below. */ 1645 + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); 1646 + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); 1647 1647 1648 1648 /* Load and compare a TLB entry, emitting the conditional jump to the 1649 1649 slow path for the failure case, which will be patched later when finalizing ··· 1653 1653 tcg_insn_unit **label_ptr, int mem_index, 1654 1654 bool is_read) 1655 1655 { 1656 - int mask_ofs = offsetof(CPUArchState, tlb_mask[mem_index]); 1657 - int table_ofs = offsetof(CPUArchState, tlb_table[mem_index]); 1658 1656 unsigned a_bits = get_alignment_bits(opc); 1659 1657 unsigned s_bits = opc & MO_SIZE; 1660 1658 unsigned a_mask = (1u << a_bits) - 1; 1661 1659 unsigned s_mask = (1u << s_bits) - 1; 1662 - TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0, x3; 1660 + TCGReg x3; 1663 1661 TCGType mask_type; 1664 1662 uint64_t compare_mask; 1665 1663 1666 - if (table_ofs > 0xfff) { 1667 - int table_hi = table_ofs & ~0xfff; 1668 - int mask_hi = mask_ofs & ~0xfff; 1669 - 1670 - table_base = TCG_REG_X1; 1671 - if (mask_hi == table_hi) { 1672 - mask_base = table_base; 1673 - } else if (mask_hi) { 1674 - mask_base = TCG_REG_X0; 1675 - tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, 1676 - mask_base, TCG_AREG0, mask_hi); 1677 - } 1678 - tcg_out_insn(s, 3401, ADDI, TCG_TYPE_I64, 1679 - table_base, TCG_AREG0, table_hi); 1680 - mask_ofs -= 
mask_hi; 1681 - table_ofs -= table_hi; 1682 - } 1683 - 1684 1664 mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32 1685 1665 ? TCG_TYPE_I64 : TCG_TYPE_I32); 1686 1666 1687 - /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ 1688 - tcg_out_ld(s, mask_type, TCG_REG_X0, mask_base, mask_ofs); 1689 - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, table_base, table_ofs); 1667 + /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */ 1668 + tcg_out_insn(s, 3314, LDP, TCG_REG_X0, TCG_REG_X1, TCG_AREG0, 1669 + TLB_MASK_TABLE_OFS(mem_index), 1, 0); 1690 1670 1691 1671 /* Extract the TLB index from the address into X0. */ 1692 1672 tcg_out_insn(s, 3502S, AND_LSR, mask_type == TCG_TYPE_I64,
+54 -67
tcg/arm/tcg-target.inc.c
··· 267 267 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0); 268 268 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1); 269 269 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2); 270 + tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3); 270 271 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14); 271 272 #endif 272 273 break; ··· 1220 1221 1221 1222 #define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS) 1222 1223 1223 - /* We expect tlb_mask to be before tlb_table. */ 1224 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) < 1225 - offsetof(CPUArchState, tlb_mask)); 1224 + /* We expect to use an 9-bit sign-magnitude negative offset from ENV. */ 1225 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1226 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -256); 1226 1227 1227 - /* We expect to use a 20-bit unsigned offset from ENV. */ 1228 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1]) 1229 - > 0xfffff); 1228 + /* These offsets are built into the LDRD below. */ 1229 + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); 1230 + QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); 1230 1231 1231 1232 /* Load and compare a TLB entry, leaving the flags set. Returns the register 1232 1233 containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */ ··· 1236 1237 { 1237 1238 int cmp_off = (is_load ? 
offsetof(CPUTLBEntry, addr_read) 1238 1239 : offsetof(CPUTLBEntry, addr_write)); 1239 - int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]); 1240 - int table_off = offsetof(CPUArchState, tlb_table[mem_index]); 1241 - TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; 1240 + int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1241 + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); 1242 + int table_off = fast_off + offsetof(CPUTLBDescFast, table); 1242 1243 unsigned s_bits = opc & MO_SIZE; 1243 1244 unsigned a_bits = get_alignment_bits(opc); 1244 1245 1245 - if (table_off > 0xfff) { 1246 - int mask_hi = mask_off & ~0xfff; 1247 - int table_hi = table_off & ~0xfff; 1248 - int rot; 1249 - 1250 - table_base = TCG_REG_R2; 1251 - if (mask_hi == table_hi) { 1252 - mask_base = table_base; 1253 - } else if (mask_hi) { 1254 - mask_base = TCG_REG_TMP; 1255 - rot = encode_imm(mask_hi); 1256 - assert(rot >= 0); 1257 - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, mask_base, TCG_AREG0, 1258 - rotl(mask_hi, rot) | (rot << 7)); 1259 - } 1260 - rot = encode_imm(table_hi); 1261 - assert(rot >= 0); 1262 - tcg_out_dat_imm(s, COND_AL, ARITH_ADD, table_base, TCG_AREG0, 1263 - rotl(table_hi, rot) | (rot << 7)); 1264 - 1265 - mask_off -= mask_hi; 1266 - table_off -= table_hi; 1246 + /* 1247 + * We don't support inline unaligned acceses, but we can easily 1248 + * support overalignment checks. 1249 + */ 1250 + if (a_bits < s_bits) { 1251 + a_bits = s_bits; 1267 1252 } 1268 1253 1269 - /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ 1270 - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP, mask_base, mask_off); 1271 - tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R2, table_base, table_off); 1254 + /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {r0,r1}. 
*/ 1255 + if (use_armv6_instructions) { 1256 + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); 1257 + } else { 1258 + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R0, TCG_AREG0, mask_off); 1259 + tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R1, TCG_AREG0, table_off); 1260 + } 1272 1261 1273 - /* Extract the tlb index from the address into TMP. */ 1274 - tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, addrlo, 1262 + /* Extract the tlb index from the address into R0. */ 1263 + tcg_out_dat_reg(s, COND_AL, ARITH_AND, TCG_REG_R0, TCG_REG_R0, addrlo, 1275 1264 SHIFT_IMM_LSR(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS)); 1276 1265 1277 1266 /* 1278 - * Add the tlb_table pointer, creating the CPUTLBEntry address in R2. 1279 - * Load the tlb comparator into R0/R1 and the fast path addend into R2. 1267 + * Add the tlb_table pointer, creating the CPUTLBEntry address in R1. 1268 + * Load the tlb comparator into R2/R3 and the fast path addend into R1. 1280 1269 */ 1281 1270 if (cmp_off == 0) { 1282 - if (use_armv6_instructions && TARGET_LONG_BITS == 64) { 1283 - tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R2, TCG_REG_TMP); 1271 + if (use_armv6_instructions && TARGET_LONG_BITS == 64) { 1272 + tcg_out_ldrd_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); 1284 1273 } else { 1285 - tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R0, TCG_REG_R2, TCG_REG_TMP); 1274 + tcg_out_ld32_rwb(s, COND_AL, TCG_REG_R2, TCG_REG_R1, TCG_REG_R0); 1286 1275 } 1287 1276 } else { 1288 1277 tcg_out_dat_reg(s, COND_AL, ARITH_ADD, 1289 - TCG_REG_R2, TCG_REG_R2, TCG_REG_TMP, 0); 1278 + TCG_REG_R1, TCG_REG_R1, TCG_REG_R0, 0); 1290 1279 if (use_armv6_instructions && TARGET_LONG_BITS == 64) { 1291 - tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off); 1280 + tcg_out_ldrd_8(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1292 1281 } else { 1293 - tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off); 1294 - } 1282 + tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R1, cmp_off); 1283 + } 1295 
1284 } 1296 1285 if (!use_armv6_instructions && TARGET_LONG_BITS == 64) { 1297 - tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2, cmp_off + 4); 1286 + tcg_out_ld32_12(s, COND_AL, TCG_REG_R3, TCG_REG_R1, cmp_off + 4); 1298 1287 } 1299 1288 1300 1289 /* Load the tlb addend. */ 1301 - tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2, 1290 + tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R1, 1302 1291 offsetof(CPUTLBEntry, addend)); 1303 1292 1304 - /* Check alignment. We don't support inline unaligned acceses, 1305 - but we can easily support overalignment checks. */ 1306 - if (a_bits < s_bits) { 1307 - a_bits = s_bits; 1308 - } 1309 - 1310 - if (use_armv7_instructions) { 1293 + /* 1294 + * Check alignment, check comparators. 1295 + * Do this in no more than 3 insns. Use MOVW for v7, if possible, 1296 + * to reduce the number of sequential conditional instructions. 1297 + * Almost all guests have at least 4k pages, which means that we need 1298 + * to clear at least 9 bits even for an 8-byte memory, which means it 1299 + * isn't worth checking for an immediate operand for BIC. 
1300 + */ 1301 + if (use_armv7_instructions && TARGET_PAGE_BITS <= 16) { 1311 1302 tcg_target_ulong mask = ~(TARGET_PAGE_MASK | ((1 << a_bits) - 1)); 1312 - int rot = encode_imm(mask); 1313 1303 1314 - if (rot >= 0) { 1315 - tcg_out_dat_imm(s, COND_AL, ARITH_BIC, TCG_REG_TMP, addrlo, 1316 - rotl(mask, rot) | (rot << 7)); 1317 - } else { 1318 - tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask); 1319 - tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1320 - addrlo, TCG_REG_TMP, 0); 1321 - } 1322 - tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R0, TCG_REG_TMP, 0); 1304 + tcg_out_movi32(s, COND_AL, TCG_REG_TMP, mask); 1305 + tcg_out_dat_reg(s, COND_AL, ARITH_BIC, TCG_REG_TMP, 1306 + addrlo, TCG_REG_TMP, 0); 1307 + tcg_out_dat_reg(s, COND_AL, ARITH_CMP, 0, TCG_REG_R2, TCG_REG_TMP, 0); 1323 1308 } else { 1324 1309 if (a_bits) { 1325 1310 tcg_out_dat_imm(s, COND_AL, ARITH_TST, 0, addrlo, 1326 1311 (1 << a_bits) - 1); 1327 1312 } 1313 + tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP, 0, addrlo, 1314 + SHIFT_IMM_LSR(TARGET_PAGE_BITS)); 1328 1315 tcg_out_dat_reg(s, (a_bits ? COND_EQ : COND_AL), ARITH_CMP, 1329 - 0, TCG_REG_R0, TCG_REG_TMP, 1316 + 0, TCG_REG_R2, TCG_REG_TMP, 1330 1317 SHIFT_IMM_LSL(TARGET_PAGE_BITS)); 1331 1318 } 1332 1319 1333 1320 if (TARGET_LONG_BITS == 64) { 1334 - tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R1, addrhi, 0); 1321 + tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0, TCG_REG_R3, addrhi, 0); 1335 1322 } 1336 1323 1337 - return TCG_REG_R2; 1324 + return TCG_REG_R1; 1338 1325 } 1339 1326 1340 1327 /* Record the context of a call to the out of line helper code for the slow
+4 -2
tcg/i386/tcg-target.inc.c
··· 1730 1730 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); 1731 1731 1732 1732 tcg_out_modrm_offset(s, OPC_AND_GvEv + trexw, r0, TCG_AREG0, 1733 - offsetof(CPUArchState, tlb_mask[mem_index])); 1733 + TLB_MASK_TABLE_OFS(mem_index) + 1734 + offsetof(CPUTLBDescFast, mask)); 1734 1735 1735 1736 tcg_out_modrm_offset(s, OPC_ADD_GvEv + hrexw, r0, TCG_AREG0, 1736 - offsetof(CPUArchState, tlb_table[mem_index])); 1737 + TLB_MASK_TABLE_OFS(mem_index) + 1738 + offsetof(CPUTLBDescFast, table)); 1737 1739 1738 1740 /* If the required alignment is at least as large as the access, simply 1739 1741 copy the address and mask. For lesser alignments, check that we don't
+8 -37
tcg/mips/tcg-target.inc.c
··· 1202 1202 return i; 1203 1203 } 1204 1204 1205 - /* We expect tlb_mask to be before tlb_table. */ 1206 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) < 1207 - offsetof(CPUArchState, tlb_mask)); 1208 - 1209 - /* We expect tlb_mask to be "near" tlb_table. */ 1210 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) - 1211 - offsetof(CPUArchState, tlb_mask) >= 0x8000); 1205 + /* We expect to use a 16-bit negative offset from ENV. */ 1206 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1207 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); 1212 1208 1213 1209 /* 1214 1210 * Perform the tlb comparison operation. ··· 1223 1219 unsigned s_bits = opc & MO_SIZE; 1224 1220 unsigned a_bits = get_alignment_bits(opc); 1225 1221 int mem_index = get_mmuidx(oi); 1226 - int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]); 1227 - int table_off = offsetof(CPUArchState, tlb_table[mem_index]); 1222 + int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1223 + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); 1224 + int table_off = fast_off + offsetof(CPUTLBDescFast, table); 1228 1225 int add_off = offsetof(CPUTLBEntry, addend); 1229 1226 int cmp_off = (is_load ? 
offsetof(CPUTLBEntry, addr_read) 1230 1227 : offsetof(CPUTLBEntry, addr_write)); 1231 - TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; 1232 1228 target_ulong mask; 1233 1229 1234 - if (table_off > 0x7fff) { 1235 - int mask_hi = mask_off - (int16_t)mask_off; 1236 - int table_hi = table_off - (int16_t)table_off; 1237 - 1238 - table_base = TCG_TMP1; 1239 - if (likely(mask_hi == table_hi)) { 1240 - mask_base = table_base; 1241 - tcg_out_opc_imm(s, OPC_LUI, mask_base, TCG_REG_ZERO, mask_hi >> 16); 1242 - tcg_out_opc_reg(s, ALIAS_PADD, mask_base, mask_base, TCG_AREG0); 1243 - mask_off -= mask_hi; 1244 - table_off -= mask_hi; 1245 - } else { 1246 - if (mask_hi != 0) { 1247 - mask_base = TCG_TMP0; 1248 - tcg_out_opc_imm(s, OPC_LUI, 1249 - mask_base, TCG_REG_ZERO, mask_hi >> 16); 1250 - tcg_out_opc_reg(s, ALIAS_PADD, 1251 - mask_base, mask_base, TCG_AREG0); 1252 - } 1253 - table_off -= mask_off; 1254 - mask_off -= mask_hi; 1255 - tcg_out_opc_imm(s, ALIAS_PADDI, table_base, mask_base, mask_off); 1256 - } 1257 - } 1258 - 1259 1230 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ 1260 - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, mask_base, mask_off); 1261 - tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, table_base, table_off); 1231 + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_AREG0, mask_off); 1232 + tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); 1262 1233 1263 1234 /* Extract the TLB index from the address into TMP3. */ 1264 1235 tcg_out_opc_sa(s, ALIAS_TSRL, TCG_TMP3, addrl,
+8 -24
tcg/ppc/tcg-target.inc.c
··· 1498 1498 [MO_BEQ] = helper_be_stq_mmu, 1499 1499 }; 1500 1500 1501 - /* We expect tlb_mask to be before tlb_table. */ 1502 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) < 1503 - offsetof(CPUArchState, tlb_mask)); 1501 + /* We expect to use a 16-bit negative offset from ENV. */ 1502 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1503 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); 1504 1504 1505 1505 /* Perform the TLB load and compare. Places the result of the comparison 1506 1506 in CR7, loads the addend of the TLB into R3, and returns the register ··· 1514 1514 = (is_read 1515 1515 ? offsetof(CPUTLBEntry, addr_read) 1516 1516 : offsetof(CPUTLBEntry, addr_write)); 1517 - int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]); 1518 - int table_off = offsetof(CPUArchState, tlb_table[mem_index]); 1519 - TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; 1517 + int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1518 + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); 1519 + int table_off = fast_off + offsetof(CPUTLBDescFast, table); 1520 1520 unsigned s_bits = opc & MO_SIZE; 1521 1521 unsigned a_bits = get_alignment_bits(opc); 1522 1522 1523 - if (table_off > 0x7fff) { 1524 - int mask_hi = mask_off - (int16_t)mask_off; 1525 - int table_hi = table_off - (int16_t)table_off; 1526 - 1527 - table_base = TCG_REG_R4; 1528 - if (mask_hi == table_hi) { 1529 - mask_base = table_base; 1530 - } else if (mask_hi) { 1531 - mask_base = TCG_REG_R3; 1532 - tcg_out32(s, ADDIS | TAI(mask_base, TCG_AREG0, mask_hi >> 16)); 1533 - } 1534 - tcg_out32(s, ADDIS | TAI(table_base, TCG_AREG0, table_hi >> 16)); 1535 - mask_off -= mask_hi; 1536 - table_off -= table_hi; 1537 - } 1538 - 1539 1523 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. 
*/ 1540 - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, mask_base, mask_off); 1541 - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, table_base, table_off); 1524 + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off); 1525 + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off); 1542 1526 1543 1527 /* Extract the page index, shifted into place for tlb index. */ 1544 1528 if (TCG_TARGET_REG_BITS == 32) {
+6 -31
tcg/riscv/tcg-target.inc.c
··· 962 962 /* We don't support oversize guests */ 963 963 QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS); 964 964 965 - /* We expect tlb_mask to be before tlb_table. */ 966 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) < 967 - offsetof(CPUArchState, tlb_mask)); 968 - 969 - /* We expect tlb_mask to be "near" tlb_table. */ 970 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) - 971 - offsetof(CPUArchState, tlb_mask) >= 0x800); 965 + /* We expect to use a 12-bit negative offset from ENV. */ 966 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 967 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); 972 968 973 969 static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, 974 970 TCGReg addrh, TCGMemOpIdx oi, ··· 979 975 unsigned a_bits = get_alignment_bits(opc); 980 976 tcg_target_long compare_mask; 981 977 int mem_index = get_mmuidx(oi); 982 - int mask_off, table_off; 978 + int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); 979 + int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); 980 + int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); 983 981 TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0; 984 - 985 - mask_off = offsetof(CPUArchState, tlb_mask[mem_index]); 986 - table_off = offsetof(CPUArchState, tlb_table[mem_index]); 987 - if (table_off > 0x7ff) { 988 - int mask_hi = mask_off - sextreg(mask_off, 0, 12); 989 - int table_hi = table_off - sextreg(table_off, 0, 12); 990 - 991 - if (likely(mask_hi == table_hi)) { 992 - mask_base = table_base = TCG_REG_TMP1; 993 - tcg_out_opc_upper(s, OPC_LUI, mask_base, mask_hi); 994 - tcg_out_opc_reg(s, OPC_ADD, mask_base, mask_base, TCG_AREG0); 995 - mask_off -= mask_hi; 996 - table_off -= mask_hi; 997 - } else { 998 - mask_base = TCG_REG_TMP0; 999 - table_base = TCG_REG_TMP1; 1000 - tcg_out_opc_upper(s, OPC_LUI, mask_base, mask_hi); 1001 - tcg_out_opc_reg(s, OPC_ADD, mask_base, mask_base, TCG_AREG0); 1002 - table_off -= mask_off; 1003 - mask_off -= mask_hi; 1004 - tcg_out_opc_imm(s, 
OPC_ADDI, table_base, mask_base, mask_off); 1005 - } 1006 - } 1007 982 1008 983 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_off); 1009 984 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_off);
+6 -7
tcg/s390/tcg-target.inc.c
··· 1538 1538 #if defined(CONFIG_SOFTMMU) 1539 1539 #include "tcg-ldst.inc.c" 1540 1540 1541 - /* We're expecting to use a 20-bit signed offset on the tlb memory ops. */ 1542 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_mask[NB_MMU_MODES - 1]) 1543 - > 0x7ffff); 1544 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1]) 1545 - > 0x7ffff); 1541 + /* We're expecting to use a 20-bit negative offset on the tlb memory ops. */ 1542 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1543 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19)); 1546 1544 1547 1545 /* Load and compare a TLB entry, leaving the flags set. Loads the TLB 1548 1546 addend into R2. Returns a register with the santitized guest address. */ ··· 1553 1551 unsigned a_bits = get_alignment_bits(opc); 1554 1552 unsigned s_mask = (1 << s_bits) - 1; 1555 1553 unsigned a_mask = (1 << a_bits) - 1; 1556 - int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]); 1557 - int table_off = offsetof(CPUArchState, tlb_table[mem_index]); 1554 + int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1555 + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); 1556 + int table_off = fast_off + offsetof(CPUTLBDescFast, table); 1558 1557 int ofs, a_off; 1559 1558 uint64_t tlb_mask; 1560 1559
+10 -30
tcg/sparc/tcg-target.inc.c
··· 1062 1062 } 1063 1063 1064 1064 #if defined(CONFIG_SOFTMMU) 1065 + 1066 + /* We expect to use a 13-bit negative offset from ENV. */ 1067 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); 1068 + QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12)); 1069 + 1065 1070 /* Perform the TLB load and compare. 1066 1071 1067 1072 Inputs: ··· 1075 1080 The result of the TLB comparison is in %[ix]cc. The sanitized address 1076 1081 is in the returned register, maybe %o0. The TLB addend is in %o1. */ 1077 1082 1078 - /* We expect tlb_mask to be before tlb_table. */ 1079 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) < 1080 - offsetof(CPUArchState, tlb_mask)); 1081 - 1082 - /* We expect tlb_mask to be "near" tlb_table. */ 1083 - QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) - 1084 - offsetof(CPUArchState, tlb_mask) >= (1 << 13)); 1085 - 1086 1083 static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, 1087 1084 TCGMemOp opc, int which) 1088 1085 { 1089 - int mask_off = offsetof(CPUArchState, tlb_mask[mem_index]); 1090 - int table_off = offsetof(CPUArchState, tlb_table[mem_index]); 1091 - TCGReg base = TCG_AREG0; 1086 + int fast_off = TLB_MASK_TABLE_OFS(mem_index); 1087 + int mask_off = fast_off + offsetof(CPUTLBDescFast, mask); 1088 + int table_off = fast_off + offsetof(CPUTLBDescFast, table); 1092 1089 const TCGReg r0 = TCG_REG_O0; 1093 1090 const TCGReg r1 = TCG_REG_O1; 1094 1091 const TCGReg r2 = TCG_REG_O2; ··· 1096 1093 unsigned a_bits = get_alignment_bits(opc); 1097 1094 tcg_target_long compare_mask; 1098 1095 1099 - if (!check_fit_i32(table_off, 13)) { 1100 - int table_hi; 1101 - 1102 - base = r1; 1103 - if (table_off <= 2 * 0xfff) { 1104 - table_hi = 0xfff; 1105 - tcg_out_arithi(s, base, TCG_AREG0, table_hi, ARITH_ADD); 1106 - } else { 1107 - table_hi = table_off & ~0x3ff; 1108 - tcg_out_sethi(s, base, table_hi); 1109 - tcg_out_arith(s, base, TCG_AREG0, base, ARITH_ADD); 1110 - } 1111 - mask_off -= table_hi; 1112 - table_off -= table_hi; 
1113 - tcg_debug_assert(check_fit_i32(mask_off, 13)); 1114 - } 1115 - 1116 1096 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */ 1117 - tcg_out_ld(s, TCG_TYPE_PTR, r0, base, mask_off); 1118 - tcg_out_ld(s, TCG_TYPE_PTR, r1, base, table_off); 1097 + tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off); 1098 + tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off); 1119 1099 1120 1100 /* Extract the page index, shifted into place for tlb index. */ 1121 1101 tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,