QEMU with hacks to log DMA reads & writes — jcs.org/2018/11/12/vfio

target/arm: Use env_cpu, env_archcpu

Cleanup in the boilerplate that each target must define.
Replace arm_env_get_cpu with env_archcpu. The combination
CPU(arm_env_get_cpu) should have used ENV_GET_CPU to begin
with; use env_cpu now.

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

+88 -94
+3 -3
linux-user/aarch64/cpu_loop.c
··· 73 73 /* AArch64 main loop */ 74 74 void cpu_loop(CPUARMState *env) 75 75 { 76 - CPUState *cs = CPU(arm_env_get_cpu(env)); 76 + CPUState *cs = env_cpu(env); 77 77 int trapnr; 78 78 abi_long ret; 79 79 target_siginfo_t info; ··· 150 150 151 151 void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) 152 152 { 153 - ARMCPU *cpu = arm_env_get_cpu(env); 154 - CPUState *cs = CPU(cpu); 153 + ARMCPU *cpu = env_archcpu(env); 154 + CPUState *cs = env_cpu(env); 155 155 TaskState *ts = cs->opaque; 156 156 struct image_info *info = ts->info; 157 157 int i;
+2 -2
linux-user/aarch64/signal.c
··· 314 314 break; 315 315 316 316 case TARGET_SVE_MAGIC: 317 - if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { 317 + if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 318 318 vq = (env->vfp.zcr_el[1] & 0xf) + 1; 319 319 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); 320 320 if (!sve && size == sve_size) { ··· 433 433 &layout); 434 434 435 435 /* SVE state needs saving only if it exists. */ 436 - if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(env))) { 436 + if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { 437 437 vq = (env->vfp.zcr_el[1] & 0xf) + 1; 438 438 sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16); 439 439 sve_ofs = alloc_sigframe_space(sve_size, &layout);
+1 -1
linux-user/arm/cpu_loop.c
··· 206 206 207 207 void cpu_loop(CPUARMState *env) 208 208 { 209 - CPUState *cs = CPU(arm_env_get_cpu(env)); 209 + CPUState *cs = env_cpu(env); 210 210 int trapnr; 211 211 unsigned int n, insn; 212 212 target_siginfo_t info;
+4 -4
linux-user/syscall.c
··· 9781 9781 * even though the current architectural maximum is VQ=16. 9782 9782 */ 9783 9783 ret = -TARGET_EINVAL; 9784 - if (cpu_isar_feature(aa64_sve, arm_env_get_cpu(cpu_env)) 9784 + if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env)) 9785 9785 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) { 9786 9786 CPUARMState *env = cpu_env; 9787 - ARMCPU *cpu = arm_env_get_cpu(env); 9787 + ARMCPU *cpu = env_archcpu(env); 9788 9788 uint32_t vq, old_vq; 9789 9789 9790 9790 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1; ··· 9801 9801 case TARGET_PR_SVE_GET_VL: 9802 9802 ret = -TARGET_EINVAL; 9803 9803 { 9804 - ARMCPU *cpu = arm_env_get_cpu(cpu_env); 9804 + ARMCPU *cpu = env_archcpu(cpu_env); 9805 9805 if (cpu_isar_feature(aa64_sve, cpu)) { 9806 9806 ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16; 9807 9807 } ··· 9810 9810 case TARGET_PR_PAC_RESET_KEYS: 9811 9811 { 9812 9812 CPUARMState *env = cpu_env; 9813 - ARMCPU *cpu = arm_env_get_cpu(env); 9813 + ARMCPU *cpu = env_archcpu(env); 9814 9814 9815 9815 if (arg3 || arg4 || arg5) { 9816 9816 return -TARGET_EINVAL;
+2 -2
target/arm/arm-semi.c
··· 257 257 */ 258 258 target_ulong do_arm_semihosting(CPUARMState *env) 259 259 { 260 - ARMCPU *cpu = arm_env_get_cpu(env); 261 - CPUState *cs = CPU(cpu); 260 + ARMCPU *cpu = env_archcpu(env); 261 + CPUState *cs = env_cpu(env); 262 262 target_ulong args; 263 263 target_ulong arg0, arg1, arg2, arg3; 264 264 char * s;
-5
target/arm/cpu.h
··· 913 913 uint32_t sve_max_vq; 914 914 }; 915 915 916 - static inline ARMCPU *arm_env_get_cpu(CPUARMState *env) 917 - { 918 - return container_of(env, ARMCPU, env); 919 - } 920 - 921 916 void arm_cpu_post_init(Object *obj); 922 917 923 918 uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);
+1 -1
target/arm/cpu64.c
··· 43 43 #ifndef CONFIG_USER_ONLY 44 44 static uint64_t a57_a53_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri) 45 45 { 46 - ARMCPU *cpu = arm_env_get_cpu(env); 46 + ARMCPU *cpu = env_archcpu(env); 47 47 48 48 /* Number of cores is in [25:24]; otherwise we RAZ */ 49 49 return (cpu->core_count - 1) << 24;
+2 -2
target/arm/helper-a64.c
··· 1005 1005 } 1006 1006 1007 1007 qemu_mutex_lock_iothread(); 1008 - arm_call_pre_el_change_hook(arm_env_get_cpu(env)); 1008 + arm_call_pre_el_change_hook(env_archcpu(env)); 1009 1009 qemu_mutex_unlock_iothread(); 1010 1010 1011 1011 if (!return_to_aa64) { ··· 1047 1047 aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64); 1048 1048 1049 1049 qemu_mutex_lock_iothread(); 1050 - arm_call_el_change_hook(arm_env_get_cpu(env)); 1050 + arm_call_el_change_hook(env_archcpu(env)); 1051 1051 qemu_mutex_unlock_iothread(); 1052 1052 1053 1053 return;
+60 -60
target/arm/helper.c
··· 227 227 228 228 static int arm_gdb_get_sysreg(CPUARMState *env, uint8_t *buf, int reg) 229 229 { 230 - ARMCPU *cpu = arm_env_get_cpu(env); 230 + ARMCPU *cpu = env_archcpu(env); 231 231 const ARMCPRegInfo *ri; 232 232 uint32_t key; 233 233 ··· 548 548 549 549 static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 550 550 { 551 - ARMCPU *cpu = arm_env_get_cpu(env); 551 + ARMCPU *cpu = env_archcpu(env); 552 552 553 553 raw_write(env, ri, value); 554 554 tlb_flush(CPU(cpu)); /* Flush TLB as domain not tracked in TLB */ ··· 556 556 557 557 static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 558 558 { 559 - ARMCPU *cpu = arm_env_get_cpu(env); 559 + ARMCPU *cpu = env_archcpu(env); 560 560 561 561 if (raw_read(env, ri) != value) { 562 562 /* Unlike real hardware the qemu TLB uses virtual addresses, ··· 570 570 static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri, 571 571 uint64_t value) 572 572 { 573 - ARMCPU *cpu = arm_env_get_cpu(env); 573 + ARMCPU *cpu = env_archcpu(env); 574 574 575 575 if (raw_read(env, ri) != value && !arm_feature(env, ARM_FEATURE_PMSA) 576 576 && !extended_addresses_enabled(env)) { ··· 631 631 uint64_t value) 632 632 { 633 633 /* Invalidate all (TLBIALL) */ 634 - ARMCPU *cpu = arm_env_get_cpu(env); 634 + ARMCPU *cpu = env_archcpu(env); 635 635 636 636 if (tlb_force_broadcast(env)) { 637 637 tlbiall_is_write(env, NULL, value); ··· 645 645 uint64_t value) 646 646 { 647 647 /* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */ 648 - ARMCPU *cpu = arm_env_get_cpu(env); 648 + ARMCPU *cpu = env_archcpu(env); 649 649 650 650 if (tlb_force_broadcast(env)) { 651 651 tlbimva_is_write(env, NULL, value); ··· 659 659 uint64_t value) 660 660 { 661 661 /* Invalidate by ASID (TLBIASID) */ 662 - ARMCPU *cpu = arm_env_get_cpu(env); 662 + ARMCPU *cpu = env_archcpu(env); 663 663 664 664 if (tlb_force_broadcast(env)) { 665 665 tlbiasid_is_write(env, NULL, value); ··· 673 673 uint64_t 
value) 674 674 { 675 675 /* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */ 676 - ARMCPU *cpu = arm_env_get_cpu(env); 676 + ARMCPU *cpu = env_archcpu(env); 677 677 678 678 if (tlb_force_broadcast(env)) { 679 679 tlbimvaa_is_write(env, NULL, value); ··· 1353 1353 1354 1354 static void pmu_update_irq(CPUARMState *env) 1355 1355 { 1356 - ARMCPU *cpu = arm_env_get_cpu(env); 1356 + ARMCPU *cpu = env_archcpu(env); 1357 1357 qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) && 1358 1358 (env->cp15.c9_pminten & env->cp15.c9_pmovsr)); 1359 1359 } ··· 1408 1408 if (overflow_in > 0) { 1409 1409 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1410 1410 overflow_in; 1411 - ARMCPU *cpu = arm_env_get_cpu(env); 1411 + ARMCPU *cpu = env_archcpu(env); 1412 1412 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1413 1413 } 1414 1414 #endif ··· 1457 1457 if (overflow_in > 0) { 1458 1458 int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1459 1459 overflow_in; 1460 - ARMCPU *cpu = arm_env_get_cpu(env); 1460 + ARMCPU *cpu = env_archcpu(env); 1461 1461 timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at); 1462 1462 } 1463 1463 #endif ··· 1865 1865 { 1866 1866 /* Begin with base v8.0 state. */ 1867 1867 uint32_t valid_mask = 0x3fff; 1868 - ARMCPU *cpu = arm_env_get_cpu(env); 1868 + ARMCPU *cpu = env_archcpu(env); 1869 1869 1870 1870 if (arm_el_is_aa64(env, 3)) { 1871 1871 value |= SCR_FW | SCR_AW; /* these two bits are RES1. 
*/ ··· 1902 1902 1903 1903 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri) 1904 1904 { 1905 - ARMCPU *cpu = arm_env_get_cpu(env); 1905 + ARMCPU *cpu = env_archcpu(env); 1906 1906 1907 1907 /* Acquire the CSSELR index from the bank corresponding to the CCSIDR 1908 1908 * bank ··· 2452 2452 static void gt_timer_reset(CPUARMState *env, const ARMCPRegInfo *ri, 2453 2453 int timeridx) 2454 2454 { 2455 - ARMCPU *cpu = arm_env_get_cpu(env); 2455 + ARMCPU *cpu = env_archcpu(env); 2456 2456 2457 2457 timer_del(cpu->gt_timer[timeridx]); 2458 2458 } ··· 2473 2473 { 2474 2474 trace_arm_gt_cval_write(timeridx, value); 2475 2475 env->cp15.c14_timer[timeridx].cval = value; 2476 - gt_recalc_timer(arm_env_get_cpu(env), timeridx); 2476 + gt_recalc_timer(env_archcpu(env), timeridx); 2477 2477 } 2478 2478 2479 2479 static uint64_t gt_tval_read(CPUARMState *env, const ARMCPRegInfo *ri, ··· 2494 2494 trace_arm_gt_tval_write(timeridx, value); 2495 2495 env->cp15.c14_timer[timeridx].cval = gt_get_countervalue(env) - offset + 2496 2496 sextract64(value, 0, 32); 2497 - gt_recalc_timer(arm_env_get_cpu(env), timeridx); 2497 + gt_recalc_timer(env_archcpu(env), timeridx); 2498 2498 } 2499 2499 2500 2500 static void gt_ctl_write(CPUARMState *env, const ARMCPRegInfo *ri, 2501 2501 int timeridx, 2502 2502 uint64_t value) 2503 2503 { 2504 - ARMCPU *cpu = arm_env_get_cpu(env); 2504 + ARMCPU *cpu = env_archcpu(env); 2505 2505 uint32_t oldval = env->cp15.c14_timer[timeridx].ctl; 2506 2506 2507 2507 trace_arm_gt_ctl_write(timeridx, value); ··· 2579 2579 static void gt_cntvoff_write(CPUARMState *env, const ARMCPRegInfo *ri, 2580 2580 uint64_t value) 2581 2581 { 2582 - ARMCPU *cpu = arm_env_get_cpu(env); 2582 + ARMCPU *cpu = env_archcpu(env); 2583 2583 2584 2584 trace_arm_gt_cntvoff_write(value); 2585 2585 raw_write(env, ri, value); ··· 3212 3212 static void pmsav7_write(CPUARMState *env, const ARMCPRegInfo *ri, 3213 3213 uint64_t value) 3214 3214 { 3215 - ARMCPU *cpu = 
arm_env_get_cpu(env); 3215 + ARMCPU *cpu = env_archcpu(env); 3216 3216 uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri); 3217 3217 3218 3218 if (!u32p) { ··· 3227 3227 static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3228 3228 uint64_t value) 3229 3229 { 3230 - ARMCPU *cpu = arm_env_get_cpu(env); 3230 + ARMCPU *cpu = env_archcpu(env); 3231 3231 uint32_t nrgs = cpu->pmsav7_dregion; 3232 3232 3233 3233 if (value >= nrgs) { ··· 3355 3355 static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3356 3356 uint64_t value) 3357 3357 { 3358 - ARMCPU *cpu = arm_env_get_cpu(env); 3358 + ARMCPU *cpu = env_archcpu(env); 3359 3359 TCR *tcr = raw_ptr(env, ri); 3360 3360 3361 3361 if (arm_feature(env, ARM_FEATURE_LPAE)) { ··· 3384 3384 static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, 3385 3385 uint64_t value) 3386 3386 { 3387 - ARMCPU *cpu = arm_env_get_cpu(env); 3387 + ARMCPU *cpu = env_archcpu(env); 3388 3388 TCR *tcr = raw_ptr(env, ri); 3389 3389 3390 3390 /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */ ··· 3398 3398 /* If the ASID changes (with a 64-bit write), we must flush the TLB. */ 3399 3399 if (cpreg_field_is_64bit(ri) && 3400 3400 extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { 3401 - ARMCPU *cpu = arm_env_get_cpu(env); 3401 + ARMCPU *cpu = env_archcpu(env); 3402 3402 tlb_flush(CPU(cpu)); 3403 3403 } 3404 3404 raw_write(env, ri, value); ··· 3407 3407 static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, 3408 3408 uint64_t value) 3409 3409 { 3410 - ARMCPU *cpu = arm_env_get_cpu(env); 3410 + ARMCPU *cpu = env_archcpu(env); 3411 3411 CPUState *cs = CPU(cpu); 3412 3412 3413 3413 /* Accesses to VTTBR may change the VMID so we must flush the TLB. 
*/ ··· 3497 3497 uint64_t value) 3498 3498 { 3499 3499 /* Wait-for-interrupt (deprecated) */ 3500 - cpu_interrupt(CPU(arm_env_get_cpu(env)), CPU_INTERRUPT_HALT); 3500 + cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); 3501 3501 } 3502 3502 3503 3503 static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, ··· 3650 3650 3651 3651 static uint64_t midr_read(CPUARMState *env, const ARMCPRegInfo *ri) 3652 3652 { 3653 - ARMCPU *cpu = arm_env_get_cpu(env); 3653 + ARMCPU *cpu = env_archcpu(env); 3654 3654 unsigned int cur_el = arm_current_el(env); 3655 3655 bool secure = arm_is_secure(env); 3656 3656 ··· 3662 3662 3663 3663 static uint64_t mpidr_read_val(CPUARMState *env) 3664 3664 { 3665 - ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env)); 3665 + ARMCPU *cpu = env_archcpu(env); 3666 3666 uint64_t mpidr = cpu->mp_affinity; 3667 3667 3668 3668 if (arm_feature(env, ARM_FEATURE_V7MP)) { ··· 3815 3815 * stage 2 translations, whereas most other scopes only invalidate 3816 3816 * stage 1 translations. 3817 3817 */ 3818 - ARMCPU *cpu = arm_env_get_cpu(env); 3818 + ARMCPU *cpu = env_archcpu(env); 3819 3819 CPUState *cs = CPU(cpu); 3820 3820 3821 3821 if (arm_is_secure_below_el3(env)) { ··· 3839 3839 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, 3840 3840 uint64_t value) 3841 3841 { 3842 - ARMCPU *cpu = arm_env_get_cpu(env); 3842 + ARMCPU *cpu = env_archcpu(env); 3843 3843 CPUState *cs = CPU(cpu); 3844 3844 3845 3845 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E2); ··· 3848 3848 static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, 3849 3849 uint64_t value) 3850 3850 { 3851 - ARMCPU *cpu = arm_env_get_cpu(env); 3851 + ARMCPU *cpu = env_archcpu(env); 3852 3852 CPUState *cs = CPU(cpu); 3853 3853 3854 3854 tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S1E3); ··· 3904 3904 * Currently handles both VAE2 and VALE2, since we don't support 3905 3905 * flush-last-level-only. 
3906 3906 */ 3907 - ARMCPU *cpu = arm_env_get_cpu(env); 3907 + ARMCPU *cpu = env_archcpu(env); 3908 3908 CPUState *cs = CPU(cpu); 3909 3909 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3910 3910 ··· 3918 3918 * Currently handles both VAE3 and VALE3, since we don't support 3919 3919 * flush-last-level-only. 3920 3920 */ 3921 - ARMCPU *cpu = arm_env_get_cpu(env); 3921 + ARMCPU *cpu = env_archcpu(env); 3922 3922 CPUState *cs = CPU(cpu); 3923 3923 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3924 3924 ··· 3928 3928 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, 3929 3929 uint64_t value) 3930 3930 { 3931 - ARMCPU *cpu = arm_env_get_cpu(env); 3931 + ARMCPU *cpu = env_archcpu(env); 3932 3932 CPUState *cs = CPU(cpu); 3933 3933 bool sec = arm_is_secure_below_el3(env); 3934 3934 uint64_t pageaddr = sextract64(value << 12, 0, 56); ··· 3952 3952 * since we don't support flush-for-specific-ASID-only or 3953 3953 * flush-last-level-only. 3954 3954 */ 3955 - ARMCPU *cpu = arm_env_get_cpu(env); 3955 + ARMCPU *cpu = env_archcpu(env); 3956 3956 CPUState *cs = CPU(cpu); 3957 3957 uint64_t pageaddr = sextract64(value << 12, 0, 56); 3958 3958 ··· 4001 4001 * translation information. 4002 4002 * This must NOP if EL2 isn't implemented or SCR_EL3.NS is zero. 
4003 4003 */ 4004 - ARMCPU *cpu = arm_env_get_cpu(env); 4004 + ARMCPU *cpu = env_archcpu(env); 4005 4005 CPUState *cs = CPU(cpu); 4006 4006 uint64_t pageaddr; 4007 4007 ··· 4044 4044 4045 4045 static uint64_t aa64_dczid_read(CPUARMState *env, const ARMCPRegInfo *ri) 4046 4046 { 4047 - ARMCPU *cpu = arm_env_get_cpu(env); 4047 + ARMCPU *cpu = env_archcpu(env); 4048 4048 int dzp_bit = 1 << 4; 4049 4049 4050 4050 /* DZP indicates whether DC ZVA access is allowed */ ··· 4079 4079 static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, 4080 4080 uint64_t value) 4081 4081 { 4082 - ARMCPU *cpu = arm_env_get_cpu(env); 4082 + ARMCPU *cpu = env_archcpu(env); 4083 4083 4084 4084 if (raw_read(env, ri) == value) { 4085 4085 /* Skip the TLB flush if nothing actually changed; Linux likes ··· 4571 4571 4572 4572 static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) 4573 4573 { 4574 - ARMCPU *cpu = arm_env_get_cpu(env); 4574 + ARMCPU *cpu = env_archcpu(env); 4575 4575 uint64_t valid_mask = HCR_MASK; 4576 4576 4577 4577 if (arm_feature(env, ARM_FEATURE_EL3)) { ··· 5238 5238 */ 5239 5239 uint32_t sve_zcr_len_for_el(CPUARMState *env, int el) 5240 5240 { 5241 - ARMCPU *cpu = arm_env_get_cpu(env); 5241 + ARMCPU *cpu = env_archcpu(env); 5242 5242 uint32_t zcr_len = cpu->sve_max_vq - 1; 5243 5243 5244 5244 if (el <= 1) { ··· 5406 5406 static void dbgwvr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5407 5407 uint64_t value) 5408 5408 { 5409 - ARMCPU *cpu = arm_env_get_cpu(env); 5409 + ARMCPU *cpu = env_archcpu(env); 5410 5410 int i = ri->crm; 5411 5411 5412 5412 /* Bits [63:49] are hardwired to the value of bit [48]; that is, the ··· 5422 5422 static void dbgwcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5423 5423 uint64_t value) 5424 5424 { 5425 - ARMCPU *cpu = arm_env_get_cpu(env); 5425 + ARMCPU *cpu = env_archcpu(env); 5426 5426 int i = ri->crm; 5427 5427 5428 5428 raw_write(env, ri, value); ··· 5524 5524 static void dbgbvr_write(CPUARMState 
*env, const ARMCPRegInfo *ri, 5525 5525 uint64_t value) 5526 5526 { 5527 - ARMCPU *cpu = arm_env_get_cpu(env); 5527 + ARMCPU *cpu = env_archcpu(env); 5528 5528 int i = ri->crm; 5529 5529 5530 5530 raw_write(env, ri, value); ··· 5534 5534 static void dbgbcr_write(CPUARMState *env, const ARMCPRegInfo *ri, 5535 5535 uint64_t value) 5536 5536 { 5537 - ARMCPU *cpu = arm_env_get_cpu(env); 5537 + ARMCPU *cpu = env_archcpu(env); 5538 5538 int i = ri->crm; 5539 5539 5540 5540 /* BAS[3] is a read-only copy of BAS[2], and BAS[1] a read-only ··· 5630 5630 */ 5631 5631 static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) 5632 5632 { 5633 - ARMCPU *cpu = arm_env_get_cpu(env); 5633 + ARMCPU *cpu = env_archcpu(env); 5634 5634 uint64_t pfr1 = cpu->id_pfr1; 5635 5635 5636 5636 if (env->gicv3state) { ··· 5641 5641 5642 5642 static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) 5643 5643 { 5644 - ARMCPU *cpu = arm_env_get_cpu(env); 5644 + ARMCPU *cpu = env_archcpu(env); 5645 5645 uint64_t pfr0 = cpu->isar.id_aa64pfr0; 5646 5646 5647 5647 if (env->gicv3state) { ··· 7421 7421 /* These should probably raise undefined insn exceptions. 
*/ 7422 7422 void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val) 7423 7423 { 7424 - ARMCPU *cpu = arm_env_get_cpu(env); 7424 + ARMCPU *cpu = env_archcpu(env); 7425 7425 7426 7426 cpu_abort(CPU(cpu), "v7m_msr %d\n", reg); 7427 7427 } 7428 7428 7429 7429 uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg) 7430 7430 { 7431 - ARMCPU *cpu = arm_env_get_cpu(env); 7431 + ARMCPU *cpu = env_archcpu(env); 7432 7432 7433 7433 cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg); 7434 7434 return 0; ··· 7488 7488 7489 7489 static void switch_mode(CPUARMState *env, int mode) 7490 7490 { 7491 - ARMCPU *cpu = arm_env_get_cpu(env); 7491 + ARMCPU *cpu = env_archcpu(env); 7492 7492 7493 7493 if (mode != ARM_CPU_MODE_USR) { 7494 7494 cpu_abort(CPU(cpu), "Tried to switch out of user mode\n"); ··· 7831 7831 * PreserveFPState() pseudocode. 7832 7832 * We may throw an exception if the stacking fails. 7833 7833 */ 7834 - ARMCPU *cpu = arm_env_get_cpu(env); 7834 + ARMCPU *cpu = env_archcpu(env); 7835 7835 bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK; 7836 7836 bool negpri = !(env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_HFRDY_MASK); 7837 7837 bool is_priv = !(env->v7m.fpccr[is_secure] & R_V7M_FPCCR_USER_MASK); ··· 10938 10938 target_ulong *page_size, 10939 10939 ARMMMUFaultInfo *fi) 10940 10940 { 10941 - CPUState *cs = CPU(arm_env_get_cpu(env)); 10941 + CPUState *cs = env_cpu(env); 10942 10942 int level = 1; 10943 10943 uint32_t table; 10944 10944 uint32_t desc; ··· 11059 11059 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot, 11060 11060 target_ulong *page_size, ARMMMUFaultInfo *fi) 11061 11061 { 11062 - CPUState *cs = CPU(arm_env_get_cpu(env)); 11062 + CPUState *cs = env_cpu(env); 11063 11063 int level = 1; 11064 11064 uint32_t table; 11065 11065 uint32_t desc; ··· 11444 11444 target_ulong *page_size_ptr, 11445 11445 ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs) 11446 11446 { 11447 - ARMCPU *cpu = arm_env_get_cpu(env); 11447 + ARMCPU *cpu = env_archcpu(env); 11448 11448 
CPUState *cs = CPU(cpu); 11449 11449 /* Read an LPAE long-descriptor translation table. */ 11450 11450 ARMFaultType fault_type = ARMFault_Translation; ··· 11802 11802 target_ulong *page_size, 11803 11803 ARMMMUFaultInfo *fi) 11804 11804 { 11805 - ARMCPU *cpu = arm_env_get_cpu(env); 11805 + ARMCPU *cpu = env_archcpu(env); 11806 11806 int n; 11807 11807 bool is_user = regime_is_user(env, mmu_idx); 11808 11808 ··· 12006 12006 * pseudocode SecurityCheck() function. 12007 12007 * We assume the caller has zero-initialized *sattrs. 12008 12008 */ 12009 - ARMCPU *cpu = arm_env_get_cpu(env); 12009 + ARMCPU *cpu = env_archcpu(env); 12010 12010 int r; 12011 12011 bool idau_exempt = false, idau_ns = true, idau_nsc = true; 12012 12012 int idau_region = IREGION_NOTVALID; ··· 12119 12119 * We set is_subpage to true if the region hit doesn't cover the 12120 12120 * entire TARGET_PAGE the address is within. 12121 12121 */ 12122 - ARMCPU *cpu = arm_env_get_cpu(env); 12122 + ARMCPU *cpu = env_archcpu(env); 12123 12123 bool is_user = regime_is_user(env, mmu_idx); 12124 12124 uint32_t secure = regime_is_secure(env, mmu_idx); 12125 12125 int n; ··· 12899 12899 limit = is_psp ? env->v7m.psplim[false] : env->v7m.msplim[false]; 12900 12900 12901 12901 if (val < limit) { 12902 - CPUState *cs = CPU(arm_env_get_cpu(env)); 12902 + CPUState *cs = env_cpu(env); 12903 12903 12904 12904 cpu_restore_state(cs, GETPC(), true); 12905 12905 raise_exception(env, EXCP_STKOF, 0, 1); ··· 13180 13180 * alignment faults or any memory attribute handling). 
13181 13181 */ 13182 13182 13183 - ARMCPU *cpu = arm_env_get_cpu(env); 13183 + ARMCPU *cpu = env_archcpu(env); 13184 13184 uint64_t blocklen = 4 << cpu->dcz_blocksize; 13185 13185 uint64_t vaddr = vaddr_in & ~(blocklen - 1); 13186 13186 ··· 13680 13680 uint32_t flags = 0; 13681 13681 13682 13682 if (is_a64(env)) { 13683 - ARMCPU *cpu = arm_env_get_cpu(env); 13683 + ARMCPU *cpu = env_archcpu(env); 13684 13684 uint64_t sctlr; 13685 13685 13686 13686 *pc = env->pc; ··· 13853 13853 uint64_t pmask; 13854 13854 13855 13855 assert(vq >= 1 && vq <= ARM_MAX_VQ); 13856 - assert(vq <= arm_env_get_cpu(env)->sve_max_vq); 13856 + assert(vq <= env_archcpu(env)->sve_max_vq); 13857 13857 13858 13858 /* Zap the high bits of the zregs. */ 13859 13859 for (i = 0; i < 32; i++) { ··· 13879 13879 void aarch64_sve_change_el(CPUARMState *env, int old_el, 13880 13880 int new_el, bool el0_a64) 13881 13881 { 13882 - ARMCPU *cpu = arm_env_get_cpu(env); 13882 + ARMCPU *cpu = env_archcpu(env); 13883 13883 int old_len, new_len; 13884 13884 bool old_a64, new_a64; 13885 13885
+10 -11
target/arm/op_helper.c
··· 31 31 static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp, 32 32 uint32_t syndrome, uint32_t target_el) 33 33 { 34 - CPUState *cs = CPU(arm_env_get_cpu(env)); 34 + CPUState *cs = env_cpu(env); 35 35 36 36 if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) { 37 37 /* ··· 224 224 * raising an exception if the limit is breached. 225 225 */ 226 226 if (newvalue < v7m_sp_limit(env)) { 227 - CPUState *cs = CPU(arm_env_get_cpu(env)); 227 + CPUState *cs = env_cpu(env); 228 228 229 229 /* 230 230 * Stack limit exceptions are a rare case, so rather than syncing ··· 427 427 428 428 void HELPER(wfi)(CPUARMState *env, uint32_t insn_len) 429 429 { 430 - CPUState *cs = CPU(arm_env_get_cpu(env)); 430 + CPUState *cs = env_cpu(env); 431 431 int target_el = check_wfx_trap(env, false); 432 432 433 433 if (cpu_has_work(cs)) { ··· 462 462 463 463 void HELPER(yield)(CPUARMState *env) 464 464 { 465 - ARMCPU *cpu = arm_env_get_cpu(env); 466 - CPUState *cs = CPU(cpu); 465 + CPUState *cs = env_cpu(env); 467 466 468 467 /* This is a non-trappable hint instruction that generally indicates 469 468 * that the guest is currently busy-looping. Yield control back to the ··· 481 480 */ 482 481 void HELPER(exception_internal)(CPUARMState *env, uint32_t excp) 483 482 { 484 - CPUState *cs = CPU(arm_env_get_cpu(env)); 483 + CPUState *cs = env_cpu(env); 485 484 486 485 assert(excp_is_internal(excp)); 487 486 cs->exception_index = excp; ··· 524 523 void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) 525 524 { 526 525 qemu_mutex_lock_iothread(); 527 - arm_call_pre_el_change_hook(arm_env_get_cpu(env)); 526 + arm_call_pre_el_change_hook(env_archcpu(env)); 528 527 qemu_mutex_unlock_iothread(); 529 528 530 529 cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn); ··· 537 536 env->regs[15] &= (env->thumb ? 
~1 : ~3); 538 537 539 538 qemu_mutex_lock_iothread(); 540 - arm_call_el_change_hook(arm_env_get_cpu(env)); 539 + arm_call_el_change_hook(env_archcpu(env)); 541 540 qemu_mutex_unlock_iothread(); 542 541 } 543 542 ··· 842 841 843 842 void HELPER(pre_hvc)(CPUARMState *env) 844 843 { 845 - ARMCPU *cpu = arm_env_get_cpu(env); 844 + ARMCPU *cpu = env_archcpu(env); 846 845 int cur_el = arm_current_el(env); 847 846 /* FIXME: Use actual secure state. */ 848 847 bool secure = false; ··· 882 881 883 882 void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome) 884 883 { 885 - ARMCPU *cpu = arm_env_get_cpu(env); 884 + ARMCPU *cpu = env_archcpu(env); 886 885 int cur_el = arm_current_el(env); 887 886 bool secure = arm_is_secure(env); 888 887 bool smd_flag = env->cp15.scr_el3 & SCR_SMD; ··· 1156 1155 1157 1156 void HELPER(check_breakpoints)(CPUARMState *env) 1158 1157 { 1159 - ARMCPU *cpu = arm_env_get_cpu(env); 1158 + ARMCPU *cpu = env_archcpu(env); 1160 1159 1161 1160 if (check_breakpoints(cpu)) { 1162 1161 HELPER(exception_internal(env, EXCP_DEBUG));
+1 -1
target/arm/translate-a64.c
··· 14289 14289 { 14290 14290 DisasContext *dc = container_of(dcbase, DisasContext, base); 14291 14291 CPUARMState *env = cpu->env_ptr; 14292 - ARMCPU *arm_cpu = arm_env_get_cpu(env); 14292 + ARMCPU *arm_cpu = env_archcpu(env); 14293 14293 uint32_t tb_flags = dc->base.tb->flags; 14294 14294 int bound, core_mmu_idx; 14295 14295
+1 -1
target/arm/translate.c
··· 13408 13408 { 13409 13409 DisasContext *dc = container_of(dcbase, DisasContext, base); 13410 13410 CPUARMState *env = cs->env_ptr; 13411 - ARMCPU *cpu = arm_env_get_cpu(env); 13411 + ARMCPU *cpu = env_archcpu(env); 13412 13412 uint32_t tb_flags = dc->base.tb->flags; 13413 13413 uint32_t condexec, core_mmu_idx; 13414 13414
+1 -1
target/arm/vfp_helper.c
··· 101 101 uint32_t changed = env->vfp.xregs[ARM_VFP_FPSCR]; 102 102 103 103 /* When ARMv8.2-FP16 is not supported, FZ16 is RES0. */ 104 - if (!cpu_isar_feature(aa64_fp16, arm_env_get_cpu(env))) { 104 + if (!cpu_isar_feature(aa64_fp16, env_archcpu(env))) { 105 105 val &= ~FPCR_FZ16; 106 106 } 107 107