qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-5.1-20200507' into staging

ppc patch queue for 2020-05-07

First pull request for qemu-5.1. This includes:
* Removal of all remaining cases where we had CAS triggered reboots
* A number of improvements to NMI injection
* Support for partition scoped radix translation in softmmu
* Some fixes for NVDIMM handling
* A handful of other minor fixes

# gpg: Signature made Thu 07 May 2020 06:00:55 BST
# gpg: using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg: aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg: aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg: aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-5.1-20200507:
target-ppc: fix rlwimi, rlwinm, rlwnm for Clang-9
spapr_nvdimm: Tweak error messages
spapr_nvdimm.c: make 'label-size' mandatory
target/ppc: Add support for Radix partition-scoped translation
target/ppc: Rework ppc_radix64_walk_tree() for partition-scoped translation
target/ppc: Extend ppc_radix64_check_prot() with a 'partition_scoped' bool
target/ppc: Introduce ppc_radix64_xlate() for Radix tree translation
spapr: Don't allow unplug of NVLink2 devices
target/ppc: Assert if HV mode is set when running under a pseries machine
target/ppc: Introduce a relocation bool in ppc_radix64_handle_mmu_fault()
target/ppc: Enforce that the root page directory size must be at least 5
spapr: Drop CAS reboot flag
spapr/cas: Separate CAS handling from rebuilding the FDT
spapr: Simplify selection of radix/hash during CAS
ppc/pnv: Add support for NMI interface
ppc/spapr: tweak change system reset helper
spapr: Don't check capabilities removed between CAS calls
target/ppc: Improve syscall exception logging

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+506 -217
+29
hw/ppc/pnv.c
··· 27 27 #include "sysemu/runstate.h" 28 28 #include "sysemu/cpus.h" 29 29 #include "sysemu/device_tree.h" 30 + #include "sysemu/hw_accel.h" 30 31 #include "target/ppc/cpu.h" 31 32 #include "qemu/log.h" 32 33 #include "hw/ppc/fdt.h" ··· 34 35 #include "hw/ppc/pnv.h" 35 36 #include "hw/ppc/pnv_core.h" 36 37 #include "hw/loader.h" 38 + #include "hw/nmi.h" 37 39 #include "exec/address-spaces.h" 38 40 #include "qapi/visitor.h" 39 41 #include "monitor/monitor.h" ··· 1977 1979 } 1978 1980 } 1979 1981 1982 + static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg) 1983 + { 1984 + PowerPCCPU *cpu = POWERPC_CPU(cs); 1985 + CPUPPCState *env = &cpu->env; 1986 + 1987 + cpu_synchronize_state(cs); 1988 + ppc_cpu_do_system_reset(cs); 1989 + /* 1990 + * SRR1[42:45] is set to 0100 which the ISA defines as implementation 1991 + * dependent. POWER processors use this for xscom triggered interrupts, 1992 + * which come from the BMC or NMI IPIs. 1993 + */ 1994 + env->spr[SPR_SRR1] |= PPC_BIT(43); 1995 + } 1996 + 1997 + static void pnv_nmi(NMIState *n, int cpu_index, Error **errp) 1998 + { 1999 + CPUState *cs; 2000 + 2001 + CPU_FOREACH(cs) { 2002 + async_run_on_cpu(cs, pnv_cpu_do_nmi_on_cpu, RUN_ON_CPU_NULL); 2003 + } 2004 + } 2005 + 1980 2006 static void pnv_machine_class_init(ObjectClass *oc, void *data) 1981 2007 { 1982 2008 MachineClass *mc = MACHINE_CLASS(oc); 1983 2009 InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc); 2010 + NMIClass *nc = NMI_CLASS(oc); 1984 2011 1985 2012 mc->desc = "IBM PowerNV (Non-Virtualized)"; 1986 2013 mc->init = pnv_init; ··· 1997 2024 mc->default_ram_size = INITRD_LOAD_ADDR + INITRD_MAX_SIZE; 1998 2025 mc->default_ram_id = "pnv.ram"; 1999 2026 ispc->print_info = pnv_pic_print_info; 2027 + nc->nmi_monitor_handler = pnv_nmi; 2000 2028 2001 2029 object_class_property_add_bool(oc, "hb-mode", 2002 2030 pnv_machine_get_hb, pnv_machine_set_hb, ··· 2060 2088 .class_size = sizeof(PnvMachineClass), 2061 2089 .interfaces = 
(InterfaceInfo[]) { 2062 2090 { TYPE_INTERRUPT_STATS_PROVIDER }, 2091 + { TYPE_NMI }, 2063 2092 { }, 2064 2093 }, 2065 2094 },
+11 -18
hw/ppc/spapr.c
··· 96 96 * 97 97 * We load our kernel at 4M, leaving space for SLOF initial image 98 98 */ 99 - #define FDT_MAX_SIZE 0x100000 100 99 #define RTAS_MAX_ADDR 0x80000000 /* RTAS must stay below that */ 101 100 #define FW_MAX_SIZE 0x400000 102 101 #define FW_FILE_NAME "slof.bin" ··· 1580 1579 { 1581 1580 int hpt_shift; 1582 1581 1583 - if ((spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) 1584 - || (spapr->cas_reboot 1585 - && !spapr_ovec_test(spapr->ov5_cas, OV5_HPT_RESIZE))) { 1582 + if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { 1586 1583 hpt_shift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); 1587 1584 } else { 1588 1585 uint64_t current_ram_size; ··· 1646 1643 1647 1644 qemu_devices_reset(); 1648 1645 1649 - /* 1650 - * If this reset wasn't generated by CAS, we should reset our 1651 - * negotiated options and start from scratch 1652 - */ 1653 - if (!spapr->cas_reboot) { 1654 - spapr_ovec_cleanup(spapr->ov5_cas); 1655 - spapr->ov5_cas = spapr_ovec_new(); 1646 + spapr_ovec_cleanup(spapr->ov5_cas); 1647 + spapr->ov5_cas = spapr_ovec_new(); 1656 1648 1657 - ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal); 1658 - } 1649 + ppc_set_compat_all(spapr->max_compat_pvr, &error_fatal); 1659 1650 1660 1651 /* 1661 1652 * This is fixing some of the default configuration of the XIVE ··· 1707 1698 /* Set up the entry state */ 1708 1699 spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, 0, fdt_addr, 0); 1709 1700 first_ppc_cpu->env.gpr[5] = 0; 1710 - 1711 - spapr->cas_reboot = false; 1712 1701 1713 1702 spapr->fwnmi_system_reset_addr = -1; 1714 1703 spapr->fwnmi_machine_check_addr = -1; ··· 2837 2826 if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) && 2838 2827 ppc_type_check_compat(machine->cpu_type, CPU_POWERPC_LOGICAL_3_00, 0, 2839 2828 spapr->max_compat_pvr)) { 2829 + spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_300); 2840 2830 /* KVM and TCG always allow GTSE with radix... 
*/ 2841 2831 spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE); 2842 2832 } ··· 3385 3375 void spapr_do_system_reset_on_cpu(CPUState *cs, run_on_cpu_data arg) 3386 3376 { 3387 3377 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 3378 + PowerPCCPU *cpu = POWERPC_CPU(cs); 3379 + CPUPPCState *env = &cpu->env; 3388 3380 3389 3381 cpu_synchronize_state(cs); 3390 3382 /* If FWNMI is inactive, addr will be -1, which will deliver to 0x100 */ 3391 3383 if (spapr->fwnmi_system_reset_addr != -1) { 3392 3384 uint64_t rtas_addr, addr; 3393 - PowerPCCPU *cpu = POWERPC_CPU(cs); 3394 - CPUPPCState *env = &cpu->env; 3395 3385 3396 3386 /* get rtas addr from fdt */ 3397 3387 rtas_addr = spapr_get_rtas_addr(); ··· 3405 3395 stq_be_phys(&address_space_memory, addr + sizeof(uint64_t), 0); 3406 3396 env->gpr[3] = addr; 3407 3397 } 3408 - ppc_cpu_do_system_reset(cs, spapr->fwnmi_system_reset_addr); 3398 + ppc_cpu_do_system_reset(cs); 3399 + if (spapr->fwnmi_system_reset_addr != -1) { 3400 + env->nip = spapr->fwnmi_system_reset_addr; 3401 + } 3409 3402 } 3410 3403 3411 3404 static void spapr_nmi(NMIState *n, int cpu_index, Error **errp)
+51 -57
hw/ppc/spapr_hcall.c
··· 1665 1665 spapr_clear_pending_hotplug_events(spapr); 1666 1666 } 1667 1667 1668 - static target_ulong h_client_architecture_support(PowerPCCPU *cpu, 1669 - SpaprMachineState *spapr, 1670 - target_ulong opcode, 1671 - target_ulong *args) 1668 + target_ulong do_client_architecture_support(PowerPCCPU *cpu, 1669 + SpaprMachineState *spapr, 1670 + target_ulong vec, 1671 + target_ulong fdt_bufsize) 1672 1672 { 1673 - /* Working address in data buffer */ 1674 - target_ulong addr = ppc64_phys_to_real(args[0]); 1675 - target_ulong fdt_buf = args[1]; 1676 - target_ulong fdt_bufsize = args[2]; 1677 - target_ulong ov_table; 1673 + target_ulong ov_table; /* Working address in data buffer */ 1678 1674 uint32_t cas_pvr; 1679 - SpaprOptionVector *ov1_guest, *ov5_guest, *ov5_cas_old; 1675 + SpaprOptionVector *ov1_guest, *ov5_guest; 1680 1676 bool guest_radix; 1681 1677 Error *local_err = NULL; 1682 1678 bool raw_mode_supported = false; 1683 1679 bool guest_xive; 1684 1680 CPUState *cs; 1681 + void *fdt; 1685 1682 1686 1683 /* CAS is supposed to be called early when only the boot vCPU is active. 
*/ 1687 1684 CPU_FOREACH(cs) { ··· 1694 1691 } 1695 1692 } 1696 1693 1697 - cas_pvr = cas_check_pvr(spapr, cpu, &addr, &raw_mode_supported, &local_err); 1694 + cas_pvr = cas_check_pvr(spapr, cpu, &vec, &raw_mode_supported, &local_err); 1698 1695 if (local_err) { 1699 1696 error_report_err(local_err); 1700 1697 return H_HARDWARE; ··· 1717 1714 } 1718 1715 1719 1716 /* For the future use: here @ov_table points to the first option vector */ 1720 - ov_table = addr; 1717 + ov_table = vec; 1721 1718 1722 1719 ov1_guest = spapr_ovec_parse_vector(ov_table, 1); 1723 1720 if (!ov1_guest) { ··· 1739 1736 exit(EXIT_FAILURE); 1740 1737 } 1741 1738 1742 - /* The radix/hash bit in byte 24 requires special handling: */ 1743 1739 guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300); 1744 - spapr_ovec_clear(ov5_guest, OV5_MMU_RADIX_300); 1745 1740 1746 1741 guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT); 1747 1742 ··· 1782 1777 * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need 1783 1778 * to worry about this for now. 1784 1779 */ 1785 - ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas); 1786 - 1787 - /* also clear the radix/hash bit from the current ov5_cas bits to 1788 - * be in sync with the newly ov5 bits. Else the radix bit will be 1789 - * seen as being removed and this will generate a reset loop 1790 - */ 1791 - spapr_ovec_clear(ov5_cas_old, OV5_MMU_RADIX_300); 1792 1780 1793 1781 /* full range of negotiated ov5 capabilities */ 1794 1782 spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest); 1795 1783 spapr_ovec_cleanup(ov5_guest); 1796 - /* capabilities that have been added since CAS-generated guest reset. 
1797 - * if capabilities have since been removed, generate another reset 1798 - */ 1799 - spapr->cas_reboot = !spapr_ovec_subset(ov5_cas_old, spapr->ov5_cas); 1800 - spapr_ovec_cleanup(ov5_cas_old); 1801 - /* Now that processing is finished, set the radix/hash bit for the 1802 - * guest if it requested a valid mode; otherwise terminate the boot. */ 1784 + 1803 1785 if (guest_radix) { 1804 1786 if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) { 1805 1787 error_report("Guest requested unavailable MMU mode (radix)."); 1806 1788 exit(EXIT_FAILURE); 1807 1789 } 1808 - spapr_ovec_set(spapr->ov5_cas, OV5_MMU_RADIX_300); 1809 1790 } else { 1810 1791 if (kvm_enabled() && kvmppc_has_cap_mmu_radix() 1811 1792 && !kvmppc_has_cap_mmu_hash_v3()) { ··· 1838 1819 1839 1820 spapr_handle_transient_dev_before_cas(spapr); 1840 1821 1841 - if (!spapr->cas_reboot) { 1842 - void *fdt; 1843 - SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 }; 1822 + /* 1823 + * If spapr_machine_reset() did not set up a HPT but one is necessary 1824 + * (because the guest isn't going to use radix) then set it up here. 1825 + */ 1826 + if ((spapr->patb_entry & PATE1_GR) && !guest_radix) { 1827 + /* legacy hash or new hash: */ 1828 + spapr_setup_hpt(spapr); 1829 + } 1844 1830 1845 - /* If spapr_machine_reset() did not set up a HPT but one is necessary 1846 - * (because the guest isn't going to use radix) then set it up here. 
*/ 1847 - if ((spapr->patb_entry & PATE1_GR) && !guest_radix) { 1848 - /* legacy hash or new hash: */ 1849 - spapr_setup_hpt(spapr); 1850 - } 1831 + fdt = spapr_build_fdt(spapr, false, fdt_bufsize); 1851 1832 1852 - if (fdt_bufsize < sizeof(hdr)) { 1853 - error_report("SLOF provided insufficient CAS buffer " 1854 - TARGET_FMT_lu " (min: %zu)", fdt_bufsize, sizeof(hdr)); 1855 - exit(EXIT_FAILURE); 1856 - } 1833 + g_free(spapr->fdt_blob); 1834 + spapr->fdt_size = fdt_totalsize(fdt); 1835 + spapr->fdt_initial_size = spapr->fdt_size; 1836 + spapr->fdt_blob = fdt; 1857 1837 1858 - fdt_bufsize -= sizeof(hdr); 1838 + return H_SUCCESS; 1839 + } 1859 1840 1860 - fdt = spapr_build_fdt(spapr, false, fdt_bufsize); 1861 - _FDT((fdt_pack(fdt))); 1841 + static target_ulong h_client_architecture_support(PowerPCCPU *cpu, 1842 + SpaprMachineState *spapr, 1843 + target_ulong opcode, 1844 + target_ulong *args) 1845 + { 1846 + target_ulong vec = ppc64_phys_to_real(args[0]); 1847 + target_ulong fdt_buf = args[1]; 1848 + target_ulong fdt_bufsize = args[2]; 1849 + target_ulong ret; 1850 + SpaprDeviceTreeUpdateHeader hdr = { .version_id = 1 }; 1862 1851 1863 - cpu_physical_memory_write(fdt_buf, &hdr, sizeof(hdr)); 1864 - cpu_physical_memory_write(fdt_buf + sizeof(hdr), fdt, 1865 - fdt_totalsize(fdt)); 1866 - trace_spapr_cas_continue(fdt_totalsize(fdt) + sizeof(hdr)); 1852 + if (fdt_bufsize < sizeof(hdr)) { 1853 + error_report("SLOF provided insufficient CAS buffer " 1854 + TARGET_FMT_lu " (min: %zu)", fdt_bufsize, sizeof(hdr)); 1855 + exit(EXIT_FAILURE); 1856 + } 1867 1857 1868 - g_free(spapr->fdt_blob); 1869 - spapr->fdt_size = fdt_totalsize(fdt); 1858 + fdt_bufsize -= sizeof(hdr); 1859 + 1860 + ret = do_client_architecture_support(cpu, spapr, vec, fdt_bufsize); 1861 + if (ret == H_SUCCESS) { 1862 + _FDT((fdt_pack(spapr->fdt_blob))); 1863 + spapr->fdt_size = fdt_totalsize(spapr->fdt_blob); 1870 1864 spapr->fdt_initial_size = spapr->fdt_size; 1871 - spapr->fdt_blob = fdt; 1872 - } 1873 
1865 1874 - if (spapr->cas_reboot) { 1875 - qemu_system_reset_request(SHUTDOWN_CAUSE_SUBSYSTEM_RESET); 1866 + cpu_physical_memory_write(fdt_buf, &hdr, sizeof(hdr)); 1867 + cpu_physical_memory_write(fdt_buf + sizeof(hdr), spapr->fdt_blob, 1868 + spapr->fdt_size); 1869 + trace_spapr_cas_continue(spapr->fdt_size + sizeof(hdr)); 1876 1870 } 1877 1871 1878 - return H_SUCCESS; 1872 + return ret; 1879 1873 } 1880 1874 1881 1875 static target_ulong h_home_node_associativity(PowerPCCPU *cpu,
+8 -2
hw/ppc/spapr_nvdimm.c
··· 37 37 QemuUUID uuid; 38 38 int ret; 39 39 40 + if (object_property_get_int(OBJECT(nvdimm), NVDIMM_LABEL_SIZE_PROP, 41 + &error_abort) == 0) { 42 + error_setg(errp, "PAPR requires NVDIMM devices to have label-size set"); 43 + return; 44 + } 45 + 40 46 if (size % SPAPR_MINIMUM_SCM_BLOCK_SIZE) { 41 - error_setg(errp, "NVDIMM memory size excluding the label area" 42 - " must be a multiple of %" PRIu64 "MB", 47 + error_setg(errp, "PAPR requires NVDIMM memory size (excluding label)" 48 + " to be a multiple of %" PRIu64 "MB", 43 49 SPAPR_MINIMUM_SCM_BLOCK_SIZE / MiB); 44 50 return; 45 51 }
+4
hw/ppc/spapr_pci.c
··· 1665 1665 error_setg(errp, "PCI: Hot unplug of PCI bridges not supported"); 1666 1666 return; 1667 1667 } 1668 + if (object_property_get_uint(OBJECT(pdev), "nvlink2-tgt", NULL)) { 1669 + error_setg(errp, "PCI: Cannot unplug NVLink2 devices"); 1670 + return; 1671 + } 1668 1672 1669 1673 /* ensure any other present functions are pending unplug */ 1670 1674 if (PCI_FUNC(pdev->devfn) == 0) {
+7 -1
include/hw/ppc/spapr.h
··· 102 102 #define SPAPR_CAP_FIXED_CCD 0x03 103 103 #define SPAPR_CAP_FIXED_NA 0x10 /* Lets leave a bit of a gap... */ 104 104 105 + #define FDT_MAX_SIZE 0x100000 106 + 105 107 typedef struct SpaprCapabilities SpaprCapabilities; 106 108 struct SpaprCapabilities { 107 109 uint8_t caps[SPAPR_CAP_NUM]; ··· 176 178 SpaprEventSource *event_sources; 177 179 178 180 /* ibm,client-architecture-support option negotiation */ 179 - bool cas_reboot; 180 181 bool cas_pre_isa3_guest; 181 182 SpaprOptionVector *ov5; /* QEMU-supported option vectors */ 182 183 SpaprOptionVector *ov5_cas; /* negotiated (via CAS) option vectors */ ··· 565 566 void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn); 566 567 target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, 567 568 target_ulong *args); 569 + 570 + target_ulong do_client_architecture_support(PowerPCCPU *cpu, 571 + SpaprMachineState *spapr, 572 + target_ulong addr, 573 + target_ulong fdt_bufsize); 568 574 569 575 /* Virtual Processor Area structure constants */ 570 576 #define VPA_MIN_SIZE 640
+4 -1
target/ppc/cpu.h
··· 463 463 #define DSISR_AMR 0x00200000 464 464 /* Unsupported Radix Tree Configuration */ 465 465 #define DSISR_R_BADCONFIG 0x00080000 466 + #define DSISR_ATOMIC_RC 0x00040000 467 + /* Unable to translate address of (guest) pde or process/page table entry */ 468 + #define DSISR_PRTABLE_FAULT 0x00020000 466 469 467 470 /* SRR1 error code fields */ 468 471 ··· 1220 1223 int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, 1221 1224 int cpuid, void *opaque); 1222 1225 #ifndef CONFIG_USER_ONLY 1223 - void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector); 1226 + void ppc_cpu_do_system_reset(CPUState *cs); 1224 1227 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector); 1225 1228 extern const VMStateDescription vmstate_ppc_cpu; 1226 1229 #endif
+29 -9
target/ppc/excp_helper.c
··· 57 57 #else /* defined(CONFIG_USER_ONLY) */ 58 58 static inline void dump_syscall(CPUPPCState *env) 59 59 { 60 - qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 " r3=%016" PRIx64 61 - " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 60 + qemu_log_mask(CPU_LOG_INT, "syscall r0=%016" PRIx64 61 + " r3=%016" PRIx64 " r4=%016" PRIx64 " r5=%016" PRIx64 62 + " r6=%016" PRIx64 " r7=%016" PRIx64 " r8=%016" PRIx64 62 63 " nip=" TARGET_FMT_lx "\n", 63 64 ppc_dump_gpr(env, 0), ppc_dump_gpr(env, 3), 64 65 ppc_dump_gpr(env, 4), ppc_dump_gpr(env, 5), 65 - ppc_dump_gpr(env, 6), env->nip); 66 + ppc_dump_gpr(env, 6), ppc_dump_gpr(env, 7), 67 + ppc_dump_gpr(env, 8), env->nip); 68 + } 69 + 70 + static inline void dump_hcall(CPUPPCState *env) 71 + { 72 + qemu_log_mask(CPU_LOG_INT, "hypercall r3=%016" PRIx64 73 + " r4=%016" PRIx64 " r5=%016" PRIx64 " r6=%016" PRIx64 74 + " r7=%016" PRIx64 " r8=%016" PRIx64 " r9=%016" PRIx64 75 + " r10=%016" PRIx64 " r11=%016" PRIx64 " r12=%016" PRIx64 76 + " nip=" TARGET_FMT_lx "\n", 77 + ppc_dump_gpr(env, 3), ppc_dump_gpr(env, 4), 78 + ppc_dump_gpr(env, 5), ppc_dump_gpr(env, 6), 79 + ppc_dump_gpr(env, 7), ppc_dump_gpr(env, 8), 80 + ppc_dump_gpr(env, 9), ppc_dump_gpr(env, 10), 81 + ppc_dump_gpr(env, 11), ppc_dump_gpr(env, 12), 82 + env->nip); 66 83 } 67 84 68 85 static int powerpc_reset_wakeup(CPUState *cs, CPUPPCState *env, int excp, ··· 379 396 } 380 397 break; 381 398 case POWERPC_EXCP_SYSCALL: /* System call exception */ 382 - dump_syscall(env); 383 399 lev = env->error_code; 400 + 401 + if ((lev == 1) && cpu->vhyp) { 402 + dump_hcall(env); 403 + } else { 404 + dump_syscall(env); 405 + } 384 406 385 407 /* 386 408 * We need to correct the NIP which in this case is supposed ··· 484 506 case POWERPC_EXCP_ISEG: /* Instruction segment exception */ 485 507 case POWERPC_EXCP_TRACE: /* Trace exception */ 486 508 break; 509 + case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ 510 + msr |= env->error_code; 487 511 case 
POWERPC_EXCP_HDECR: /* Hypervisor decrementer exception */ 488 512 case POWERPC_EXCP_HDSI: /* Hypervisor data storage exception */ 489 - case POWERPC_EXCP_HISI: /* Hypervisor instruction storage exception */ 490 513 case POWERPC_EXCP_HDSEG: /* Hypervisor data segment exception */ 491 514 case POWERPC_EXCP_HISEG: /* Hypervisor instruction segment exception */ 492 515 case POWERPC_EXCP_SDOOR_HV: /* Hypervisor Doorbell interrupt */ ··· 961 984 } 962 985 } 963 986 964 - void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector) 987 + void ppc_cpu_do_system_reset(CPUState *cs) 965 988 { 966 989 PowerPCCPU *cpu = POWERPC_CPU(cs); 967 990 CPUPPCState *env = &cpu->env; 968 991 969 992 powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET); 970 - if (vector != -1) { 971 - env->nip = vector; 972 - } 973 993 } 974 994 975 995 void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
+342 -126
target/ppc/mmu-radix64.c
··· 103 103 } 104 104 } 105 105 106 + static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, int rwx, vaddr eaddr, 107 + hwaddr g_raddr, uint32_t cause) 108 + { 109 + CPUState *cs = CPU(cpu); 110 + CPUPPCState *env = &cpu->env; 111 + 112 + if (rwx == 2) { /* H Instruction Storage Interrupt */ 113 + cs->exception_index = POWERPC_EXCP_HISI; 114 + env->spr[SPR_ASDR] = g_raddr; 115 + env->error_code = cause; 116 + } else { /* H Data Storage Interrupt */ 117 + cs->exception_index = POWERPC_EXCP_HDSI; 118 + if (rwx == 1) { /* Write -> Store */ 119 + cause |= DSISR_ISSTORE; 120 + } 121 + env->spr[SPR_HDSISR] = cause; 122 + env->spr[SPR_HDAR] = eaddr; 123 + env->spr[SPR_ASDR] = g_raddr; 124 + env->error_code = 0; 125 + } 126 + } 106 127 107 128 static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte, 108 - int *fault_cause, int *prot) 129 + int *fault_cause, int *prot, 130 + bool partition_scoped) 109 131 { 110 132 CPUPPCState *env = &cpu->env; 111 133 const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC }; ··· 121 143 } 122 144 123 145 /* Determine permissions allowed by Encoded Access Authority */ 124 - if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */ 146 + if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) { 125 147 *prot = 0; 126 - } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) { 148 + } else if (msr_pr || (pte & R_PTE_EAA_PRIV) || partition_scoped) { 127 149 *prot = ppc_radix64_get_prot_eaa(pte); 128 - } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */ 150 + } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */ 129 151 *prot = ppc_radix64_get_prot_eaa(pte); 130 152 *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */ 131 153 } ··· 162 184 } 163 185 } 164 186 165 - static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr, 166 - uint64_t base_addr, uint64_t nls, 167 - hwaddr *raddr, int *psize, 168 - int *fault_cause, hwaddr *pte_addr) 187 + static int 
ppc_radix64_next_level(AddressSpace *as, vaddr eaddr, 188 + uint64_t *pte_addr, uint64_t *nls, 189 + int *psize, uint64_t *pte, int *fault_cause) 169 190 { 170 - CPUState *cs = CPU(cpu); 171 191 uint64_t index, pde; 172 192 173 - if (nls < 5) { /* Directory maps less than 2**5 entries */ 193 + if (*nls < 5) { /* Directory maps less than 2**5 entries */ 174 194 *fault_cause |= DSISR_R_BADCONFIG; 175 - return 0; 195 + return 1; 176 196 } 177 197 178 198 /* Read page <directory/table> entry from guest address space */ 179 - index = eaddr >> (*psize - nls); /* Shift */ 180 - index &= ((1UL << nls) - 1); /* Mask */ 181 - pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde))); 182 - if (!(pde & R_PTE_VALID)) { /* Invalid Entry */ 199 + pde = ldq_phys(as, *pte_addr); 200 + if (!(pde & R_PTE_VALID)) { /* Invalid Entry */ 183 201 *fault_cause |= DSISR_NOPTE; 184 - return 0; 202 + return 1; 185 203 } 186 204 187 - *psize -= nls; 205 + *pte = pde; 206 + *psize -= *nls; 207 + if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */ 208 + *nls = pde & R_PDE_NLS; 209 + index = eaddr >> (*psize - *nls); /* Shift */ 210 + index &= ((1UL << *nls) - 1); /* Mask */ 211 + *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde)); 212 + } 213 + return 0; 214 + } 188 215 189 - /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */ 190 - if (pde & R_PTE_LEAF) { 191 - uint64_t rpn = pde & R_PTE_RPN; 192 - uint64_t mask = (1UL << *psize) - 1; 216 + static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr, 217 + uint64_t base_addr, uint64_t nls, 218 + hwaddr *raddr, int *psize, uint64_t *pte, 219 + int *fault_cause, hwaddr *pte_addr) 220 + { 221 + uint64_t index, pde, rpn , mask; 193 222 194 - /* Or high bits of rpn and low bits to ea to form whole real addr */ 195 - *raddr = (rpn & ~mask) | (eaddr & mask); 196 - *pte_addr = base_addr + (index * sizeof(pde)); 197 - return pde; 223 + if (nls < 5) { /* Directory maps less than 2**5 entries */ 224 + *fault_cause |= 
DSISR_R_BADCONFIG; 225 + return 1; 198 226 } 199 227 200 - /* Next Level of Radix Tree */ 201 - return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS, 202 - raddr, psize, fault_cause, pte_addr); 228 + index = eaddr >> (*psize - nls); /* Shift */ 229 + index &= ((1UL << nls) - 1); /* Mask */ 230 + *pte_addr = base_addr + (index * sizeof(pde)); 231 + do { 232 + int ret; 233 + 234 + ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde, 235 + fault_cause); 236 + if (ret) { 237 + return ret; 238 + } 239 + } while (!(pde & R_PTE_LEAF)); 240 + 241 + *pte = pde; 242 + rpn = pde & R_PTE_RPN; 243 + mask = (1UL << *psize) - 1; 244 + 245 + /* Or high bits of rpn and low bits to ea to form whole real addr */ 246 + *raddr = (rpn & ~mask) | (eaddr & mask); 247 + return 0; 203 248 } 204 249 205 250 static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate) ··· 212 257 if (lpid == 0 && !msr_hv) { 213 258 return false; 214 259 } 260 + if ((pate->dw0 & PATE1_R_PRTS) < 5) { 261 + return false; 262 + } 215 263 /* More checks ... 
*/ 216 264 return true; 217 265 } 218 266 267 + static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx, 268 + vaddr eaddr, hwaddr g_raddr, 269 + ppc_v3_pate_t pate, 270 + hwaddr *h_raddr, int *h_prot, 271 + int *h_page_size, bool pde_addr, 272 + bool cause_excp) 273 + { 274 + int fault_cause = 0; 275 + hwaddr pte_addr; 276 + uint64_t pte; 277 + 278 + *h_page_size = PRTBE_R_GET_RTS(pate.dw0); 279 + /* No valid pte or access denied due to protection */ 280 + if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB, 281 + pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size, 282 + &pte, &fault_cause, &pte_addr) || 283 + ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, h_prot, true)) { 284 + if (pde_addr) /* address being translated was that of a guest pde */ 285 + fault_cause |= DSISR_PRTABLE_FAULT; 286 + if (cause_excp) { 287 + ppc_radix64_raise_hsi(cpu, rwx, eaddr, g_raddr, fault_cause); 288 + } 289 + return 1; 290 + } 291 + 292 + /* Update Reference and Change Bits */ 293 + ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, h_prot); 294 + 295 + return 0; 296 + } 297 + 298 + static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx, 299 + vaddr eaddr, uint64_t pid, 300 + ppc_v3_pate_t pate, hwaddr *g_raddr, 301 + int *g_prot, int *g_page_size, 302 + bool cause_excp) 303 + { 304 + CPUState *cs = CPU(cpu); 305 + CPUPPCState *env = &cpu->env; 306 + uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte; 307 + int fault_cause = 0, h_page_size, h_prot; 308 + hwaddr h_raddr, pte_addr; 309 + int ret; 310 + 311 + /* Index Process Table by PID to Find Corresponding Process Table Entry */ 312 + offset = pid * sizeof(struct prtb_entry); 313 + size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); 314 + if (offset >= size) { 315 + /* offset exceeds size of the process table */ 316 + if (cause_excp) { 317 + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); 318 + } 319 + return 1; 320 + } 321 + prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + 
offset; 322 + 323 + if (cpu->vhyp) { 324 + prtbe0 = ldq_phys(cs->as, prtbe_addr); 325 + } else { 326 + /* 327 + * Process table addresses are subject to partition-scoped 328 + * translation 329 + * 330 + * On a Radix host, the partition-scoped page table for LPID=0 331 + * is only used to translate the effective addresses of the 332 + * process table entries. 333 + */ 334 + ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr, 335 + pate, &h_raddr, &h_prot, 336 + &h_page_size, 1, 1); 337 + if (ret) { 338 + return ret; 339 + } 340 + prtbe0 = ldq_phys(cs->as, h_raddr); 341 + } 342 + 343 + /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ 344 + *g_page_size = PRTBE_R_GET_RTS(prtbe0); 345 + base_addr = prtbe0 & PRTBE_R_RPDB; 346 + nls = prtbe0 & PRTBE_R_RPDS; 347 + if (msr_hv || cpu->vhyp) { 348 + /* 349 + * Can treat process table addresses as real addresses 350 + */ 351 + ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr, 352 + nls, g_raddr, g_page_size, &pte, 353 + &fault_cause, &pte_addr); 354 + if (ret) { 355 + /* No valid PTE */ 356 + if (cause_excp) { 357 + ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); 358 + } 359 + return ret; 360 + } 361 + } else { 362 + uint64_t rpn, mask; 363 + 364 + index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */ 365 + index &= ((1UL << nls) - 1); /* Mask */ 366 + pte_addr = base_addr + (index * sizeof(pte)); 367 + 368 + /* 369 + * Each process table address is subject to a partition-scoped 370 + * translation 371 + */ 372 + do { 373 + ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr, 374 + pate, &h_raddr, &h_prot, 375 + &h_page_size, 1, 1); 376 + if (ret) { 377 + return ret; 378 + } 379 + 380 + ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr, 381 + &nls, g_page_size, &pte, &fault_cause); 382 + if (ret) { 383 + /* No valid pte */ 384 + if (cause_excp) { 385 + ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); 386 + } 387 + return 
ret; 388 + } 389 + pte_addr = h_raddr; 390 + } while (!(pte & R_PTE_LEAF)); 391 + 392 + rpn = pte & R_PTE_RPN; 393 + mask = (1UL << *g_page_size) - 1; 394 + 395 + /* Or high bits of rpn and low bits to ea to form whole real addr */ 396 + *g_raddr = (rpn & ~mask) | (eaddr & mask); 397 + } 398 + 399 + if (ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot, false)) { 400 + /* Access denied due to protection */ 401 + if (cause_excp) { 402 + ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); 403 + } 404 + return 1; 405 + } 406 + 407 + ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot); 408 + 409 + return 0; 410 + } 411 + 412 + /* 413 + * Radix tree translation is a 2 steps translation process: 414 + * 415 + * 1. Process-scoped translation: Guest Eff Addr -> Guest Real Addr 416 + * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr 417 + * 418 + * MSR[HV] 419 + * +-------------+----------------+---------------+ 420 + * | | HV = 0 | HV = 1 | 421 + * +-------------+----------------+---------------+ 422 + * | Relocation | Partition | No | 423 + * | = Off | Scoped | Translation | 424 + * Relocation +-------------+----------------+---------------+ 425 + * | Relocation | Partition & | Process | 426 + * | = On | Process Scoped | Scoped | 427 + * +-------------+----------------+---------------+ 428 + */ 429 + static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx, 430 + bool relocation, 431 + hwaddr *raddr, int *psizep, int *protp, 432 + bool cause_excp) 433 + { 434 + CPUPPCState *env = &cpu->env; 435 + uint64_t lpid = 0, pid = 0; 436 + ppc_v3_pate_t pate; 437 + int psize, prot; 438 + hwaddr g_raddr; 439 + 440 + /* Virtual Mode Access - get the fully qualified address */ 441 + if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) { 442 + if (cause_excp) { 443 + ppc_radix64_raise_segi(cpu, rwx, eaddr); 444 + } 445 + return 1; 446 + } 447 + 448 + /* Get Process Table */ 449 + if (cpu->vhyp) { 450 + PPCVirtualHypervisorClass 
*vhc; 451 + vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 452 + vhc->get_pate(cpu->vhyp, &pate); 453 + } else { 454 + if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { 455 + if (cause_excp) { 456 + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); 457 + } 458 + return 1; 459 + } 460 + if (!validate_pate(cpu, lpid, &pate)) { 461 + if (cause_excp) { 462 + ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG); 463 + } 464 + return 1; 465 + } 466 + } 467 + 468 + *psizep = INT_MAX; 469 + *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC; 470 + 471 + /* 472 + * Perform process-scoped translation if relocation enabled. 473 + * 474 + * - Translates an effective address to a host real address in 475 + * quadrants 0 and 3 when HV=1. 476 + * 477 + * - Translates an effective address to a guest real address. 478 + */ 479 + if (relocation) { 480 + int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid, 481 + pate, &g_raddr, &prot, 482 + &psize, cause_excp); 483 + if (ret) { 484 + return ret; 485 + } 486 + *psizep = MIN(*psizep, psize); 487 + *protp &= prot; 488 + } else { 489 + g_raddr = eaddr & R_EADDR_MASK; 490 + } 491 + 492 + if (cpu->vhyp) { 493 + *raddr = g_raddr; 494 + } else { 495 + /* 496 + * Perform partition-scoped translation if !HV or HV access to 497 + * quadrants 1 or 2. Translates a guest real address to a host 498 + * real address. 
499 + */ 500 + if (lpid || !msr_hv) { 501 + int ret; 502 + 503 + ret = ppc_radix64_partition_scoped_xlate(cpu, rwx, eaddr, g_raddr, 504 + pate, raddr, &prot, &psize, 505 + 0, cause_excp); 506 + if (ret) { 507 + return ret; 508 + } 509 + *psizep = MIN(*psizep, psize); 510 + *protp &= prot; 511 + } else { 512 + *raddr = g_raddr; 513 + } 514 + } 515 + 516 + return 0; 517 + } 518 + 219 519 int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx, 220 520 int mmu_idx) 221 521 { 222 522 CPUState *cs = CPU(cpu); 223 523 CPUPPCState *env = &cpu->env; 224 - PPCVirtualHypervisorClass *vhc; 225 - hwaddr raddr, pte_addr; 226 - uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte; 227 - int page_size, prot, fault_cause = 0; 228 - ppc_v3_pate_t pate; 524 + int page_size, prot; 525 + bool relocation; 526 + hwaddr raddr; 229 527 528 + assert(!(msr_hv && cpu->vhyp)); 230 529 assert((rwx == 0) || (rwx == 1) || (rwx == 2)); 231 530 531 + relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1)); 232 532 /* HV or virtual hypervisor Real Mode Access */ 233 - if ((msr_hv || cpu->vhyp) && 234 - (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) { 533 + if (!relocation && (msr_hv || cpu->vhyp)) { 235 534 /* In real mode top 4 effective addr bits (mostly) ignored */ 236 535 raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; 237 536 ··· 257 556 TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); 258 557 } 259 558 260 - /* Virtual Mode Access - get the fully qualified address */ 261 - if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { 262 - ppc_radix64_raise_segi(cpu, rwx, eaddr); 559 + /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */ 560 + if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr, 561 + &page_size, &prot, true)) { 263 562 return 1; 264 563 } 265 564 266 - /* Get Process Table */ 267 - if (cpu->vhyp) { 268 - vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 269 - vhc->get_pate(cpu->vhyp, &pate); 270 - } 
else { 271 - if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { 272 - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); 273 - return 1; 274 - } 275 - if (!validate_pate(cpu, lpid, &pate)) { 276 - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG); 277 - } 278 - /* We don't support guest mode yet */ 279 - if (lpid != 0) { 280 - error_report("PowerNV guest support Unimplemented"); 281 - exit(1); 282 - } 283 - } 284 - 285 - /* Index Process Table by PID to Find Corresponding Process Table Entry */ 286 - offset = pid * sizeof(struct prtb_entry); 287 - size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); 288 - if (offset >= size) { 289 - /* offset exceeds size of the process table */ 290 - ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE); 291 - return 1; 292 - } 293 - prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); 294 - 295 - /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ 296 - page_size = PRTBE_R_GET_RTS(prtbe0); 297 - pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK, 298 - prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, 299 - &raddr, &page_size, &fault_cause, &pte_addr); 300 - if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) { 301 - /* Couldn't get pte or access denied due to protection */ 302 - ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause); 303 - return 1; 304 - } 305 - 306 - /* Update Reference and Change Bits */ 307 - ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot); 308 - 309 565 tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK, 310 566 prot, mmu_idx, 1UL << page_size); 311 567 return 0; ··· 313 569 314 570 hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr) 315 571 { 316 - CPUState *cs = CPU(cpu); 317 572 CPUPPCState *env = &cpu->env; 318 - PPCVirtualHypervisorClass *vhc; 319 - hwaddr raddr, pte_addr; 320 - uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte; 321 - int page_size, fault_cause = 0; 322 - ppc_v3_pate_t pate; 573 + int psize, prot; 574 + 
hwaddr raddr; 323 575 324 576 /* Handle Real Mode */ 325 - if (msr_dr == 0) { 577 + if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) { 326 578 /* In real mode top 4 effective addr bits (mostly) ignored */ 327 579 return eaddr & 0x0FFFFFFFFFFFFFFFULL; 328 580 } 329 581 330 - /* Virtual Mode Access - get the fully qualified address */ 331 - if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) { 332 - return -1; 333 - } 334 - 335 - /* Get Process Table */ 336 - if (cpu->vhyp) { 337 - vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 338 - vhc->get_pate(cpu->vhyp, &pate); 339 - } else { 340 - if (!ppc64_v3_get_pate(cpu, lpid, &pate)) { 341 - return -1; 342 - } 343 - if (!validate_pate(cpu, lpid, &pate)) { 344 - return -1; 345 - } 346 - /* We don't support guest mode yet */ 347 - if (lpid != 0) { 348 - error_report("PowerNV guest support Unimplemented"); 349 - exit(1); 350 - } 351 - } 352 - 353 - /* Index Process Table by PID to Find Corresponding Process Table Entry */ 354 - offset = pid * sizeof(struct prtb_entry); 355 - size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12); 356 - if (offset >= size) { 357 - /* offset exceeds size of the process table */ 358 - return -1; 359 - } 360 - prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset); 361 - 362 - /* Walk Radix Tree from Process Table Entry to Convert EA to RA */ 363 - page_size = PRTBE_R_GET_RTS(prtbe0); 364 - pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK, 365 - prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS, 366 - &raddr, &page_size, &fault_cause, &pte_addr); 367 - if (!pte) { 582 + if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize, 583 + &prot, false)) { 368 584 return -1; 369 585 } 370 586
+21 -3
target/ppc/translate.c
··· 1882 1882 tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1); 1883 1883 } else { 1884 1884 target_ulong mask; 1885 + bool mask_in_32b = true; 1885 1886 TCGv t1; 1886 1887 1887 1888 #if defined(TARGET_PPC64) ··· 1890 1891 #endif 1891 1892 mask = MASK(mb, me); 1892 1893 1894 + #if defined(TARGET_PPC64) 1895 + if (mask > 0xffffffffu) { 1896 + mask_in_32b = false; 1897 + } 1898 + #endif 1893 1899 t1 = tcg_temp_new(); 1894 - if (mask <= 0xffffffffu) { 1900 + if (mask_in_32b) { 1895 1901 TCGv_i32 t0 = tcg_temp_new_i32(); 1896 1902 tcg_gen_trunc_tl_i32(t0, t_rs); 1897 1903 tcg_gen_rotli_i32(t0, t0, sh); ··· 1933 1939 tcg_gen_extract_tl(t_ra, t_rs, rsh, len); 1934 1940 } else { 1935 1941 target_ulong mask; 1942 + bool mask_in_32b = true; 1936 1943 #if defined(TARGET_PPC64) 1937 1944 mb += 32; 1938 1945 me += 32; 1939 1946 #endif 1940 1947 mask = MASK(mb, me); 1941 - if (mask <= 0xffffffffu) { 1948 + #if defined(TARGET_PPC64) 1949 + if (mask > 0xffffffffu) { 1950 + mask_in_32b = false; 1951 + } 1952 + #endif 1953 + if (mask_in_32b) { 1942 1954 if (sh == 0) { 1943 1955 tcg_gen_andi_tl(t_ra, t_rs, mask); 1944 1956 } else { ··· 1973 1985 uint32_t mb = MB(ctx->opcode); 1974 1986 uint32_t me = ME(ctx->opcode); 1975 1987 target_ulong mask; 1988 + bool mask_in_32b = true; 1976 1989 1977 1990 #if defined(TARGET_PPC64) 1978 1991 mb += 32; ··· 1980 1993 #endif 1981 1994 mask = MASK(mb, me); 1982 1995 1983 - if (mask <= 0xffffffffu) { 1996 + #if defined(TARGET_PPC64) 1997 + if (mask > 0xffffffffu) { 1998 + mask_in_32b = false; 1999 + } 2000 + #endif 2001 + if (mask_in_32b) { 1984 2002 TCGv_i32 t0 = tcg_temp_new_i32(); 1985 2003 TCGv_i32 t1 = tcg_temp_new_i32(); 1986 2004 tcg_gen_trunc_tl_i32(t0, t_rb);