qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-4.1-20190426' into staging

ppc patch queue 2019-04-26

Here's the first ppc target pull request for qemu-4.1. This has a
number of things that have accumulated while qemu-4.0 was frozen.

* A number of emulated MMU improvements from Ben Herrenschmidt

* Assorted cleanups from Greg Kurz

* A large set of mostly mechanical cleanups from me to make target/ppc
much closer to compliant with the modern coding style

* Support for passthrough of NVIDIA GPUs using NVLink2

As well as some other assorted fixes.

# gpg: Signature made Fri 26 Apr 2019 07:02:19 BST
# gpg: using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg: aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg: aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg: aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-4.1-20190426: (36 commits)
target/ppc: improve performance of large BAT invalidations
ppc/hash32: Rework R and C bit updates
ppc/hash64: Rework R and C bit updates
ppc/spapr: Use proper HPTE accessors for H_READ
target/ppc: Don't check UPRT in radix mode when in HV real mode
target/ppc/kvm: Convert DPRINTF to traces
target/ppc/trace-events: Fix trivial typo
spapr: Drop duplicate PCI swizzle code
spapr_pci: Get rid of duplicate code for node name creation
target/ppc: Style fixes for translate/spe-impl.inc.c
target/ppc: Style fixes for translate/vmx-impl.inc.c
target/ppc: Style fixes for translate/vsx-impl.inc.c
target/ppc: Style fixes for translate/fp-impl.inc.c
target/ppc: Style fixes for translate.c
target/ppc: Style fixes for translate_init.inc.c
target/ppc: Style fixes for monitor.c
target/ppc: Style fixes for mmu_helper.c
target/ppc: Style fixes for mmu-hash64.[ch]
target/ppc: Style fixes for mmu-hash32.[ch]
target/ppc: Style fixes for misc_helper.c
...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+2152 -973
+1 -1
hw/pci/pci.c
··· 1556 1556 */ 1557 1557 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin) 1558 1558 { 1559 - return (pin + PCI_SLOT(pci_dev->devfn)) % PCI_NUM_PINS; 1559 + return pci_swizzle(PCI_SLOT(pci_dev->devfn), pin); 1560 1560 } 1561 1561 1562 1562 /***********************************************************/
+1 -1
hw/ppc/Makefile.objs
··· 9 9 # IBM PowerNV 10 10 obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o 11 11 ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy) 12 - obj-y += spapr_pci_vfio.o 12 + obj-y += spapr_pci_vfio.o spapr_pci_nvlink2.o 13 13 endif 14 14 obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o 15 15 # PowerPC 4xx boards
-1
hw/ppc/prep.c
··· 40 40 #include "hw/ide.h" 41 41 #include "hw/loader.h" 42 42 #include "hw/timer/mc146818rtc.h" 43 - #include "hw/input/i8042.h" 44 43 #include "hw/isa/pc87312.h" 45 44 #include "hw/net/ne2000-isa.h" 46 45 #include "sysemu/arch_init.h"
+78 -11
hw/ppc/spapr.c
··· 1034 1034 0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE), 1035 1035 cpu_to_be32(max_cpus / smp_threads), 1036 1036 }; 1037 + uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 1 : 0); 1037 1038 uint32_t maxdomains[] = { 1038 1039 cpu_to_be32(4), 1039 - cpu_to_be32(0), 1040 - cpu_to_be32(0), 1041 - cpu_to_be32(0), 1042 - cpu_to_be32(nb_numa_nodes ? nb_numa_nodes : 1), 1040 + maxdomain, 1041 + maxdomain, 1042 + maxdomain, 1043 + cpu_to_be32(spapr->gpu_numa_id), 1043 1044 }; 1044 1045 1045 1046 _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); ··· 1519 1520 /* Nothing to do for qemu managed HPT */ 1520 1521 } 1521 1522 1522 - static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1523 - uint64_t pte0, uint64_t pte1) 1523 + void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 1524 + uint64_t pte0, uint64_t pte1) 1524 1525 { 1525 - SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1526 + SpaprMachineState *spapr = SPAPR_MACHINE(cpu->vhyp); 1526 1527 hwaddr offset = ptex * HASH_PTE_SIZE_64; 1527 1528 1528 1529 if (!spapr->htab) { ··· 1548 1549 stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1); 1549 1550 } 1550 1551 } 1552 + } 1553 + 1554 + static void spapr_hpte_set_c(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1555 + uint64_t pte1) 1556 + { 1557 + hwaddr offset = ptex * HASH_PTE_SIZE_64 + 15; 1558 + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1559 + 1560 + if (!spapr->htab) { 1561 + /* There should always be a hash table when this is called */ 1562 + error_report("spapr_hpte_set_c called with no hash table !"); 1563 + return; 1564 + } 1565 + 1566 + /* The HW performs a non-atomic byte update */ 1567 + stb_p(spapr->htab + offset, (pte1 & 0xff) | 0x80); 1568 + } 1569 + 1570 + static void spapr_hpte_set_r(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1571 + uint64_t pte1) 1572 + { 1573 + hwaddr offset = ptex * HASH_PTE_SIZE_64 + 14; 1574 + SpaprMachineState *spapr = SPAPR_MACHINE(vhyp); 1575 + 1576 + if (!spapr->htab) { 1577 + /* There should always be a 
hash table when this is called */ 1578 + error_report("spapr_hpte_set_r called with no hash table !"); 1579 + return; 1580 + } 1581 + 1582 + /* The HW performs a non-atomic byte update */ 1583 + stb_p(spapr->htab + offset, ((pte1 >> 8) & 0xff) | 0x01); 1551 1584 } 1552 1585 1553 1586 int spapr_hpt_shift_for_ramsize(uint64_t ramsize) ··· 1698 1731 spapr_irq_msi_reset(spapr); 1699 1732 } 1700 1733 1734 + /* 1735 + * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node. 1736 + * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is 1737 + * called from vPHB reset handler so we initialize the counter here. 1738 + * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM 1739 + * must be equally distant from any other node. 1740 + * The final value of spapr->gpu_numa_id is going to be written to 1741 + * max-associativity-domains in spapr_build_fdt(). 1742 + */ 1743 + spapr->gpu_numa_id = MAX(1, nb_numa_nodes); 1701 1744 qemu_devices_reset(); 1702 1745 1703 1746 /* ··· 3907 3950 smc->phb_placement(spapr, sphb->index, 3908 3951 &sphb->buid, &sphb->io_win_addr, 3909 3952 &sphb->mem_win_addr, &sphb->mem64_win_addr, 3910 - windows_supported, sphb->dma_liobn, errp); 3953 + windows_supported, sphb->dma_liobn, 3954 + &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr, 3955 + errp); 3911 3956 } 3912 3957 3913 3958 static void spapr_phb_plug(HotplugHandler *hotplug_dev, DeviceState *dev, ··· 4108 4153 static void spapr_phb_placement(SpaprMachineState *spapr, uint32_t index, 4109 4154 uint64_t *buid, hwaddr *pio, 4110 4155 hwaddr *mmio32, hwaddr *mmio64, 4111 - unsigned n_dma, uint32_t *liobns, Error **errp) 4156 + unsigned n_dma, uint32_t *liobns, 4157 + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4112 4158 { 4113 4159 /* 4114 4160 * New-style PHB window placement. 
··· 4153 4199 *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE; 4154 4200 *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE; 4155 4201 *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE; 4202 + 4203 + *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE; 4204 + *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE; 4156 4205 } 4157 4206 4158 4207 static ICSState *spapr_ics_get(XICSFabric *dev, int irq) ··· 4274 4323 vhc->hpt_mask = spapr_hpt_mask; 4275 4324 vhc->map_hptes = spapr_map_hptes; 4276 4325 vhc->unmap_hptes = spapr_unmap_hptes; 4277 - vhc->store_hpte = spapr_store_hpte; 4326 + vhc->hpte_set_c = spapr_hpte_set_c; 4327 + vhc->hpte_set_r = spapr_hpte_set_r; 4278 4328 vhc->get_pate = spapr_get_pate; 4279 4329 vhc->encode_hpt_for_kvm_pr = spapr_encode_hpt_for_kvm_pr; 4280 4330 xic->ics_get = spapr_ics_get; ··· 4368 4418 /* 4369 4419 * pseries-3.1 4370 4420 */ 4421 + static void phb_placement_3_1(SpaprMachineState *spapr, uint32_t index, 4422 + uint64_t *buid, hwaddr *pio, 4423 + hwaddr *mmio32, hwaddr *mmio64, 4424 + unsigned n_dma, uint32_t *liobns, 4425 + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4426 + { 4427 + spapr_phb_placement(spapr, index, buid, pio, mmio32, mmio64, n_dma, liobns, 4428 + nv2gpa, nv2atsd, errp); 4429 + *nv2gpa = 0; 4430 + *nv2atsd = 0; 4431 + } 4432 + 4371 4433 static void spapr_machine_3_1_class_options(MachineClass *mc) 4372 4434 { 4373 4435 SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); ··· 4383 4445 smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; 4384 4446 smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; 4385 4447 smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; 4448 + smc->phb_placement = phb_placement_3_1; 4386 4449 } 4387 4450 4388 4451 DEFINE_SPAPR_MACHINE(3_1, "3.1", false); ··· 4514 4577 static void phb_placement_2_7(SpaprMachineState *spapr, uint32_t index, 4515 4578 uint64_t *buid, hwaddr *pio, 4516 
4579 hwaddr *mmio32, hwaddr *mmio64, 4517 - unsigned n_dma, uint32_t *liobns, Error **errp) 4580 + unsigned n_dma, uint32_t *liobns, 4581 + hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp) 4518 4582 { 4519 4583 /* Legacy PHB placement for pseries-2.7 and earlier machine types */ 4520 4584 const uint64_t base_buid = 0x800000020000000ULL; ··· 4558 4622 * fallback behaviour of automatically splitting a large "32-bit" 4559 4623 * window into contiguous 32-bit and 64-bit windows 4560 4624 */ 4625 + 4626 + *nv2gpa = 0; 4627 + *nv2atsd = 0; 4561 4628 } 4562 4629 4563 4630 static void spapr_machine_2_7_class_options(MachineClass *mc)
+12 -12
hw/ppc/spapr_hcall.c
··· 118 118 ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); 119 119 } 120 120 121 - ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel); 121 + spapr_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel); 122 122 123 123 args[0] = ptex + slot; 124 124 return H_SUCCESS; ··· 131 131 REMOVE_HW = 3, 132 132 } RemoveResult; 133 133 134 - static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, 134 + static RemoveResult remove_hpte(PowerPCCPU *cpu 135 + , target_ulong ptex, 135 136 target_ulong avpn, 136 137 target_ulong flags, 137 138 target_ulong *vp, target_ulong *rp) ··· 155 156 } 156 157 *vp = v; 157 158 *rp = r; 158 - ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0); 159 + spapr_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0); 159 160 ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); 160 161 return REMOVE_SUCCESS; 161 162 } ··· 289 290 r |= (flags << 55) & HPTE64_R_PP0; 290 291 r |= (flags << 48) & HPTE64_R_KEY_HI; 291 292 r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO); 292 - ppc_hash64_store_hpte(cpu, ptex, 293 - (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0); 293 + spapr_store_hpte(cpu, ptex, 294 + (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0); 294 295 ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); 295 296 /* Flush the tlb */ 296 297 check_tlb_flush(env, true); 297 298 /* Don't need a memory barrier, due to qemu's global lock */ 298 - ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r); 299 + spapr_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r); 299 300 return H_SUCCESS; 300 301 } 301 302 ··· 304 305 { 305 306 target_ulong flags = args[0]; 306 307 target_ulong ptex = args[1]; 307 - uint8_t *hpte; 308 308 int i, ridx, n_entries = 1; 309 + const ppc_hash_pte64_t *hptes; 309 310 310 311 if (!valid_ptex(cpu, ptex)) { 311 312 return H_PARAMETER; ··· 317 318 n_entries = 4; 318 319 } 319 320 320 - hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64); 321 - 321 + hptes = ppc_hash64_map_hptes(cpu, ptex, n_entries); 
322 322 for (i = 0, ridx = 0; i < n_entries; i++) { 323 - args[ridx++] = ldq_p(hpte); 324 - args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2)); 325 - hpte += HASH_PTE_SIZE_64; 323 + args[ridx++] = ppc_hash64_hpte0(cpu, hptes, i); 324 + args[ridx++] = ppc_hash64_hpte1(cpu, hptes, i); 326 325 } 326 + ppc_hash64_unmap_hptes(cpu, hptes, ptex, n_entries); 327 327 328 328 return H_SUCCESS; 329 329 }
+14 -30
hw/ppc/spapr_irq.c
··· 67 67 * XICS IRQ backend. 68 68 */ 69 69 70 - static ICSState *spapr_ics_create(SpaprMachineState *spapr, 71 - int nr_irqs, Error **errp) 72 - { 73 - Error *local_err = NULL; 74 - Object *obj; 75 - 76 - obj = object_new(TYPE_ICS_SIMPLE); 77 - object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort); 78 - object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr), 79 - &error_abort); 80 - object_property_set_int(obj, nr_irqs, "nr-irqs", &local_err); 81 - if (local_err) { 82 - goto error; 83 - } 84 - object_property_set_bool(obj, true, "realized", &local_err); 85 - if (local_err) { 86 - goto error; 87 - } 88 - 89 - return ICS_BASE(obj); 90 - 91 - error: 92 - error_propagate(errp, local_err); 93 - return NULL; 94 - } 95 - 96 70 static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs, 97 71 Error **errp) 98 72 { 99 73 MachineState *machine = MACHINE(spapr); 74 + Object *obj; 100 75 Error *local_err = NULL; 101 76 bool xics_kvm = false; 102 77 ··· 108 83 if (machine_kernel_irqchip_required(machine) && !xics_kvm) { 109 84 error_prepend(&local_err, 110 85 "kernel_irqchip requested but unavailable: "); 111 - goto error; 86 + error_propagate(errp, local_err); 87 + return; 112 88 } 113 89 error_free(local_err); 114 90 local_err = NULL; ··· 118 94 xics_spapr_init(spapr); 119 95 } 120 96 121 - spapr->ics = spapr_ics_create(spapr, nr_irqs, &local_err); 97 + obj = object_new(TYPE_ICS_SIMPLE); 98 + object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort); 99 + object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr), 100 + &error_fatal); 101 + object_property_set_int(obj, nr_irqs, "nr-irqs", &error_fatal); 102 + object_property_set_bool(obj, true, "realized", &local_err); 103 + if (local_err) { 104 + error_propagate(errp, local_err); 105 + return; 106 + } 122 107 123 - error: 124 - error_propagate(errp, local_err); 108 + spapr->ics = ICS_BASE(obj); 125 109 } 126 110 127 111 #define ICS_IRQ_FREE(ics, srcno) \
+24 -24
hw/ppc/spapr_pci.c
··· 719 719 rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR); 720 720 } 721 721 722 - static int pci_spapr_swizzle(int slot, int pin) 723 - { 724 - return (slot + pin) % PCI_NUM_PINS; 725 - } 726 - 727 - static int pci_spapr_map_irq(PCIDevice *pci_dev, int irq_num) 728 - { 729 - /* 730 - * Here we need to convert pci_dev + irq_num to some unique value 731 - * which is less than number of IRQs on the specific bus (4). We 732 - * use standard PCI swizzling, that is (slot number + pin number) 733 - * % 4. 734 - */ 735 - return pci_spapr_swizzle(PCI_SLOT(pci_dev->devfn), irq_num); 736 - } 737 - 738 722 static void pci_spapr_set_irq(void *opaque, int irq_num, int level) 739 723 { 740 724 /* 741 - * Here we use the number returned by pci_spapr_map_irq to find a 725 + * Here we use the number returned by pci_swizzle_map_irq_fn to find a 742 726 * corresponding qemu_irq. 743 727 */ 744 728 SpaprPhbState *phb = opaque; ··· 1355 1339 if (sphb->pcie_ecs && pci_is_express(dev)) { 1356 1340 _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1)); 1357 1341 } 1342 + 1343 + spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb); 1358 1344 } 1359 1345 1360 1346 /* create OF node for pci device and required OF DT properties */ ··· 1587 1573 int i; 1588 1574 const unsigned windows_supported = spapr_phb_windows_supported(sphb); 1589 1575 1576 + spapr_phb_nvgpu_free(sphb); 1577 + 1590 1578 if (sphb->msi) { 1591 1579 g_hash_table_unref(sphb->msi); 1592 1580 sphb->msi = NULL; ··· 1762 1750 &sphb->iowindow); 1763 1751 1764 1752 bus = pci_register_root_bus(dev, NULL, 1765 - pci_spapr_set_irq, pci_spapr_map_irq, sphb, 1753 + pci_spapr_set_irq, pci_swizzle_map_irq_fn, sphb, 1766 1754 &sphb->memspace, &sphb->iospace, 1767 1755 PCI_DEVFN(0, 0), PCI_NUM_PINS, 1768 1756 TYPE_SPAPR_PHB_ROOT_BUS); ··· 1898 1886 static void spapr_phb_reset(DeviceState *qdev) 1899 1887 { 1900 1888 SpaprPhbState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev); 1889 + Error *errp = NULL; 1901 1890 1902 1891 
spapr_phb_dma_reset(sphb); 1892 + spapr_phb_nvgpu_free(sphb); 1893 + spapr_phb_nvgpu_setup(sphb, &errp); 1894 + if (errp) { 1895 + error_report_err(errp); 1896 + } 1903 1897 1904 1898 /* Reset the IOMMU state */ 1905 1899 object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL); ··· 1932 1926 pre_2_8_migration, false), 1933 1927 DEFINE_PROP_BOOL("pcie-extended-configuration-space", SpaprPhbState, 1934 1928 pcie_ecs, true), 1929 + DEFINE_PROP_UINT64("gpa", SpaprPhbState, nv2_gpa_win_addr, 0), 1930 + DEFINE_PROP_UINT64("atsd", SpaprPhbState, nv2_atsd_win_addr, 0), 1935 1931 DEFINE_PROP_END_OF_LIST(), 1936 1932 }; 1937 1933 ··· 2164 2160 uint32_t nr_msis, int *node_offset) 2165 2161 { 2166 2162 int bus_off, i, j, ret; 2167 - gchar *nodename; 2168 2163 uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) }; 2169 2164 struct { 2170 2165 uint32_t hi; ··· 2212 2207 PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; 2213 2208 SpaprFdt s_fdt; 2214 2209 SpaprDrc *drc; 2210 + Error *errp = NULL; 2215 2211 2216 2212 /* Start populating the FDT */ 2217 - nodename = g_strdup_printf("pci@%" PRIx64, phb->buid); 2218 - _FDT(bus_off = fdt_add_subnode(fdt, 0, nodename)); 2219 - g_free(nodename); 2213 + _FDT(bus_off = fdt_add_subnode(fdt, 0, phb->dtbusname)); 2220 2214 if (node_offset) { 2221 2215 *node_offset = bus_off; 2222 2216 } ··· 2249 2243 } 2250 2244 2251 2245 /* Build the interrupt-map, this must matches what is done 2252 - * in pci_spapr_map_irq 2246 + * in pci_swizzle_map_irq_fn 2253 2247 */ 2254 2248 _FDT(fdt_setprop(fdt, bus_off, "interrupt-map-mask", 2255 2249 &interrupt_map_mask, sizeof(interrupt_map_mask))); 2256 2250 for (i = 0; i < PCI_SLOT_MAX; i++) { 2257 2251 for (j = 0; j < PCI_NUM_PINS; j++) { 2258 2252 uint32_t *irqmap = interrupt_map[i*PCI_NUM_PINS + j]; 2259 - int lsi_num = pci_spapr_swizzle(i, j); 2253 + int lsi_num = pci_swizzle(i, j); 2260 2254 2261 2255 irqmap[0] = cpu_to_be32(b_ddddd(i)|b_fff(0)); 2262 2256 irqmap[1] = 0; ··· 2303 2297 if (ret) { 
2304 2298 return ret; 2305 2299 } 2300 + 2301 + spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &errp); 2302 + if (errp) { 2303 + error_report_err(errp); 2304 + } 2305 + spapr_phb_nvgpu_ram_populate_dt(phb, fdt); 2306 2306 2307 2307 return 0; 2308 2308 }
+450
hw/ppc/spapr_pci_nvlink2.c
··· 1 + /* 2 + * QEMU sPAPR PCI for NVLink2 pass through 3 + * 4 + * Copyright (c) 2019 Alexey Kardashevskiy, IBM Corporation. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a copy 7 + * of this software and associated documentation files (the "Software"), to deal 8 + * in the Software without restriction, including without limitation the rights 9 + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 + * copies of the Software, and to permit persons to whom the Software is 11 + * furnished to do so, subject to the following conditions: 12 + * 13 + * The above copyright notice and this permission notice shall be included in 14 + * all copies or substantial portions of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 + * THE SOFTWARE. 
23 + */ 24 + #include "qemu/osdep.h" 25 + #include "qapi/error.h" 26 + #include "qemu-common.h" 27 + #include "hw/pci/pci.h" 28 + #include "hw/pci-host/spapr.h" 29 + #include "qemu/error-report.h" 30 + #include "hw/ppc/fdt.h" 31 + #include "hw/pci/pci_bridge.h" 32 + 33 + #define PHANDLE_PCIDEV(phb, pdev) (0x12000000 | \ 34 + (((phb)->index) << 16) | ((pdev)->devfn)) 35 + #define PHANDLE_GPURAM(phb, n) (0x110000FF | ((n) << 8) | \ 36 + (((phb)->index) << 16)) 37 + #define PHANDLE_NVLINK(phb, gn, nn) (0x00130000 | (((phb)->index) << 8) | \ 38 + ((gn) << 4) | (nn)) 39 + 40 + #define SPAPR_GPU_NUMA_ID (cpu_to_be32(1)) 41 + 42 + struct spapr_phb_pci_nvgpu_config { 43 + uint64_t nv2_ram_current; 44 + uint64_t nv2_atsd_current; 45 + int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */ 46 + struct spapr_phb_pci_nvgpu_slot { 47 + uint64_t tgt; 48 + uint64_t gpa; 49 + unsigned numa_id; 50 + PCIDevice *gpdev; 51 + int linknum; 52 + struct { 53 + uint64_t atsd_gpa; 54 + PCIDevice *npdev; 55 + uint32_t link_speed; 56 + } links[NVGPU_MAX_LINKS]; 57 + } slots[NVGPU_MAX_NUM]; 58 + Error *errp; 59 + }; 60 + 61 + static struct spapr_phb_pci_nvgpu_slot * 62 + spapr_nvgpu_get_slot(struct spapr_phb_pci_nvgpu_config *nvgpus, uint64_t tgt) 63 + { 64 + int i; 65 + 66 + /* Search for partially collected "slot" */ 67 + for (i = 0; i < nvgpus->num; ++i) { 68 + if (nvgpus->slots[i].tgt == tgt) { 69 + return &nvgpus->slots[i]; 70 + } 71 + } 72 + 73 + if (nvgpus->num == ARRAY_SIZE(nvgpus->slots)) { 74 + return NULL; 75 + } 76 + 77 + i = nvgpus->num; 78 + nvgpus->slots[i].tgt = tgt; 79 + ++nvgpus->num; 80 + 81 + return &nvgpus->slots[i]; 82 + } 83 + 84 + static void spapr_pci_collect_nvgpu(struct spapr_phb_pci_nvgpu_config *nvgpus, 85 + PCIDevice *pdev, uint64_t tgt, 86 + MemoryRegion *mr, Error **errp) 87 + { 88 + MachineState *machine = MACHINE(qdev_get_machine()); 89 + SpaprMachineState *spapr = SPAPR_MACHINE(machine); 90 + struct spapr_phb_pci_nvgpu_slot *nvslot = 
spapr_nvgpu_get_slot(nvgpus, tgt); 91 + 92 + if (!nvslot) { 93 + error_setg(errp, "Found too many GPUs per vPHB"); 94 + return; 95 + } 96 + g_assert(!nvslot->gpdev); 97 + nvslot->gpdev = pdev; 98 + 99 + nvslot->gpa = nvgpus->nv2_ram_current; 100 + nvgpus->nv2_ram_current += memory_region_size(mr); 101 + nvslot->numa_id = spapr->gpu_numa_id; 102 + ++spapr->gpu_numa_id; 103 + } 104 + 105 + static void spapr_pci_collect_nvnpu(struct spapr_phb_pci_nvgpu_config *nvgpus, 106 + PCIDevice *pdev, uint64_t tgt, 107 + MemoryRegion *mr, Error **errp) 108 + { 109 + struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt); 110 + int j; 111 + 112 + if (!nvslot) { 113 + error_setg(errp, "Found too many NVLink bridges per vPHB"); 114 + return; 115 + } 116 + 117 + j = nvslot->linknum; 118 + if (j == ARRAY_SIZE(nvslot->links)) { 119 + error_setg(errp, "Found too many NVLink bridges per GPU"); 120 + return; 121 + } 122 + ++nvslot->linknum; 123 + 124 + g_assert(!nvslot->links[j].npdev); 125 + nvslot->links[j].npdev = pdev; 126 + nvslot->links[j].atsd_gpa = nvgpus->nv2_atsd_current; 127 + nvgpus->nv2_atsd_current += memory_region_size(mr); 128 + nvslot->links[j].link_speed = 129 + object_property_get_uint(OBJECT(pdev), "nvlink2-link-speed", NULL); 130 + } 131 + 132 + static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev, 133 + void *opaque) 134 + { 135 + PCIBus *sec_bus; 136 + Object *po = OBJECT(pdev); 137 + uint64_t tgt = object_property_get_uint(po, "nvlink2-tgt", NULL); 138 + 139 + if (tgt) { 140 + Error *local_err = NULL; 141 + struct spapr_phb_pci_nvgpu_config *nvgpus = opaque; 142 + Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL); 143 + Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]", 144 + NULL); 145 + 146 + g_assert(mr_gpu || mr_npu); 147 + if (mr_gpu) { 148 + spapr_pci_collect_nvgpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_gpu), 149 + &local_err); 150 + } else { 151 + spapr_pci_collect_nvnpu(nvgpus, pdev, 
tgt, MEMORY_REGION(mr_npu), 152 + &local_err); 153 + } 154 + error_propagate(&nvgpus->errp, local_err); 155 + } 156 + if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) != 157 + PCI_HEADER_TYPE_BRIDGE)) { 158 + return; 159 + } 160 + 161 + sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev)); 162 + if (!sec_bus) { 163 + return; 164 + } 165 + 166 + pci_for_each_device(sec_bus, pci_bus_num(sec_bus), 167 + spapr_phb_pci_collect_nvgpu, opaque); 168 + } 169 + 170 + void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) 171 + { 172 + int i, j, valid_gpu_num; 173 + PCIBus *bus; 174 + 175 + /* Search for GPUs and NPUs */ 176 + if (!sphb->nv2_gpa_win_addr || !sphb->nv2_atsd_win_addr) { 177 + return; 178 + } 179 + 180 + sphb->nvgpus = g_new0(struct spapr_phb_pci_nvgpu_config, 1); 181 + sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr; 182 + sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr; 183 + 184 + bus = PCI_HOST_BRIDGE(sphb)->bus; 185 + pci_for_each_device(bus, pci_bus_num(bus), 186 + spapr_phb_pci_collect_nvgpu, sphb->nvgpus); 187 + 188 + if (sphb->nvgpus->errp) { 189 + error_propagate(errp, sphb->nvgpus->errp); 190 + sphb->nvgpus->errp = NULL; 191 + goto cleanup_exit; 192 + } 193 + 194 + /* Add found GPU RAM and ATSD MRs if found */ 195 + for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) { 196 + Object *nvmrobj; 197 + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; 198 + 199 + if (!nvslot->gpdev) { 200 + continue; 201 + } 202 + nvmrobj = object_property_get_link(OBJECT(nvslot->gpdev), 203 + "nvlink2-mr[0]", NULL); 204 + /* ATSD is pointless without GPU RAM MR so skip those */ 205 + if (!nvmrobj) { 206 + continue; 207 + } 208 + 209 + ++valid_gpu_num; 210 + memory_region_add_subregion(get_system_memory(), nvslot->gpa, 211 + MEMORY_REGION(nvmrobj)); 212 + 213 + for (j = 0; j < nvslot->linknum; ++j) { 214 + Object *atsdmrobj; 215 + 216 + atsdmrobj = object_property_get_link(OBJECT(nvslot->links[j].npdev), 217 + 
"nvlink2-atsd-mr[0]", NULL); 218 + if (!atsdmrobj) { 219 + continue; 220 + } 221 + memory_region_add_subregion(get_system_memory(), 222 + nvslot->links[j].atsd_gpa, 223 + MEMORY_REGION(atsdmrobj)); 224 + } 225 + } 226 + 227 + if (valid_gpu_num) { 228 + return; 229 + } 230 + /* We did not find any interesting GPU */ 231 + cleanup_exit: 232 + g_free(sphb->nvgpus); 233 + sphb->nvgpus = NULL; 234 + } 235 + 236 + void spapr_phb_nvgpu_free(SpaprPhbState *sphb) 237 + { 238 + int i, j; 239 + 240 + if (!sphb->nvgpus) { 241 + return; 242 + } 243 + 244 + for (i = 0; i < sphb->nvgpus->num; ++i) { 245 + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; 246 + Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev), 247 + "nvlink2-mr[0]", NULL); 248 + 249 + if (nv_mrobj) { 250 + memory_region_del_subregion(get_system_memory(), 251 + MEMORY_REGION(nv_mrobj)); 252 + } 253 + for (j = 0; j < nvslot->linknum; ++j) { 254 + PCIDevice *npdev = nvslot->links[j].npdev; 255 + Object *atsd_mrobj; 256 + atsd_mrobj = object_property_get_link(OBJECT(npdev), 257 + "nvlink2-atsd-mr[0]", NULL); 258 + if (atsd_mrobj) { 259 + memory_region_del_subregion(get_system_memory(), 260 + MEMORY_REGION(atsd_mrobj)); 261 + } 262 + } 263 + } 264 + g_free(sphb->nvgpus); 265 + sphb->nvgpus = NULL; 266 + } 267 + 268 + void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off, 269 + Error **errp) 270 + { 271 + int i, j, atsdnum = 0; 272 + uint64_t atsd[8]; /* The existing limitation of known guests */ 273 + 274 + if (!sphb->nvgpus) { 275 + return; 276 + } 277 + 278 + for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) { 279 + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; 280 + 281 + if (!nvslot->gpdev) { 282 + continue; 283 + } 284 + for (j = 0; j < nvslot->linknum; ++j) { 285 + if (!nvslot->links[j].atsd_gpa) { 286 + continue; 287 + } 288 + 289 + if (atsdnum == ARRAY_SIZE(atsd)) { 290 + error_report("Only %"PRIuPTR" ATSD 
registers supported", 291 + ARRAY_SIZE(atsd)); 292 + break; 293 + } 294 + atsd[atsdnum] = cpu_to_be64(nvslot->links[j].atsd_gpa); 295 + ++atsdnum; 296 + } 297 + } 298 + 299 + if (!atsdnum) { 300 + error_setg(errp, "No ATSD registers found"); 301 + return; 302 + } 303 + 304 + if (!spapr_phb_eeh_available(sphb)) { 305 + /* 306 + * ibm,mmio-atsd contains ATSD registers; these belong to an NPU PHB 307 + * which we do not emulate as a separate device. Instead we put 308 + * ibm,mmio-atsd to the vPHB with GPU and make sure that we do not 309 + * put GPUs from different IOMMU groups to the same vPHB to ensure 310 + * that the guest will use ATSDs from the corresponding NPU. 311 + */ 312 + error_setg(errp, "ATSD requires separate vPHB per GPU IOMMU group"); 313 + return; 314 + } 315 + 316 + _FDT((fdt_setprop(fdt, bus_off, "ibm,mmio-atsd", atsd, 317 + atsdnum * sizeof(atsd[0])))); 318 + } 319 + 320 + void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt) 321 + { 322 + int i, j, linkidx, npuoff; 323 + char *npuname; 324 + 325 + if (!sphb->nvgpus) { 326 + return; 327 + } 328 + 329 + npuname = g_strdup_printf("npuphb%d", sphb->index); 330 + npuoff = fdt_add_subnode(fdt, 0, npuname); 331 + _FDT(npuoff); 332 + _FDT(fdt_setprop_cell(fdt, npuoff, "#address-cells", 1)); 333 + _FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0)); 334 + /* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */ 335 + _FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu"))); 336 + g_free(npuname); 337 + 338 + for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) { 339 + for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) { 340 + char *linkname = g_strdup_printf("link@%d", linkidx); 341 + int off = fdt_add_subnode(fdt, npuoff, linkname); 342 + 343 + _FDT(off); 344 + /* _FDT((fdt_setprop_cell(fdt, off, "reg", linkidx))); */ 345 + _FDT((fdt_setprop_string(fdt, off, "compatible", 346 + "ibm,npu-link"))); 347 + _FDT((fdt_setprop_cell(fdt, off, "phandle", 348 + 
PHANDLE_NVLINK(sphb, i, j)))); 349 + _FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx))); 350 + g_free(linkname); 351 + ++linkidx; 352 + } 353 + } 354 + 355 + /* Add memory nodes for GPU RAM and mark them unusable */ 356 + for (i = 0; i < sphb->nvgpus->num; ++i) { 357 + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; 358 + Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev), 359 + "nvlink2-mr[0]", NULL); 360 + uint32_t associativity[] = { 361 + cpu_to_be32(0x4), 362 + SPAPR_GPU_NUMA_ID, 363 + SPAPR_GPU_NUMA_ID, 364 + SPAPR_GPU_NUMA_ID, 365 + cpu_to_be32(nvslot->numa_id) 366 + }; 367 + uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL); 368 + uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) }; 369 + char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa); 370 + int off = fdt_add_subnode(fdt, 0, mem_name); 371 + 372 + _FDT(off); 373 + _FDT((fdt_setprop_string(fdt, off, "device_type", "memory"))); 374 + _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg)))); 375 + _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity, 376 + sizeof(associativity)))); 377 + 378 + _FDT((fdt_setprop_string(fdt, off, "compatible", 379 + "ibm,coherent-device-memory"))); 380 + 381 + mem_reg[1] = cpu_to_be64(0); 382 + _FDT((fdt_setprop(fdt, off, "linux,usable-memory", mem_reg, 383 + sizeof(mem_reg)))); 384 + _FDT((fdt_setprop_cell(fdt, off, "phandle", 385 + PHANDLE_GPURAM(sphb, i)))); 386 + g_free(mem_name); 387 + } 388 + 389 + } 390 + 391 + void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset, 392 + SpaprPhbState *sphb) 393 + { 394 + int i, j; 395 + 396 + if (!sphb->nvgpus) { 397 + return; 398 + } 399 + 400 + for (i = 0; i < sphb->nvgpus->num; ++i) { 401 + struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i]; 402 + 403 + /* Skip "slot" without attached GPU */ 404 + if (!nvslot->gpdev) { 405 + continue; 406 + } 407 + if (dev == nvslot->gpdev) { 408 + 
uint32_t npus[nvslot->linknum]; 409 + 410 + for (j = 0; j < nvslot->linknum; ++j) { 411 + PCIDevice *npdev = nvslot->links[j].npdev; 412 + 413 + npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev)); 414 + } 415 + _FDT(fdt_setprop(fdt, offset, "ibm,npu", npus, 416 + j * sizeof(npus[0]))); 417 + _FDT((fdt_setprop_cell(fdt, offset, "phandle", 418 + PHANDLE_PCIDEV(sphb, dev)))); 419 + continue; 420 + } 421 + 422 + for (j = 0; j < nvslot->linknum; ++j) { 423 + if (dev != nvslot->links[j].npdev) { 424 + continue; 425 + } 426 + 427 + _FDT((fdt_setprop_cell(fdt, offset, "phandle", 428 + PHANDLE_PCIDEV(sphb, dev)))); 429 + _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu", 430 + PHANDLE_PCIDEV(sphb, nvslot->gpdev))); 431 + _FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink", 432 + PHANDLE_NVLINK(sphb, i, j)))); 433 + /* 434 + * If we ever want to emulate GPU RAM at the same location as on 435 + * the host - here is the encoding GPA->TGT: 436 + * 437 + * gta = ((sphb->nv2_gpa >> 42) & 0x1) << 42; 438 + * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43; 439 + * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45; 440 + * gta |= sphb->nv2_gpa & ((1UL << 43) - 1); 441 + */ 442 + _FDT(fdt_setprop_cell(fdt, offset, "memory-region", 443 + PHANDLE_GPURAM(sphb, i))); 444 + _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr", 445 + nvslot->tgt)); 446 + _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed", 447 + nvslot->links[j].link_speed)); 448 + } 449 + } 450 + }
+1 -1
hw/ppc/spapr_rtas.c
··· 404 404 405 405 token -= RTAS_TOKEN_BASE; 406 406 407 - assert(!rtas_table[token].name); 407 + assert(!name || !rtas_table[token].name); 408 408 409 409 rtas_table[token].name = name; 410 410 rtas_table[token].fn = fn;
+131
hw/vfio/pci-quirks.c
··· 2180 2180 2181 2181 return 0; 2182 2182 } 2183 + 2184 + static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v, 2185 + const char *name, 2186 + void *opaque, Error **errp) 2187 + { 2188 + uint64_t tgt = (uintptr_t) opaque; 2189 + visit_type_uint64(v, name, &tgt, errp); 2190 + } 2191 + 2192 + static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v, 2193 + const char *name, 2194 + void *opaque, Error **errp) 2195 + { 2196 + uint32_t link_speed = (uint32_t)(uintptr_t) opaque; 2197 + visit_type_uint32(v, name, &link_speed, errp); 2198 + } 2199 + 2200 + int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp) 2201 + { 2202 + int ret; 2203 + void *p; 2204 + struct vfio_region_info *nv2reg = NULL; 2205 + struct vfio_info_cap_header *hdr; 2206 + struct vfio_region_info_cap_nvlink2_ssatgt *cap; 2207 + VFIOQuirk *quirk; 2208 + 2209 + ret = vfio_get_dev_region_info(&vdev->vbasedev, 2210 + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | 2211 + PCI_VENDOR_ID_NVIDIA, 2212 + VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM, 2213 + &nv2reg); 2214 + if (ret) { 2215 + return ret; 2216 + } 2217 + 2218 + hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT); 2219 + if (!hdr) { 2220 + ret = -ENODEV; 2221 + goto free_exit; 2222 + } 2223 + cap = (void *) hdr; 2224 + 2225 + p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE | PROT_EXEC, 2226 + MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset); 2227 + if (p == MAP_FAILED) { 2228 + ret = -errno; 2229 + goto free_exit; 2230 + } 2231 + 2232 + quirk = vfio_quirk_alloc(1); 2233 + memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr", 2234 + nv2reg->size, p); 2235 + QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); 2236 + 2237 + object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", 2238 + vfio_pci_nvlink2_get_tgt, NULL, NULL, 2239 + (void *) (uintptr_t) cap->tgt, NULL); 2240 + trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt, 2241 + nv2reg->size); 2242 + free_exit: 2243 
+ g_free(nv2reg); 2244 + 2245 + return ret; 2246 + } 2247 + 2248 + int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp) 2249 + { 2250 + int ret; 2251 + void *p; 2252 + struct vfio_region_info *atsdreg = NULL; 2253 + struct vfio_info_cap_header *hdr; 2254 + struct vfio_region_info_cap_nvlink2_ssatgt *captgt; 2255 + struct vfio_region_info_cap_nvlink2_lnkspd *capspeed; 2256 + VFIOQuirk *quirk; 2257 + 2258 + ret = vfio_get_dev_region_info(&vdev->vbasedev, 2259 + VFIO_REGION_TYPE_PCI_VENDOR_TYPE | 2260 + PCI_VENDOR_ID_IBM, 2261 + VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD, 2262 + &atsdreg); 2263 + if (ret) { 2264 + return ret; 2265 + } 2266 + 2267 + hdr = vfio_get_region_info_cap(atsdreg, 2268 + VFIO_REGION_INFO_CAP_NVLINK2_SSATGT); 2269 + if (!hdr) { 2270 + ret = -ENODEV; 2271 + goto free_exit; 2272 + } 2273 + captgt = (void *) hdr; 2274 + 2275 + hdr = vfio_get_region_info_cap(atsdreg, 2276 + VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD); 2277 + if (!hdr) { 2278 + ret = -ENODEV; 2279 + goto free_exit; 2280 + } 2281 + capspeed = (void *) hdr; 2282 + 2283 + /* Some NVLink bridges may not have assigned ATSD */ 2284 + if (atsdreg->size) { 2285 + p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE | PROT_EXEC, 2286 + MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset); 2287 + if (p == MAP_FAILED) { 2288 + ret = -errno; 2289 + goto free_exit; 2290 + } 2291 + 2292 + quirk = vfio_quirk_alloc(1); 2293 + memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev), 2294 + "nvlink2-atsd-mr", atsdreg->size, p); 2295 + QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next); 2296 + } 2297 + 2298 + object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64", 2299 + vfio_pci_nvlink2_get_tgt, NULL, NULL, 2300 + (void *) (uintptr_t) captgt->tgt, NULL); 2301 + trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt, 2302 + atsdreg->size); 2303 + 2304 + object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32", 2305 + vfio_pci_nvlink2_get_link_speed, NULL, NULL, 2306 + 
(void *) (uintptr_t) capspeed->link_speed, NULL); 2307 + trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name, 2308 + capspeed->link_speed); 2309 + free_exit: 2310 + g_free(atsdreg); 2311 + 2312 + return ret; 2313 + }
+14
hw/vfio/pci.c
··· 3086 3086 } 3087 3087 } 3088 3088 3089 + if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) { 3090 + ret = vfio_pci_nvidia_v100_ram_init(vdev, errp); 3091 + if (ret && ret != -ENODEV) { 3092 + error_report("Failed to setup NVIDIA V100 GPU RAM"); 3093 + } 3094 + } 3095 + 3096 + if (vdev->vendor_id == PCI_VENDOR_ID_IBM) { 3097 + ret = vfio_pci_nvlink2_init(vdev, errp); 3098 + if (ret && ret != -ENODEV) { 3099 + error_report("Failed to setup NVlink2 bridge"); 3100 + } 3101 + } 3102 + 3089 3103 vfio_register_err_notifier(vdev); 3090 3104 vfio_register_req_notifier(vdev); 3091 3105 vfio_setup_resetfn_quirk(vdev);
+2
hw/vfio/pci.h
··· 196 196 int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, 197 197 struct vfio_region_info *info, 198 198 Error **errp); 199 + int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp); 200 + int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp); 199 201 200 202 void vfio_display_reset(VFIOPCIDevice *vdev); 201 203 int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
+4
hw/vfio/trace-events
··· 86 86 vfio_pci_igd_host_bridge_enabled(const char *name) "%s" 87 87 vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s" 88 88 89 + vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64 90 + vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64 91 + vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x" 92 + 89 93 # common.c 90 94 vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)" 91 95 vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
+45
include/hw/pci-host/spapr.h
··· 87 87 uint32_t mig_liobn; 88 88 hwaddr mig_mem_win_addr, mig_mem_win_size; 89 89 hwaddr mig_io_win_addr, mig_io_win_size; 90 + hwaddr nv2_gpa_win_addr; 91 + hwaddr nv2_atsd_win_addr; 92 + struct spapr_phb_pci_nvgpu_config *nvgpus; 90 93 }; 91 94 92 95 #define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL ··· 105 108 106 109 #define SPAPR_PCI_MSI_WINDOW 0x40000000000ULL 107 110 111 + #define SPAPR_PCI_NV2RAM64_WIN_BASE SPAPR_PCI_LIMIT 112 + #define SPAPR_PCI_NV2RAM64_WIN_SIZE (2 * TiB) /* For up to 6 GPUs 256GB each */ 113 + 114 + /* Max number of these GPUsper a physical box */ 115 + #define NVGPU_MAX_NUM 6 116 + /* Max number of NVLinks per GPU in any physical box */ 117 + #define NVGPU_MAX_LINKS 3 118 + 119 + /* 120 + * GPU RAM starts at 64TiB so huge DMA window to cover it all ends at 128TiB 121 + * which is enough. We do not need DMA for ATSD so we put them at 128TiB. 122 + */ 123 + #define SPAPR_PCI_NV2ATSD_WIN_BASE (128 * TiB) 124 + #define SPAPR_PCI_NV2ATSD_WIN_SIZE (NVGPU_MAX_NUM * NVGPU_MAX_LINKS * \ 125 + 64 * KiB) 126 + 108 127 static inline qemu_irq spapr_phb_lsi_qirq(struct SpaprPhbState *phb, int pin) 109 128 { 110 129 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); ··· 135 154 int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option); 136 155 int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb); 137 156 void spapr_phb_vfio_reset(DeviceState *qdev); 157 + void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp); 158 + void spapr_phb_nvgpu_free(SpaprPhbState *sphb); 159 + void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off, 160 + Error **errp); 161 + void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt); 162 + void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset, 163 + SpaprPhbState *sphb); 138 164 #else 139 165 static inline bool spapr_phb_eeh_available(SpaprPhbState *sphb) 140 166 { ··· 159 185 return RTAS_OUT_HW_ERROR; 160 186 } 161 187 static inline void 
spapr_phb_vfio_reset(DeviceState *qdev) 188 + { 189 + } 190 + static inline void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) 191 + { 192 + } 193 + static inline void spapr_phb_nvgpu_free(SpaprPhbState *sphb) 194 + { 195 + } 196 + static inline void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, 197 + int bus_off, Error **errp) 198 + { 199 + } 200 + static inline void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, 201 + void *fdt) 202 + { 203 + } 204 + static inline void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, 205 + int offset, 206 + SpaprPhbState *sphb) 162 207 { 163 208 } 164 209 #endif
+4
include/hw/pci/pci.h
··· 413 413 void pci_bus_irqs_cleanup(PCIBus *bus); 414 414 int pci_bus_get_irq_level(PCIBus *bus, int irq_num); 415 415 /* 0 <= pin <= 3 0 = INTA, 1 = INTB, 2 = INTC, 3 = INTD */ 416 + static inline int pci_swizzle(int slot, int pin) 417 + { 418 + return (slot + pin) % PCI_NUM_PINS; 419 + } 416 420 int pci_swizzle_map_irq_fn(PCIDevice *pci_dev, int pin); 417 421 PCIBus *pci_register_root_bus(DeviceState *parent, const char *name, 418 422 pci_set_irq_fn set_irq, pci_map_irq_fn map_irq,
+10 -1
include/hw/ppc/spapr.h
··· 123 123 void (*phb_placement)(SpaprMachineState *spapr, uint32_t index, 124 124 uint64_t *buid, hwaddr *pio, 125 125 hwaddr *mmio32, hwaddr *mmio64, 126 - unsigned n_dma, uint32_t *liobns, Error **errp); 126 + unsigned n_dma, uint32_t *liobns, hwaddr *nv2gpa, 127 + hwaddr *nv2atsd, Error **errp); 127 128 SpaprResizeHpt resize_hpt_default; 128 129 SpaprCapabilities default_caps; 129 130 SpaprIrq *irq; ··· 199 200 200 201 bool cmd_line_caps[SPAPR_CAP_NUM]; 201 202 SpaprCapabilities def, eff, mig; 203 + 204 + unsigned gpu_numa_id; 202 205 }; 203 206 204 207 #define H_SUCCESS 0 ··· 672 675 uint32_t nargs, target_ulong args, 673 676 uint32_t nret, target_ulong rets); 674 677 void spapr_rtas_register(int token, const char *name, spapr_rtas_fn fn); 678 + static inline void spapr_rtas_unregister(int token) 679 + { 680 + spapr_rtas_register(token, NULL, NULL); 681 + } 675 682 target_ulong spapr_rtas_call(PowerPCCPU *cpu, SpaprMachineState *sm, 676 683 uint32_t token, uint32_t nargs, target_ulong args, 677 684 uint32_t nret, target_ulong rets); ··· 777 784 Error **errp); 778 785 void spapr_clear_pending_events(SpaprMachineState *spapr); 779 786 int spapr_max_server_number(SpaprMachineState *spapr); 787 + void spapr_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 788 + uint64_t pte0, uint64_t pte1); 780 789 781 790 /* DRC callbacks. */ 782 791 void spapr_core_release(DeviceState *dev);
+1 -1
target/ppc/cpu-models.c
··· 740 740 POWERPC_DEF("7457a_v1.2", CPU_POWERPC_74x7A_v12, 7455, 741 741 "PowerPC 7457A v1.2 (G4)") 742 742 /* 64 bits PowerPC */ 743 - #if defined (TARGET_PPC64) 743 + #if defined(TARGET_PPC64) 744 744 POWERPC_DEF("970_v2.2", CPU_POWERPC_970_v22, 970, 745 745 "PowerPC 970 v2.2") 746 746 POWERPC_DEF("970fx_v1.0", CPU_POWERPC_970FX_v10, 970,
+2 -1
target/ppc/cpu-models.h
··· 393 393 CPU_POWERPC_RS64IV = 0x00370000, 394 394 #endif /* defined(TARGET_PPC64) */ 395 395 /* Original POWER */ 396 - /* XXX: should be POWER (RIOS), RSC3308, RSC4608, 396 + /* 397 + * XXX: should be POWER (RIOS), RSC3308, RSC4608, 397 398 * POWER2 (RIOS2) & RSC2 (P2SC) here 398 399 */ 399 400 /* PA Semi core */
+131 -110
target/ppc/cpu.h
··· 23 23 #include "qemu-common.h" 24 24 #include "qemu/int128.h" 25 25 26 - //#define PPC_EMULATE_32BITS_HYPV 26 + /* #define PPC_EMULATE_32BITS_HYPV */ 27 27 28 - #if defined (TARGET_PPC64) 28 + #if defined(TARGET_PPC64) 29 29 /* PowerPC 64 definitions */ 30 30 #define TARGET_LONG_BITS 64 31 31 #define TARGET_PAGE_BITS 12 32 32 33 33 #define TCG_GUEST_DEFAULT_MO 0 34 34 35 - /* Note that the official physical address space bits is 62-M where M 36 - is implementation dependent. I've not looked up M for the set of 37 - cpus we emulate at the system level. */ 35 + /* 36 + * Note that the official physical address space bits is 62-M where M 37 + * is implementation dependent. I've not looked up M for the set of 38 + * cpus we emulate at the system level. 39 + */ 38 40 #define TARGET_PHYS_ADDR_SPACE_BITS 62 39 41 40 - /* Note that the PPC environment architecture talks about 80 bit virtual 41 - addresses, with segmentation. Obviously that's not all visible to a 42 - single process, which is all we're concerned with here. */ 42 + /* 43 + * Note that the PPC environment architecture talks about 80 bit 44 + * virtual addresses, with segmentation. Obviously that's not all 45 + * visible to a single process, which is all we're concerned with 46 + * here. 
47 + */ 43 48 #ifdef TARGET_ABI32 44 49 # define TARGET_VIRT_ADDR_SPACE_BITS 32 45 50 #else ··· 49 54 #define TARGET_PAGE_BITS_64K 16 50 55 #define TARGET_PAGE_BITS_16M 24 51 56 52 - #else /* defined (TARGET_PPC64) */ 57 + #else /* defined(TARGET_PPC64) */ 53 58 /* PowerPC 32 definitions */ 54 59 #define TARGET_LONG_BITS 32 55 60 #define TARGET_PAGE_BITS 12 ··· 57 62 #define TARGET_PHYS_ADDR_SPACE_BITS 36 58 63 #define TARGET_VIRT_ADDR_SPACE_BITS 32 59 64 60 - #endif /* defined (TARGET_PPC64) */ 65 + #endif /* defined(TARGET_PPC64) */ 61 66 62 67 #define CPUArchState struct CPUPPCState 63 68 64 69 #include "exec/cpu-defs.h" 65 70 #include "cpu-qom.h" 66 71 67 - #if defined (TARGET_PPC64) 72 + #if defined(TARGET_PPC64) 68 73 #define PPC_ELF_MACHINE EM_PPC64 69 74 #else 70 75 #define PPC_ELF_MACHINE EM_PPC ··· 237 242 const char *name; 238 243 target_ulong default_value; 239 244 #ifdef CONFIG_KVM 240 - /* We (ab)use the fact that all the SPRs will have ids for the 245 + /* 246 + * We (ab)use the fact that all the SPRs will have ids for the 241 247 * ONE_REG interface will have KVM_REG_PPC to use 0 as meaning, 242 - * don't sync this */ 248 + * don't sync this 249 + */ 243 250 uint64_t one_reg_id; 244 251 #endif 245 252 }; ··· 656 663 #define fpscr_eex (((env->fpscr) >> FPSCR_XX) & ((env->fpscr) >> FPSCR_XE) & \ 657 664 0x1F) 658 665 659 - #define FP_FX (1ull << FPSCR_FX) 660 - #define FP_FEX (1ull << FPSCR_FEX) 661 - #define FP_VX (1ull << FPSCR_VX) 662 - #define FP_OX (1ull << FPSCR_OX) 663 - #define FP_UX (1ull << FPSCR_UX) 664 - #define FP_ZX (1ull << FPSCR_ZX) 665 - #define FP_XX (1ull << FPSCR_XX) 666 - #define FP_VXSNAN (1ull << FPSCR_VXSNAN) 667 - #define FP_VXISI (1ull << FPSCR_VXISI) 668 - #define FP_VXIDI (1ull << FPSCR_VXIDI) 669 - #define FP_VXZDZ (1ull << FPSCR_VXZDZ) 670 - #define FP_VXIMZ (1ull << FPSCR_VXIMZ) 671 - #define FP_VXVC (1ull << FPSCR_VXVC) 672 - #define FP_FR (1ull << FSPCR_FR) 673 - #define FP_FI (1ull << FPSCR_FI) 674 - #define FP_C 
(1ull << FPSCR_C) 675 - #define FP_FL (1ull << FPSCR_FL) 676 - #define FP_FG (1ull << FPSCR_FG) 677 - #define FP_FE (1ull << FPSCR_FE) 678 - #define FP_FU (1ull << FPSCR_FU) 679 - #define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU) 680 - #define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU) 681 - #define FP_VXSOFT (1ull << FPSCR_VXSOFT) 682 - #define FP_VXSQRT (1ull << FPSCR_VXSQRT) 683 - #define FP_VXCVI (1ull << FPSCR_VXCVI) 684 - #define FP_VE (1ull << FPSCR_VE) 685 - #define FP_OE (1ull << FPSCR_OE) 686 - #define FP_UE (1ull << FPSCR_UE) 687 - #define FP_ZE (1ull << FPSCR_ZE) 688 - #define FP_XE (1ull << FPSCR_XE) 689 - #define FP_NI (1ull << FPSCR_NI) 690 - #define FP_RN1 (1ull << FPSCR_RN1) 691 - #define FP_RN (1ull << FPSCR_RN) 666 + #define FP_FX (1ull << FPSCR_FX) 667 + #define FP_FEX (1ull << FPSCR_FEX) 668 + #define FP_VX (1ull << FPSCR_VX) 669 + #define FP_OX (1ull << FPSCR_OX) 670 + #define FP_UX (1ull << FPSCR_UX) 671 + #define FP_ZX (1ull << FPSCR_ZX) 672 + #define FP_XX (1ull << FPSCR_XX) 673 + #define FP_VXSNAN (1ull << FPSCR_VXSNAN) 674 + #define FP_VXISI (1ull << FPSCR_VXISI) 675 + #define FP_VXIDI (1ull << FPSCR_VXIDI) 676 + #define FP_VXZDZ (1ull << FPSCR_VXZDZ) 677 + #define FP_VXIMZ (1ull << FPSCR_VXIMZ) 678 + #define FP_VXVC (1ull << FPSCR_VXVC) 679 + #define FP_FR (1ull << FSPCR_FR) 680 + #define FP_FI (1ull << FPSCR_FI) 681 + #define FP_C (1ull << FPSCR_C) 682 + #define FP_FL (1ull << FPSCR_FL) 683 + #define FP_FG (1ull << FPSCR_FG) 684 + #define FP_FE (1ull << FPSCR_FE) 685 + #define FP_FU (1ull << FPSCR_FU) 686 + #define FP_FPCC (FP_FL | FP_FG | FP_FE | FP_FU) 687 + #define FP_FPRF (FP_C | FP_FL | FP_FG | FP_FE | FP_FU) 688 + #define FP_VXSOFT (1ull << FPSCR_VXSOFT) 689 + #define FP_VXSQRT (1ull << FPSCR_VXSQRT) 690 + #define FP_VXCVI (1ull << FPSCR_VXCVI) 691 + #define FP_VE (1ull << FPSCR_VE) 692 + #define FP_OE (1ull << FPSCR_OE) 693 + #define FP_UE (1ull << FPSCR_UE) 694 + #define FP_ZE (1ull << FPSCR_ZE) 695 + #define FP_XE (1ull << 
FPSCR_XE) 696 + #define FP_NI (1ull << FPSCR_NI) 697 + #define FP_RN1 (1ull << FPSCR_RN1) 698 + #define FP_RN (1ull << FPSCR_RN) 692 699 693 700 /* the exception bits which can be cleared by mcrfs - includes FX */ 694 701 #define FP_EX_CLEAR_BITS (FP_FX | FP_OX | FP_UX | FP_ZX | \ ··· 698 705 699 706 /*****************************************************************************/ 700 707 /* Vector status and control register */ 701 - #define VSCR_NJ 16 /* Vector non-java */ 702 - #define VSCR_SAT 0 /* Vector saturation */ 708 + #define VSCR_NJ 16 /* Vector non-java */ 709 + #define VSCR_SAT 0 /* Vector saturation */ 703 710 704 711 /*****************************************************************************/ 705 712 /* BookE e500 MMU registers */ ··· 962 969 /*****************************************************************************/ 963 970 /* The whole PowerPC CPU context */ 964 971 965 - /* PowerPC needs eight modes for different hypervisor/supervisor/guest + 966 - * real/paged mode combinations. The other two modes are for external PID 967 - * load/store. 972 + /* 973 + * PowerPC needs eight modes for different hypervisor/supervisor/guest 974 + * + real/paged mode combinations. The other two modes are for 975 + * external PID load/store. 968 976 */ 969 977 #define NB_MMU_MODES 10 970 978 #define MMU_MODE8_SUFFIX _epl ··· 976 984 #define PPC_CPU_INDIRECT_OPCODES_LEN 0x20 977 985 978 986 struct CPUPPCState { 979 - /* First are the most commonly used resources 980 - * during translated code execution 987 + /* 988 + * First are the most commonly used resources during translated 989 + * code execution 981 990 */ 982 991 /* general purpose registers */ 983 992 target_ulong gpr[32]; ··· 1023 1032 /* High part of 128-bit helper return. 
*/ 1024 1033 uint64_t retxh; 1025 1034 1026 - int access_type; /* when a memory exception occurs, the access 1027 - type is stored here */ 1035 + /* when a memory exception occurs, the access type is stored here */ 1036 + int access_type; 1028 1037 1029 1038 CPU_COMMON 1030 1039 ··· 1072 1081 /* SPE registers */ 1073 1082 uint64_t spe_acc; 1074 1083 uint32_t spe_fscr; 1075 - /* SPE and Altivec can share a status since they will never be used 1076 - * simultaneously */ 1084 + /* 1085 + * SPE and Altivec can share a status since they will never be 1086 + * used simultaneously 1087 + */ 1077 1088 float_status vec_status; 1078 1089 1079 1090 /* Internal devices resources */ ··· 1103 1114 int error_code; 1104 1115 uint32_t pending_interrupts; 1105 1116 #if !defined(CONFIG_USER_ONLY) 1106 - /* This is the IRQ controller, which is implementation dependent 1117 + /* 1118 + * This is the IRQ controller, which is implementation dependent 1107 1119 * and only relevant when emulating a complete machine. 1108 1120 */ 1109 1121 uint32_t irq_input_state; ··· 1117 1129 hwaddr mpic_iack; 1118 1130 /* true when the external proxy facility mode is enabled */ 1119 1131 bool mpic_proxy; 1120 - /* set when the processor has an HV mode, thus HV priv 1132 + /* 1133 + * set when the processor has an HV mode, thus HV priv 1121 1134 * instructions and SPRs are diallowed if MSR:HV is 0 1122 1135 */ 1123 1136 bool has_hv_mode; ··· 1149 1162 1150 1163 /* booke timers */ 1151 1164 1152 - /* Specifies bit locations of the Time Base used to signal a fixed timer 1153 - * exception on a transition from 0 to 1. (watchdog or fixed-interval timer) 1165 + /* 1166 + * Specifies bit locations of the Time Base used to signal a fixed 1167 + * timer exception on a transition from 0 to 1. (watchdog or 1168 + * fixed-interval timer) 1154 1169 * 1155 1170 * 0 selects the least significant bit. 1156 1171 * 63 selects the most significant bit. 
··· 1250 1265 void (*unmap_hptes)(PPCVirtualHypervisor *vhyp, 1251 1266 const ppc_hash_pte64_t *hptes, 1252 1267 hwaddr ptex, int n); 1253 - void (*store_hpte)(PPCVirtualHypervisor *vhyp, hwaddr ptex, 1254 - uint64_t pte0, uint64_t pte1); 1268 + void (*hpte_set_c)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1); 1269 + void (*hpte_set_r)(PPCVirtualHypervisor *vhyp, hwaddr ptex, uint64_t pte1); 1255 1270 void (*get_pate)(PPCVirtualHypervisor *vhyp, ppc_v3_pate_t *entry); 1256 1271 target_ulong (*encode_hpt_for_kvm_pr)(PPCVirtualHypervisor *vhyp); 1257 1272 }; ··· 1290 1305 1291 1306 /*****************************************************************************/ 1292 1307 void ppc_translate_init(void); 1293 - /* you can call this signal handler from your SIGBUS and SIGSEGV 1294 - signal handlers to inform the virtual CPU of exceptions. non zero 1295 - is returned if the signal was handled by the virtual CPU. */ 1296 - int cpu_ppc_signal_handler (int host_signum, void *pinfo, 1297 - void *puc); 1308 + /* 1309 + * you can call this signal handler from your SIGBUS and SIGSEGV 1310 + * signal handlers to inform the virtual CPU of exceptions. non zero 1311 + * is returned if the signal was handled by the virtual CPU. 
1312 + */ 1313 + int cpu_ppc_signal_handler(int host_signum, void *pinfo, void *puc); 1298 1314 #if defined(CONFIG_USER_ONLY) 1299 1315 int ppc_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw, 1300 1316 int mmu_idx); 1301 1317 #endif 1302 1318 1303 1319 #if !defined(CONFIG_USER_ONLY) 1304 - void ppc_store_sdr1 (CPUPPCState *env, target_ulong value); 1320 + void ppc_store_sdr1(CPUPPCState *env, target_ulong value); 1305 1321 void ppc_store_ptcr(CPUPPCState *env, target_ulong value); 1306 1322 #endif /* !defined(CONFIG_USER_ONLY) */ 1307 - void ppc_store_msr (CPUPPCState *env, target_ulong value); 1323 + void ppc_store_msr(CPUPPCState *env, target_ulong value); 1308 1324 1309 1325 void ppc_cpu_list(void); 1310 1326 1311 1327 /* Time-base and decrementer management */ 1312 1328 #ifndef NO_CPU_IO_DEFS 1313 - uint64_t cpu_ppc_load_tbl (CPUPPCState *env); 1314 - uint32_t cpu_ppc_load_tbu (CPUPPCState *env); 1315 - void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value); 1316 - void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value); 1317 - uint64_t cpu_ppc_load_atbl (CPUPPCState *env); 1318 - uint32_t cpu_ppc_load_atbu (CPUPPCState *env); 1319 - void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value); 1320 - void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value); 1329 + uint64_t cpu_ppc_load_tbl(CPUPPCState *env); 1330 + uint32_t cpu_ppc_load_tbu(CPUPPCState *env); 1331 + void cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value); 1332 + void cpu_ppc_store_tbl(CPUPPCState *env, uint32_t value); 1333 + uint64_t cpu_ppc_load_atbl(CPUPPCState *env); 1334 + uint32_t cpu_ppc_load_atbu(CPUPPCState *env); 1335 + void cpu_ppc_store_atbl(CPUPPCState *env, uint32_t value); 1336 + void cpu_ppc_store_atbu(CPUPPCState *env, uint32_t value); 1321 1337 bool ppc_decr_clear_on_delivery(CPUPPCState *env); 1322 1338 target_ulong cpu_ppc_load_decr(CPUPPCState *env); 1323 1339 void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value); 1324 1340 target_ulong 
cpu_ppc_load_hdecr(CPUPPCState *env); 1325 1341 void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value); 1326 - uint64_t cpu_ppc_load_purr (CPUPPCState *env); 1327 - uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env); 1328 - uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env); 1342 + uint64_t cpu_ppc_load_purr(CPUPPCState *env); 1343 + uint32_t cpu_ppc601_load_rtcl(CPUPPCState *env); 1344 + uint32_t cpu_ppc601_load_rtcu(CPUPPCState *env); 1329 1345 #if !defined(CONFIG_USER_ONLY) 1330 - void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value); 1331 - void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value); 1332 - target_ulong load_40x_pit (CPUPPCState *env); 1333 - void store_40x_pit (CPUPPCState *env, target_ulong val); 1334 - void store_40x_dbcr0 (CPUPPCState *env, uint32_t val); 1335 - void store_40x_sler (CPUPPCState *env, uint32_t val); 1336 - void store_booke_tcr (CPUPPCState *env, target_ulong val); 1337 - void store_booke_tsr (CPUPPCState *env, target_ulong val); 1338 - void ppc_tlb_invalidate_all (CPUPPCState *env); 1339 - void ppc_tlb_invalidate_one (CPUPPCState *env, target_ulong addr); 1346 + void cpu_ppc601_store_rtcl(CPUPPCState *env, uint32_t value); 1347 + void cpu_ppc601_store_rtcu(CPUPPCState *env, uint32_t value); 1348 + target_ulong load_40x_pit(CPUPPCState *env); 1349 + void store_40x_pit(CPUPPCState *env, target_ulong val); 1350 + void store_40x_dbcr0(CPUPPCState *env, uint32_t val); 1351 + void store_40x_sler(CPUPPCState *env, uint32_t val); 1352 + void store_booke_tcr(CPUPPCState *env, target_ulong val); 1353 + void store_booke_tsr(CPUPPCState *env, target_ulong val); 1354 + void ppc_tlb_invalidate_all(CPUPPCState *env); 1355 + void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr); 1340 1356 void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp); 1341 1357 #endif 1342 1358 #endif ··· 1349 1365 1350 1366 gprv = env->gpr[gprn]; 1351 1367 if (env->flags & POWERPC_FLAG_SPE) { 1352 - /* If the CPU implements the 
SPE extension, we have to get the 1368 + /* 1369 + * If the CPU implements the SPE extension, we have to get the 1353 1370 * high bits of the GPR from the gprh storage area 1354 1371 */ 1355 1372 gprv &= 0xFFFFFFFFULL; ··· 1360 1377 } 1361 1378 1362 1379 /* Device control registers */ 1363 - int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp); 1364 - int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val); 1380 + int ppc_dcr_read(ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp); 1381 + int ppc_dcr_write(ppc_dcr_t *dcr_env, int dcrn, uint32_t val); 1365 1382 1366 1383 #define POWERPC_CPU_TYPE_SUFFIX "-" TYPE_POWERPC_CPU 1367 1384 #define POWERPC_CPU_TYPE_NAME(model) model POWERPC_CPU_TYPE_SUFFIX ··· 1372 1389 1373 1390 /* MMU modes definitions */ 1374 1391 #define MMU_USER_IDX 0 1375 - static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch) 1392 + static inline int cpu_mmu_index(CPUPPCState *env, bool ifetch) 1376 1393 { 1377 1394 return ifetch ? env->immu_idx : env->dmmu_idx; 1378 1395 } ··· 1990 2007 /* External Input Interrupt Directed to Guest State */ 1991 2008 #define EPCR_EXTGS (1 << 31) 1992 2009 1993 - #define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ 1994 - #define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */ 1995 - #define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */ 1996 - #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ 1997 - #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ 2010 + #define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */ 2011 + #define L1CSR0_CUL 0x00000400 /* (D-)Cache Unable to Lock */ 2012 + #define L1CSR0_DCLFR 0x00000100 /* D-Cache Lock Flash Reset */ 2013 + #define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */ 2014 + #define L1CSR0_DCE 0x00000001 /* Data Cache Enable */ 1998 2015 1999 - #define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */ 2000 - #define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */ 2001 - #define 
L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */ 2002 - #define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */ 2003 - #define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */ 2016 + #define L1CSR1_CPE 0x00010000 /* Instruction Cache Parity Enable */ 2017 + #define L1CSR1_ICUL 0x00000400 /* I-Cache Unable to Lock */ 2018 + #define L1CSR1_ICLFR 0x00000100 /* I-Cache Lock Flash Reset */ 2019 + #define L1CSR1_ICFI 0x00000002 /* Instruction Cache Flash Invalidate */ 2020 + #define L1CSR1_ICE 0x00000001 /* Instruction Cache Enable */ 2004 2021 2005 2022 /* HID0 bits */ 2006 2023 #define HID0_DEEPNAP (1 << 24) /* pre-2.06 */ ··· 2226 2243 }; 2227 2244 2228 2245 /*****************************************************************************/ 2229 - /* Memory access type : 2246 + /* 2247 + * Memory access type : 2230 2248 * may be needed for precise access rights control and precise exceptions. 2231 2249 */ 2232 2250 enum { ··· 2242 2260 ACCESS_CACHE = 0x60, /* Cache manipulation */ 2243 2261 }; 2244 2262 2245 - /* Hardware interruption sources: 2246 - * all those exception can be raised simulteaneously 2263 + /* 2264 + * Hardware interrupt sources: 2265 + * all those exception can be raised simulteaneously 2247 2266 */ 2248 2267 /* Input pins definitions */ 2249 2268 enum { ··· 2325 2344 enum { 2326 2345 /* POWER7 input pins */ 2327 2346 POWER7_INPUT_INT = 0, 2328 - /* POWER7 probably has other inputs, but we don't care about them 2347 + /* 2348 + * POWER7 probably has other inputs, but we don't care about them 2329 2349 * for any existing machine. We can wire these up when we need 2330 - * them */ 2350 + * them 2351 + */ 2331 2352 POWER7_INPUT_NB, 2332 2353 }; 2333 2354
+7 -7
target/ppc/dfp_helper.c
··· 1104 1104 } \ 1105 1105 } \ 1106 1106 \ 1107 - while (offset < (size)/4) { \ 1107 + while (offset < (size) / 4) { \ 1108 1108 n++; \ 1109 - digits[(size)/4-n] = dfp_get_bcd_digit_##size(dfp.b64, offset++); \ 1110 - if (digits[(size)/4-n] > 10) { \ 1109 + digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(dfp.b64, offset++); \ 1110 + if (digits[(size) / 4 - n] > 10) { \ 1111 1111 dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ 1112 1112 return; \ 1113 1113 } else { \ 1114 - nonzero |= (digits[(size)/4-n] > 0); \ 1114 + nonzero |= (digits[(size) / 4 - n] > 0); \ 1115 1115 } \ 1116 1116 } \ 1117 1117 \ 1118 1118 if (nonzero) { \ 1119 - decNumberSetBCD(&dfp.t, digits+((size)/4)-n, n); \ 1119 + decNumberSetBCD(&dfp.t, digits + ((size) / 4) - n, n); \ 1120 1120 } \ 1121 1121 \ 1122 1122 if (s && sgn) { \ ··· 1170 1170 static void dfp_set_raw_exp_64(uint64_t *t, uint64_t raw) 1171 1171 { 1172 1172 *t &= 0x8003ffffffffffffULL; 1173 - *t |= (raw << (63-13)); 1173 + *t |= (raw << (63 - 13)); 1174 1174 } 1175 1175 1176 1176 static void dfp_set_raw_exp_128(uint64_t *t, uint64_t raw) 1177 1177 { 1178 1178 t[HI_IDX] &= 0x80003fffffffffffULL; 1179 - t[HI_IDX] |= (raw << (63-17)); 1179 + t[HI_IDX] |= (raw << (63 - 17)); 1180 1180 } 1181 1181 1182 1182 #define DFP_HELPER_IEX(op, size) \
+53 -34
target/ppc/excp_helper.c
··· 25 25 #include "internal.h" 26 26 #include "helper_regs.h" 27 27 28 - //#define DEBUG_OP 29 - //#define DEBUG_SOFTWARE_TLB 30 - //#define DEBUG_EXCEPTIONS 28 + /* #define DEBUG_OP */ 29 + /* #define DEBUG_SOFTWARE_TLB */ 30 + /* #define DEBUG_EXCEPTIONS */ 31 31 32 32 #ifdef DEBUG_EXCEPTIONS 33 33 # define LOG_EXCP(...) qemu_log(__VA_ARGS__) ··· 126 126 return offset; 127 127 } 128 128 129 - /* Note that this function should be greatly optimized 130 - * when called with a constant excp, from ppc_hw_interrupt 129 + /* 130 + * Note that this function should be greatly optimized when called 131 + * with a constant excp, from ppc_hw_interrupt 131 132 */ 132 133 static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) 133 134 { ··· 147 148 msr = env->msr & ~0x783f0000ULL; 148 149 } 149 150 150 - /* new interrupt handler msr preserves existing HV and ME unless 151 + /* 152 + * new interrupt handler msr preserves existing HV and ME unless 151 153 * explicitly overriden 152 154 */ 153 155 new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB); ··· 166 168 excp = powerpc_reset_wakeup(cs, env, excp, &msr); 167 169 } 168 170 169 - /* Exception targetting modifiers 171 + /* 172 + * Exception targetting modifiers 170 173 * 171 174 * LPES0 is supported on POWER7/8/9 172 175 * LPES1 is not supported (old iSeries mode) ··· 194 197 ail = 0; 195 198 } 196 199 197 - /* Hypervisor emulation assistance interrupt only exists on server 200 + /* 201 + * Hypervisor emulation assistance interrupt only exists on server 198 202 * arch 2.05 server or later. We also don't want to generate it if 199 203 * we don't have HVB in msr_mask (PAPR mode). 200 204 */ ··· 229 233 break; 230 234 case POWERPC_EXCP_MCHECK: /* Machine check exception */ 231 235 if (msr_me == 0) { 232 - /* Machine check exception is not enabled. 233 - * Enter checkstop state. 236 + /* 237 + * Machine check exception is not enabled. Enter 238 + * checkstop state. 
234 239 */ 235 240 fprintf(stderr, "Machine check while not allowed. " 236 241 "Entering checkstop state\n"); ··· 242 247 cpu_interrupt_exittb(cs); 243 248 } 244 249 if (env->msr_mask & MSR_HVB) { 245 - /* ISA specifies HV, but can be delivered to guest with HV clear 246 - * (e.g., see FWNMI in PAPR). 250 + /* 251 + * ISA specifies HV, but can be delivered to guest with HV 252 + * clear (e.g., see FWNMI in PAPR). 247 253 */ 248 254 new_msr |= (target_ulong)MSR_HVB; 249 255 } ··· 294 300 break; 295 301 case POWERPC_EXCP_ALIGN: /* Alignment exception */ 296 302 /* Get rS/rD and rA from faulting opcode */ 297 - /* Note: the opcode fields will not be set properly for a direct 298 - * store load/store, but nobody cares as nobody actually uses 299 - * direct store segments. 303 + /* 304 + * Note: the opcode fields will not be set properly for a 305 + * direct store load/store, but nobody cares as nobody 306 + * actually uses direct store segments. 300 307 */ 301 308 env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; 302 309 break; ··· 310 317 return; 311 318 } 312 319 313 - /* FP exceptions always have NIP pointing to the faulting 320 + /* 321 + * FP exceptions always have NIP pointing to the faulting 314 322 * instruction, so always use store_next and claim we are 315 323 * precise in the MSR. 316 324 */ ··· 341 349 dump_syscall(env); 342 350 lev = env->error_code; 343 351 344 - /* We need to correct the NIP which in this case is supposed 352 + /* 353 + * We need to correct the NIP which in this case is supposed 345 354 * to point to the next instruction 346 355 */ 347 356 env->nip += 4; ··· 425 434 new_msr |= ((target_ulong)1 << MSR_ME); 426 435 } 427 436 if (env->msr_mask & MSR_HVB) { 428 - /* ISA specifies HV, but can be delivered to guest with HV clear 429 - * (e.g., see FWNMI in PAPR, NMI injection in QEMU). 437 + /* 438 + * ISA specifies HV, but can be delivered to guest with HV 439 + * clear (e.g., see FWNMI in PAPR, NMI injection in QEMU). 
430 440 */ 431 441 new_msr |= (target_ulong)MSR_HVB; 432 442 } else { ··· 675 685 env->spr[asrr1] = env->spr[srr1]; 676 686 } 677 687 678 - /* Sort out endianness of interrupt, this differs depending on the 688 + /* 689 + * Sort out endianness of interrupt, this differs depending on the 679 690 * CPU, the HV mode, etc... 680 691 */ 681 692 #ifdef TARGET_PPC64 ··· 716 727 } 717 728 vector |= env->excp_prefix; 718 729 719 - /* AIL only works if there is no HV transition and we are running with 720 - * translations enabled 730 + /* 731 + * AIL only works if there is no HV transition and we are running 732 + * with translations enabled 721 733 */ 722 734 if (!((msr >> MSR_IR) & 1) || !((msr >> MSR_DR) & 1) || 723 735 ((new_msr & MSR_HVB) && !(msr & MSR_HVB))) { ··· 745 757 } 746 758 } 747 759 #endif 748 - /* We don't use hreg_store_msr here as already have treated 749 - * any special case that could occur. Just store MSR and update hflags 760 + /* 761 + * We don't use hreg_store_msr here as already have treated any 762 + * special case that could occur. 
Just store MSR and update hflags 750 763 * 751 764 * Note: We *MUST* not use hreg_store_msr() as-is anyway because it 752 765 * will prevent setting of the HV bit which some exceptions might need ··· 762 775 /* Reset the reservation */ 763 776 env->reserve_addr = -1; 764 777 765 - /* Any interrupt is context synchronizing, check if TCG TLB 766 - * needs a delayed flush on ppc64 778 + /* 779 + * Any interrupt is context synchronizing, check if TCG TLB needs 780 + * a delayed flush on ppc64 767 781 */ 768 782 check_tlb_flush(env, false); 769 783 } ··· 1015 1029 cs = CPU(ppc_env_get_cpu(env)); 1016 1030 cs->halted = 1; 1017 1031 1018 - /* The architecture specifies that HDEC interrupts are 1019 - * discarded in PM states 1032 + /* 1033 + * The architecture specifies that HDEC interrupts are discarded 1034 + * in PM states 1020 1035 */ 1021 1036 env->pending_interrupts &= ~(1 << PPC_INTERRUPT_HDECR); 1022 1037 ··· 1047 1062 #if defined(DEBUG_OP) 1048 1063 cpu_dump_rfi(env->nip, env->msr); 1049 1064 #endif 1050 - /* No need to raise an exception here, 1051 - * as rfi is always the last insn of a TB 1065 + /* 1066 + * No need to raise an exception here, as rfi is always the last 1067 + * insn of a TB 1052 1068 */ 1053 1069 cpu_interrupt_exittb(cs); 1054 1070 /* Reset the reservation */ ··· 1067 1083 #if defined(TARGET_PPC64) 1068 1084 void helper_rfid(CPUPPCState *env) 1069 1085 { 1070 - /* The architeture defines a number of rules for which bits 1071 - * can change but in practice, we handle this in hreg_store_msr() 1086 + /* 1087 + * The architeture defines a number of rules for which bits can 1088 + * change but in practice, we handle this in hreg_store_msr() 1072 1089 * which will be called by do_rfi(), so there is no need to filter 1073 1090 * here 1074 1091 */ ··· 1206 1223 { 1207 1224 int msg = rb & DBELL_TYPE_MASK; 1208 1225 1209 - /* A Directed Hypervisor Doorbell message is sent only if the 1226 + /* 1227 + * A Directed Hypervisor Doorbell message is sent only 
if the 1210 1228 * message type is 5. All other types are reserved and the 1211 - * instruction is a no-op */ 1229 + * instruction is a no-op 1230 + */ 1212 1231 return msg == DBELL_TYPE_DBELL_SERVER ? PPC_INTERRUPT_HDOORBELL : -1; 1213 1232 } 1214 1233
+83 -51
target/ppc/fpu_helper.c
··· 90 90 ret = extract64(arg, 62, 2) << 30; 91 91 ret |= extract64(arg, 29, 30); 92 92 } else { 93 - /* Zero or Denormal result. If the exponent is in bounds for 94 - * a single-precision denormal result, extract the proper bits. 95 - * If the input is not zero, and the exponent is out of bounds, 96 - * then the result is undefined; this underflows to zero. 93 + /* 94 + * Zero or Denormal result. If the exponent is in bounds for 95 + * a single-precision denormal result, extract the proper 96 + * bits. If the input is not zero, and the exponent is out of 97 + * bounds, then the result is undefined; this underflows to 98 + * zero. 97 99 */ 98 100 ret = extract64(arg, 63, 1) << 31; 99 101 if (unlikely(exp >= 874)) { ··· 1090 1092 fe_flag = 1; 1091 1093 } else if (unlikely(float64_is_neg(frb))) { 1092 1094 fe_flag = 1; 1093 - } else if (!float64_is_zero(frb) && (e_b <= (-1022+52))) { 1095 + } else if (!float64_is_zero(frb) && (e_b <= (-1022 + 52))) { 1094 1096 fe_flag = 1; 1095 1097 } 1096 1098 ··· 1789 1791 #define float64_to_float64(x, env) x 1790 1792 1791 1793 1792 - /* VSX_ADD_SUB - VSX floating point add/subract 1794 + /* 1795 + * VSX_ADD_SUB - VSX floating point add/subract 1793 1796 * name - instruction mnemonic 1794 1797 * op - operation (add or sub) 1795 1798 * nels - number of elements (1, 2 or 4) ··· 1872 1875 do_float_check_status(env, GETPC()); 1873 1876 } 1874 1877 1875 - /* VSX_MUL - VSX floating point multiply 1878 + /* 1879 + * VSX_MUL - VSX floating point multiply 1876 1880 * op - instruction mnemonic 1877 1881 * nels - number of elements (1, 2 or 4) 1878 1882 * tp - type (float32 or float64) ··· 1950 1954 do_float_check_status(env, GETPC()); 1951 1955 } 1952 1956 1953 - /* VSX_DIV - VSX floating point divide 1957 + /* 1958 + * VSX_DIV - VSX floating point divide 1954 1959 * op - instruction mnemonic 1955 1960 * nels - number of elements (1, 2 or 4) 1956 1961 * tp - type (float32 or float64) ··· 2034 2039 do_float_check_status(env, GETPC()); 2035 
2040 } 2036 2041 2037 - /* VSX_RE - VSX floating point reciprocal estimate 2042 + /* 2043 + * VSX_RE - VSX floating point reciprocal estimate 2038 2044 * op - instruction mnemonic 2039 2045 * nels - number of elements (1, 2 or 4) 2040 2046 * tp - type (float32 or float64) ··· 2075 2081 VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0) 2076 2082 VSX_RE(xvresp, 4, float32, VsrW(i), 0, 0) 2077 2083 2078 - /* VSX_SQRT - VSX floating point square root 2084 + /* 2085 + * VSX_SQRT - VSX floating point square root 2079 2086 * op - instruction mnemonic 2080 2087 * nels - number of elements (1, 2 or 4) 2081 2088 * tp - type (float32 or float64) ··· 2124 2131 VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0) 2125 2132 VSX_SQRT(xvsqrtsp, 4, float32, VsrW(i), 0, 0) 2126 2133 2127 - /* VSX_RSQRTE - VSX floating point reciprocal square root estimate 2134 + /* 2135 + *VSX_RSQRTE - VSX floating point reciprocal square root estimate 2128 2136 * op - instruction mnemonic 2129 2137 * nels - number of elements (1, 2 or 4) 2130 2138 * tp - type (float32 or float64) ··· 2174 2182 VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0) 2175 2183 VSX_RSQRTE(xvrsqrtesp, 4, float32, VsrW(i), 0, 0) 2176 2184 2177 - /* VSX_TDIV - VSX floating point test for divide 2185 + /* 2186 + * VSX_TDIV - VSX floating point test for divide 2178 2187 * op - instruction mnemonic 2179 2188 * nels - number of elements (1, 2 or 4) 2180 2189 * tp - type (float32 or float64) ··· 2207 2216 if (unlikely(tp##_is_any_nan(xa.fld) || \ 2208 2217 tp##_is_any_nan(xb.fld))) { \ 2209 2218 fe_flag = 1; \ 2210 - } else if ((e_b <= emin) || (e_b >= (emax-2))) { \ 2219 + } else if ((e_b <= emin) || (e_b >= (emax - 2))) { \ 2211 2220 fe_flag = 1; \ 2212 2221 } else if (!tp##_is_zero(xa.fld) && \ 2213 2222 (((e_a - e_b) >= emax) || \ 2214 - ((e_a - e_b) <= (emin+1)) || \ 2215 - (e_a <= (emin+nbits)))) { \ 2223 + ((e_a - e_b) <= (emin + 1)) || \ 2224 + (e_a <= (emin + nbits)))) { \ 2216 2225 fe_flag = 1; \ 2217 2226 } \ 2218 2227 \ 2219 2228 if 
(unlikely(tp##_is_zero_or_denormal(xb.fld))) { \ 2220 - /* XB is not zero because of the above check and */ \ 2221 - /* so must be denormalized. */ \ 2229 + /* \ 2230 + * XB is not zero because of the above check and so \ 2231 + * must be denormalized. \ 2232 + */ \ 2222 2233 fg_flag = 1; \ 2223 2234 } \ 2224 2235 } \ ··· 2231 2242 VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52) 2232 2243 VSX_TDIV(xvtdivsp, 4, float32, VsrW(i), -126, 127, 23) 2233 2244 2234 - /* VSX_TSQRT - VSX floating point test for square root 2245 + /* 2246 + * VSX_TSQRT - VSX floating point test for square root 2235 2247 * op - instruction mnemonic 2236 2248 * nels - number of elements (1, 2 or 4) 2237 2249 * tp - type (float32 or float64) ··· 2266 2278 } else if (unlikely(tp##_is_neg(xb.fld))) { \ 2267 2279 fe_flag = 1; \ 2268 2280 } else if (!tp##_is_zero(xb.fld) && \ 2269 - (e_b <= (emin+nbits))) { \ 2281 + (e_b <= (emin + nbits))) { \ 2270 2282 fe_flag = 1; \ 2271 2283 } \ 2272 2284 \ 2273 2285 if (unlikely(tp##_is_zero_or_denormal(xb.fld))) { \ 2274 - /* XB is not zero because of the above check and */ \ 2275 - /* therefore must be denormalized. */ \ 2286 + /* \ 2287 + * XB is not zero because of the above check and \ 2288 + * therefore must be denormalized. 
\ 2289 + */ \ 2276 2290 fg_flag = 1; \ 2277 2291 } \ 2278 2292 } \ ··· 2285 2299 VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52) 2286 2300 VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23) 2287 2301 2288 - /* VSX_MADD - VSX floating point muliply/add variations 2302 + /* 2303 + * VSX_MADD - VSX floating point muliply/add variations 2289 2304 * op - instruction mnemonic 2290 2305 * nels - number of elements (1, 2 or 4) 2291 2306 * tp - type (float32 or float64) ··· 2322 2337 float_status tstat = env->fp_status; \ 2323 2338 set_float_exception_flags(0, &tstat); \ 2324 2339 if (r2sp && (tstat.float_rounding_mode == float_round_nearest_even)) {\ 2325 - /* Avoid double rounding errors by rounding the intermediate */ \ 2326 - /* result to odd. */ \ 2340 + /* \ 2341 + * Avoid double rounding errors by rounding the intermediate \ 2342 + * result to odd. \ 2343 + */ \ 2327 2344 set_float_rounding_mode(float_round_to_zero, &tstat); \ 2328 2345 xt_out.fld = tp##_muladd(xa.fld, b->fld, c->fld, \ 2329 2346 maddflgs, &tstat); \ ··· 2388 2405 VSX_MADD(xvnmsubasp, 4, float32, VsrW(i), NMSUB_FLGS, 1, 0, 0) 2389 2406 VSX_MADD(xvnmsubmsp, 4, float32, VsrW(i), NMSUB_FLGS, 0, 0, 0) 2390 2407 2391 - /* VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision 2408 + /* 2409 + * VSX_SCALAR_CMP_DP - VSX scalar floating point compare double precision 2392 2410 * op - instruction mnemonic 2393 2411 * cmp - comparison operation 2394 2412 * exp - expected result of comparison ··· 2604 2622 VSX_SCALAR_CMPQ(xscmpoqp, 1) 2605 2623 VSX_SCALAR_CMPQ(xscmpuqp, 0) 2606 2624 2607 - /* VSX_MAX_MIN - VSX floating point maximum/minimum 2625 + /* 2626 + * VSX_MAX_MIN - VSX floating point maximum/minimum 2608 2627 * name - instruction mnemonic 2609 2628 * op - operation (max or min) 2610 2629 * nels - number of elements (1, 2 or 4) ··· 2733 2752 VSX_MAX_MINJ(xsmaxjdp, 1); 2734 2753 VSX_MAX_MINJ(xsminjdp, 0); 2735 2754 2736 - /* VSX_CMP - VSX floating point compare 2755 + /* 2756 + * 
VSX_CMP - VSX floating point compare 2737 2757 * op - instruction mnemonic 2738 2758 * nels - number of elements (1, 2 or 4) 2739 2759 * tp - type (float32 or float64) ··· 2778 2798 } \ 2779 2799 \ 2780 2800 putVSR(xT(opcode), &xt, env); \ 2781 - if ((opcode >> (31-21)) & 1) { \ 2801 + if ((opcode >> (31 - 21)) & 1) { \ 2782 2802 env->crf[6] = (all_true ? 0x8 : 0) | (all_false ? 0x2 : 0); \ 2783 2803 } \ 2784 2804 do_float_check_status(env, GETPC()); \ ··· 2793 2813 VSX_CMP(xvcmpgtsp, 4, float32, VsrW(i), lt, 1, 1) 2794 2814 VSX_CMP(xvcmpnesp, 4, float32, VsrW(i), eq, 0, 0) 2795 2815 2796 - /* VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion 2816 + /* 2817 + * VSX_CVT_FP_TO_FP - VSX floating point/floating point conversion 2797 2818 * op - instruction mnemonic 2798 2819 * nels - number of elements (1, 2 or 4) 2799 2820 * stp - source type (float32 or float64) ··· 2829 2850 2830 2851 VSX_CVT_FP_TO_FP(xscvdpsp, 1, float64, float32, VsrD(0), VsrW(0), 1) 2831 2852 VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1) 2832 - VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2*i), 0) 2833 - VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2*i), VsrD(i), 0) 2853 + VSX_CVT_FP_TO_FP(xvcvdpsp, 2, float64, float32, VsrD(i), VsrW(2 * i), 0) 2854 + VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0) 2834 2855 2835 - /* VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion 2856 + /* 2857 + * VSX_CVT_FP_TO_FP_VECTOR - VSX floating point/floating point conversion 2836 2858 * op - instruction mnemonic 2837 2859 * nels - number of elements (1, 2 or 4) 2838 2860 * stp - source type (float32 or float64) ··· 2868 2890 2869 2891 VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1) 2870 2892 2871 - /* VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion 2893 + /* 2894 + * VSX_CVT_FP_TO_FP_HP - VSX floating point/floating point conversion 2872 2895 * involving one half 
precision value 2873 2896 * op - instruction mnemonic 2874 2897 * nels - number of elements (1, 2 or 4) ··· 2953 2976 return float32_to_float64(xb >> 32, &tstat); 2954 2977 } 2955 2978 2956 - /* VSX_CVT_FP_TO_INT - VSX floating point to integer conversion 2979 + /* 2980 + * VSX_CVT_FP_TO_INT - VSX floating point to integer conversion 2957 2981 * op - instruction mnemonic 2958 2982 * nels - number of elements (1, 2 or 4) 2959 2983 * stp - source type (float32 or float64) ··· 2996 3020 VSX_CVT_FP_TO_INT(xscvdpuxws, 1, float64, uint32, VsrD(0), VsrW(1), 0U) 2997 3021 VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), \ 2998 3022 0x8000000000000000ULL) 2999 - VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2*i), \ 3023 + VSX_CVT_FP_TO_INT(xvcvdpsxws, 2, float64, int32, VsrD(i), VsrW(2 * i), \ 3000 3024 0x80000000U) 3001 3025 VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), 0ULL) 3002 - VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2*i), 0U) 3003 - VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2*i), VsrD(i), \ 3026 + VSX_CVT_FP_TO_INT(xvcvdpuxws, 2, float64, uint32, VsrD(i), VsrW(2 * i), 0U) 3027 + VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), \ 3004 3028 0x8000000000000000ULL) 3005 3029 VSX_CVT_FP_TO_INT(xvcvspsxws, 4, float32, int32, VsrW(i), VsrW(i), 0x80000000U) 3006 - VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2*i), VsrD(i), 0ULL) 3030 + VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), 0ULL) 3007 3031 VSX_CVT_FP_TO_INT(xvcvspuxws, 4, float32, uint32, VsrW(i), VsrW(i), 0U) 3008 3032 3009 - /* VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion 3033 + /* 3034 + * VSX_CVT_FP_TO_INT_VECTOR - VSX floating point to integer conversion 3010 3035 * op - instruction mnemonic 3011 3036 * stp - source type (float32 or float64) 3012 3037 * ttp - target type (int32, uint32, int64 or uint64) ··· 3040 3065 
VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL) 3041 3066 VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL) 3042 3067 3043 - /* VSX_CVT_INT_TO_FP - VSX integer to floating point conversion 3068 + /* 3069 + * VSX_CVT_INT_TO_FP - VSX integer to floating point conversion 3044 3070 * op - instruction mnemonic 3045 3071 * nels - number of elements (1, 2 or 4) 3046 3072 * stp - source type (int32, uint32, int64 or uint64) ··· 3079 3105 VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1) 3080 3106 VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0) 3081 3107 VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0) 3082 - VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2*i), VsrD(i), 0, 0) 3083 - VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2*i), VsrD(i), 0, 0) 3084 - VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2*i), 0, 0) 3085 - VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2*i), 0, 0) 3108 + VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0) 3109 + VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0) 3110 + VSX_CVT_INT_TO_FP(xvcvsxdsp, 2, int64, float32, VsrD(i), VsrW(2 * i), 0, 0) 3111 + VSX_CVT_INT_TO_FP(xvcvuxdsp, 2, uint64, float32, VsrD(i), VsrW(2 * i), 0, 0) 3086 3112 VSX_CVT_INT_TO_FP(xvcvsxwsp, 4, int32, float32, VsrW(i), VsrW(i), 0, 0) 3087 3113 VSX_CVT_INT_TO_FP(xvcvuxwsp, 4, uint32, float32, VsrW(i), VsrW(i), 0, 0) 3088 3114 3089 - /* VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion 3115 + /* 3116 + * VSX_CVT_INT_TO_FP_VECTOR - VSX integer to floating point conversion 3090 3117 * op - instruction mnemonic 3091 3118 * stp - source type (int32, uint32, int64 or uint64) 3092 3119 * ttp - target type (float32 or float64) ··· 3111 3138 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128) 3112 3139 
VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128) 3113 3140 3114 - /* For "use current rounding mode", define a value that will not be one of 3115 - * the existing rounding model enums. 3141 + /* 3142 + * For "use current rounding mode", define a value that will not be 3143 + * one of the existing rounding model enums. 3116 3144 */ 3117 3145 #define FLOAT_ROUND_CURRENT (float_round_nearest_even + float_round_down + \ 3118 3146 float_round_up + float_round_to_zero) 3119 3147 3120 - /* VSX_ROUND - VSX floating point round 3148 + /* 3149 + * VSX_ROUND - VSX floating point round 3121 3150 * op - instruction mnemonic 3122 3151 * nels - number of elements (1, 2 or 4) 3123 3152 * tp - type (float32 or float64) ··· 3150 3179 } \ 3151 3180 } \ 3152 3181 \ 3153 - /* If this is not a "use current rounding mode" instruction, \ 3182 + /* \ 3183 + * If this is not a "use current rounding mode" instruction, \ 3154 3184 * then inhibit setting of the XX bit and restore rounding \ 3155 - * mode from FPSCR */ \ 3185 + * mode from FPSCR \ 3186 + */ \ 3156 3187 if (rmode != FLOAT_ROUND_CURRENT) { \ 3157 3188 fpscr_set_rounding_mode(env); \ 3158 3189 env->fp_status.float_exception_flags &= ~float_flag_inexact; \ ··· 3234 3265 putVSR(xT(opcode), &xt, env); 3235 3266 } 3236 3267 3237 - /* VSX_TEST_DC - VSX floating point test data class 3268 + /* 3269 + * VSX_TEST_DC - VSX floating point test data class 3238 3270 * op - instruction mnemonic 3239 3271 * nels - number of elements (1, 2 or 4) 3240 3272 * xbn - VSR register number
+19 -15
target/ppc/gdbstub.c
··· 33 33 return 8; 34 34 case 64 ... 95: 35 35 return 16; 36 - case 64+32: /* nip */ 37 - case 65+32: /* msr */ 38 - case 67+32: /* lr */ 39 - case 68+32: /* ctr */ 40 - case 70+32: /* fpscr */ 36 + case 64 + 32: /* nip */ 37 + case 65 + 32: /* msr */ 38 + case 67 + 32: /* lr */ 39 + case 68 + 32: /* ctr */ 40 + case 70 + 32: /* fpscr */ 41 41 return 8; 42 - case 66+32: /* cr */ 43 - case 69+32: /* xer */ 42 + case 66 + 32: /* cr */ 43 + case 69 + 32: /* xer */ 44 44 return 4; 45 45 default: 46 46 return 0; ··· 84 84 } 85 85 } 86 86 87 - /* We need to present the registers to gdb in the "current" memory ordering. 88 - For user-only mode we get this for free; TARGET_WORDS_BIGENDIAN is set to 89 - the proper ordering for the binary, and cannot be changed. 90 - For system mode, TARGET_WORDS_BIGENDIAN is always set, and we must check 91 - the current mode of the chip to see if we're running in little-endian. */ 87 + /* 88 + * We need to present the registers to gdb in the "current" memory 89 + * ordering. For user-only mode we get this for free; 90 + * TARGET_WORDS_BIGENDIAN is set to the proper ordering for the 91 + * binary, and cannot be changed. For system mode, 92 + * TARGET_WORDS_BIGENDIAN is always set, and we must check the current 93 + * mode of the chip to see if we're running in little-endian. 94 + */ 92 95 void ppc_maybe_bswap_register(CPUPPCState *env, uint8_t *mem_buf, int len) 93 96 { 94 97 #ifndef CONFIG_USER_ONLY ··· 104 107 #endif 105 108 } 106 109 107 - /* Old gdb always expects FP registers. Newer (xml-aware) gdb only 110 + /* 111 + * Old gdb always expects FP registers. Newer (xml-aware) gdb only 108 112 * expects whatever the target description contains. Due to a 109 113 * historical mishap the FP registers appear in between core integer 110 - * regs and PC, MSR, CR, and so forth. We hack round this by giving the 111 - * FP regs zero size when talking to a newer gdb. 114 + * regs and PC, MSR, CR, and so forth. 
We hack round this by giving 115 + * the FP regs zero size when talking to a newer gdb. 112 116 */ 113 117 114 118 int ppc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
+6 -4
target/ppc/helper_regs.h
··· 44 44 45 45 static inline void hreg_compute_mem_idx(CPUPPCState *env) 46 46 { 47 - /* This is our encoding for server processors. The architecture 47 + /* 48 + * This is our encoding for server processors. The architecture 48 49 * specifies that there is no such thing as userspace with 49 - * translation off, however it appears that MacOS does it and 50 - * some 32-bit CPUs support it. Weird... 50 + * translation off, however it appears that MacOS does it and some 51 + * 32-bit CPUs support it. Weird... 51 52 * 52 53 * 0 = Guest User space virtual mode 53 54 * 1 = Guest Kernel space virtual mode ··· 143 144 /* Change the exception prefix on PowerPC 601 */ 144 145 env->excp_prefix = ((value >> MSR_EP) & 1) * 0xFFF00000; 145 146 } 146 - /* If PR=1 then EE, IR and DR must be 1 147 + /* 148 + * If PR=1 then EE, IR and DR must be 1 147 149 * 148 150 * Note: We only enforce this on 64-bit server processors. 149 151 * It appears that:
+39 -31
target/ppc/int_helper.c
··· 137 137 /* if x = 0xab, returns 0xababababababababa */ 138 138 #define pattern(x) (((x) & 0xff) * (~(target_ulong)0 / 0xff)) 139 139 140 - /* substract 1 from each byte, and with inverse, check if MSB is set at each 140 + /* 141 + * subtract 1 from each byte, and with inverse, check if MSB is set at each 141 142 * byte. 142 143 * i.e. ((0x00 - 0x01) & ~(0x00)) & 0x80 143 144 * (0xFF & 0xFF) & 0x80 = 0x80 (zero found) ··· 156 157 #undef haszero 157 158 #undef hasvalue 158 159 159 - /* Return invalid random number. 160 + /* 161 + * Return invalid random number. 160 162 * 161 163 * FIXME: Add rng backend or other mechanism to get cryptographically suitable 162 164 * random number ··· 181 183 uint64_t ra = 0; 182 184 183 185 for (i = 0; i < 8; i++) { 184 - int index = (rs >> (i*8)) & 0xFF; 186 + int index = (rs >> (i * 8)) & 0xFF; 185 187 if (index < 64) { 186 188 if (rb & PPC_BIT(index)) { 187 189 ra |= 1 << i; ··· 370 372 /* 602 specific instructions */ 371 373 /* mfrom is the most crazy instruction ever seen, imho ! */ 372 374 /* Real implementation uses a ROM table. Do the same */ 373 - /* Extremely decomposed: 375 + /* 376 + * Extremely decomposed: 374 377 * -arg / 256 375 378 * return 256 * log10(10 + 1.0) + 0.5 376 379 */ ··· 393 396 for (index = 0; index < ARRAY_SIZE(r->element); index++) 394 397 #else 395 398 #define VECTOR_FOR_INORDER_I(index, element) \ 396 - for (index = ARRAY_SIZE(r->element)-1; index >= 0; index--) 399 + for (index = ARRAY_SIZE(r->element) - 1; index >= 0; index--) 397 400 #endif 398 401 399 402 /* Saturating arithmetic helpers. 
*/ ··· 634 637 } \ 635 638 } 636 639 637 - /* VABSDU - Vector absolute difference unsigned 640 + /* 641 + * VABSDU - Vector absolute difference unsigned 638 642 * name - instruction mnemonic suffix (b: byte, h: halfword, w: word) 639 643 * element - element type to access from vector 640 644 */ ··· 739 743 } \ 740 744 } 741 745 742 - /* VCMPNEZ - Vector compare not equal to zero 746 + /* 747 + * VCMPNEZ - Vector compare not equal to zero 743 748 * suffix - instruction mnemonic suffix (b: byte, h: halfword, w: word) 744 749 * element - element type to access from vector 745 750 */ ··· 1138 1143 #define VBPERMQ_DW(index) (((index) & 0x40) != 0) 1139 1144 #define EXTRACT_BIT(avr, i, index) (extract64((avr)->u64[i], index, 1)) 1140 1145 #else 1141 - #define VBPERMQ_INDEX(avr, i) ((avr)->u8[15-(i)]) 1146 + #define VBPERMQ_INDEX(avr, i) ((avr)->u8[15 - (i)]) 1142 1147 #define VBPERMD_INDEX(i) (1 - i) 1143 1148 #define VBPERMQ_DW(index) (((index) & 0x40) == 0) 1144 1149 #define EXTRACT_BIT(avr, i, index) \ ··· 1169 1174 int index = VBPERMQ_INDEX(b, i); 1170 1175 1171 1176 if (index < 128) { 1172 - uint64_t mask = (1ull << (63-(index & 0x3F))); 1177 + uint64_t mask = (1ull << (63 - (index & 0x3F))); 1173 1178 if (a->u64[VBPERMQ_DW(index)] & mask) { 1174 1179 perm |= (0x8000 >> i); 1175 1180 } ··· 1449 1454 1450 1455 VECTOR_FOR_INORDER_I(i, u8) { 1451 1456 #if defined(HOST_WORDS_BIGENDIAN) 1452 - t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7); 1457 + t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (i & 7); 1453 1458 #else 1454 - t[i>>3] |= VGBBD_MASKS[b->u8[i]] >> (7-(i & 7)); 1459 + t[i >> 3] |= VGBBD_MASKS[b->u8[i]] >> (7 - (i & 7)); 1455 1460 #endif 1456 1461 } 1457 1462 ··· 1463 1468 void helper_##name(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ 1464 1469 { \ 1465 1470 int i, j; \ 1466 - trgtyp prod[sizeof(ppc_avr_t)/sizeof(a->srcfld[0])]; \ 1471 + trgtyp prod[sizeof(ppc_avr_t) / sizeof(a->srcfld[0])]; \ 1467 1472 \ 1468 1473 VECTOR_FOR_INORDER_I(i, srcfld) { \ 1469 1474 prod[i] = 
0; \ 1470 1475 for (j = 0; j < sizeof(a->srcfld[0]) * 8; j++) { \ 1471 - if (a->srcfld[i] & (1ull<<j)) { \ 1476 + if (a->srcfld[i] & (1ull << j)) { \ 1472 1477 prod[i] ^= ((trgtyp)b->srcfld[i] << j); \ 1473 1478 } \ 1474 1479 } \ 1475 1480 } \ 1476 1481 \ 1477 1482 VECTOR_FOR_INORDER_I(i, trgfld) { \ 1478 - r->trgfld[i] = prod[2*i] ^ prod[2*i+1]; \ 1483 + r->trgfld[i] = prod[2 * i] ^ prod[2 * i + 1]; \ 1479 1484 } \ 1480 1485 } 1481 1486 ··· 1493 1498 VECTOR_FOR_INORDER_I(i, u64) { 1494 1499 prod[i] = 0; 1495 1500 for (j = 0; j < 64; j++) { 1496 - if (a->u64[i] & (1ull<<j)) { 1501 + if (a->u64[i] & (1ull << j)) { 1497 1502 prod[i] ^= (((__uint128_t)b->u64[i]) << j); 1498 1503 } 1499 1504 } ··· 1508 1513 VECTOR_FOR_INORDER_I(i, u64) { 1509 1514 prod[i].VsrD(1) = prod[i].VsrD(0) = 0; 1510 1515 for (j = 0; j < 64; j++) { 1511 - if (a->u64[i] & (1ull<<j)) { 1516 + if (a->u64[i] & (1ull << j)) { 1512 1517 ppc_avr_t bshift; 1513 1518 if (j == 0) { 1514 1519 bshift.VsrD(0) = 0; ··· 1548 1553 VECTOR_FOR_INORDER_I(j, u32) { 1549 1554 uint32_t e = x[i]->u32[j]; 1550 1555 1551 - result.u16[4*i+j] = (((e >> 9) & 0xfc00) | 1552 - ((e >> 6) & 0x3e0) | 1553 - ((e >> 3) & 0x1f)); 1556 + result.u16[4 * i + j] = (((e >> 9) & 0xfc00) | 1557 + ((e >> 6) & 0x3e0) | 1558 + ((e >> 3) & 0x1f)); 1554 1559 } 1555 1560 } 1556 1561 *r = result; ··· 1568 1573 \ 1569 1574 VECTOR_FOR_INORDER_I(i, from) { \ 1570 1575 result.to[i] = cvt(a0->from[i], &sat); \ 1571 - result.to[i+ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat); \ 1576 + result.to[i + ARRAY_SIZE(r->from)] = cvt(a1->from[i], &sat);\ 1572 1577 } \ 1573 1578 *r = result; \ 1574 1579 if (dosat && sat) { \ ··· 1736 1741 VEXTU_X_DO(vextuwrx, 32, 0) 1737 1742 #undef VEXTU_X_DO 1738 1743 1739 - /* The specification says that the results are undefined if all of the 1740 - * shift counts are not identical. We check to make sure that they are 1741 - * to conform to what real hardware appears to do. 
*/ 1744 + /* 1745 + * The specification says that the results are undefined if all of the 1746 + * shift counts are not identical. We check to make sure that they 1747 + * are to conform to what real hardware appears to do. 1748 + */ 1742 1749 #define VSHIFT(suffix, leftp) \ 1743 1750 void helper_vs##suffix(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) \ 1744 1751 { \ ··· 1805 1812 int i; 1806 1813 unsigned int shift, bytes; 1807 1814 1808 - /* Use reverse order, as destination and source register can be same. Its 1809 - * being modified in place saving temporary, reverse order will guarantee 1810 - * that computed result is not fed back. 1815 + /* 1816 + * Use reverse order, as destination and source register can be 1817 + * same. Its being modified in place saving temporary, reverse 1818 + * order will guarantee that computed result is not fed back. 1811 1819 */ 1812 1820 for (i = ARRAY_SIZE(r->u8) - 1; i >= 0; i--) { 1813 1821 shift = b->u8[i] & 0x7; /* extract shift value */ ··· 1840 1848 1841 1849 #if defined(HOST_WORDS_BIGENDIAN) 1842 1850 memmove(&r->u8[0], &a->u8[sh], 16 - sh); 1843 - memset(&r->u8[16-sh], 0, sh); 1851 + memset(&r->u8[16 - sh], 0, sh); 1844 1852 #else 1845 1853 memmove(&r->u8[sh], &a->u8[0], 16 - sh); 1846 1854 memset(&r->u8[0], 0, sh); ··· 2112 2120 ppc_avr_t result; \ 2113 2121 \ 2114 2122 for (i = 0; i < ARRAY_SIZE(r->u32); i++) { \ 2115 - uint16_t e = b->u16[hi ? i : i+4]; \ 2123 + uint16_t e = b->u16[hi ? i : i + 4]; \ 2116 2124 uint8_t a = (e >> 15) ? 
0xff : 0; \ 2117 2125 uint8_t r = (e >> 10) & 0x1f; \ 2118 2126 uint8_t g = (e >> 5) & 0x1f; \ ··· 2463 2471 { 2464 2472 if (n & 1) { 2465 2473 bcd->u8[BCD_DIG_BYTE(n)] &= 0x0F; 2466 - bcd->u8[BCD_DIG_BYTE(n)] |= (digit<<4); 2474 + bcd->u8[BCD_DIG_BYTE(n)] |= (digit << 4); 2467 2475 } else { 2468 2476 bcd->u8[BCD_DIG_BYTE(n)] &= 0xF0; 2469 2477 bcd->u8[BCD_DIG_BYTE(n)] |= digit; ··· 3220 3228 3221 3229 for (i = 0; i < ARRAY_SIZE(r->u64); i++) { 3222 3230 if (st == 0) { 3223 - if ((six & (0x8 >> (2*i))) == 0) { 3231 + if ((six & (0x8 >> (2 * i))) == 0) { 3224 3232 r->VsrD(i) = ror64(a->VsrD(i), 1) ^ 3225 3233 ror64(a->VsrD(i), 8) ^ 3226 3234 (a->VsrD(i) >> 7); ··· 3230 3238 (a->VsrD(i) >> 6); 3231 3239 } 3232 3240 } else { /* st == 1 */ 3233 - if ((six & (0x8 >> (2*i))) == 0) { 3241 + if ((six & (0x8 >> (2 * i))) == 0) { 3234 3242 r->VsrD(i) = ror64(a->VsrD(i), 28) ^ 3235 3243 ror64(a->VsrD(i), 34) ^ 3236 3244 ror64(a->VsrD(i), 39);
+139 -105
target/ppc/kvm.c
··· 49 49 #include "elf.h" 50 50 #include "sysemu/kvm_int.h" 51 51 52 - //#define DEBUG_KVM 53 - 54 - #ifdef DEBUG_KVM 55 - #define DPRINTF(fmt, ...) \ 56 - do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) 57 - #else 58 - #define DPRINTF(fmt, ...) \ 59 - do { } while (0) 60 - #endif 61 - 62 52 #define PROC_DEVTREE_CPU "/proc/device-tree/cpus/" 63 53 64 54 const KVMCapabilityInfo kvm_arch_required_capabilities[] = { 65 55 KVM_CAP_LAST_INFO 66 56 }; 67 57 68 - static int cap_interrupt_unset = false; 69 - static int cap_interrupt_level = false; 58 + static int cap_interrupt_unset; 59 + static int cap_interrupt_level; 70 60 static int cap_segstate; 71 61 static int cap_booke_sregs; 72 62 static int cap_ppc_smt; ··· 96 86 97 87 static uint32_t debug_inst_opcode; 98 88 99 - /* XXX We have a race condition where we actually have a level triggered 89 + /* 90 + * XXX We have a race condition where we actually have a level triggered 100 91 * interrupt, but the infrastructure can't expose that yet, so the guest 101 92 * takes but ignores it, goes to sleep and never gets notified that there's 102 93 * still an interrupt pending. ··· 114 105 qemu_cpu_kick(CPU(cpu)); 115 106 } 116 107 117 - /* Check whether we are running with KVM-PR (instead of KVM-HV). This 108 + /* 109 + * Check whether we are running with KVM-PR (instead of KVM-HV). This 118 110 * should only be used for fallback tests - generally we should use 119 111 * explicit capabilities for the features we want, rather than 120 - * assuming what is/isn't available depending on the KVM variant. */ 112 + * assuming what is/isn't available depending on the KVM variant. 
113 + */ 121 114 static bool kvmppc_is_pr(KVMState *ks) 122 115 { 123 116 /* Assume KVM-PR if the GET_PVINFO capability is available */ ··· 143 136 cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR); 144 137 cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR); 145 138 cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG); 146 - /* Note: we don't set cap_papr here, because this capability is 147 - * only activated after this by kvmppc_set_papr() */ 139 + /* 140 + * Note: we don't set cap_papr here, because this capability is 141 + * only activated after this by kvmppc_set_papr() 142 + */ 148 143 cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD); 149 144 cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL); 150 145 cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT); ··· 160 155 * in KVM at this moment. 161 156 * 162 157 * TODO: call kvm_vm_check_extension() with the right capability 163 - * after the kernel starts implementing it.*/ 158 + * after the kernel starts implementing it. 159 + */ 164 160 cap_ppc_pvr_compat = false; 165 161 166 162 if (!cap_interrupt_level) { ··· 186 182 int ret; 187 183 188 184 if (cenv->excp_model == POWERPC_EXCP_BOOKE) { 189 - /* What we're really trying to say is "if we're on BookE, we use 190 - the native PVR for now". This is the only sane way to check 191 - it though, so we potentially confuse users that they can run 192 - BookE guests on BookS. Let's hope nobody dares enough :) */ 185 + /* 186 + * What we're really trying to say is "if we're on BookE, we 187 + * use the native PVR for now". This is the only sane way to 188 + * check it though, so we potentially confuse users that they 189 + * can run BookE guests on BookS. 
Let's hope nobody dares 190 + * enough :) 191 + */ 193 192 return 0; 194 193 } else { 195 194 if (!cap_segstate) { ··· 421 420 } 422 421 423 422 if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) { 424 - /* Mostly what guest pagesizes we can use are related to the 423 + /* 424 + * Mostly what guest pagesizes we can use are related to the 425 425 * host pages used to map guest RAM, which is handled in the 426 426 * platform code. Cache-Inhibited largepages (64k) however are 427 427 * used for I/O, so if they're mapped to the host at all it 428 428 * will be a normal mapping, not a special hugepage one used 429 - * for RAM. */ 429 + * for RAM. 430 + */ 430 431 if (getpagesize() < 0x10000) { 431 432 error_setg(errp, 432 433 "KVM can't supply 64kiB CI pages, which guest expects"); ··· 440 441 return POWERPC_CPU(cpu)->vcpu_id; 441 442 } 442 443 443 - /* e500 supports 2 h/w breakpoint and 2 watchpoint. 444 - * book3s supports only 1 watchpoint, so array size 445 - * of 4 is sufficient for now. 444 + /* 445 + * e500 supports 2 h/w breakpoint and 2 watchpoint. book3s supports 446 + * only 1 watchpoint, so array size of 4 is sufficient for now. 446 447 */ 447 448 #define MAX_HW_BKPTS 4 448 449 ··· 497 498 break; 498 499 case POWERPC_MMU_2_07: 499 500 if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) { 500 - /* KVM-HV has transactional memory on POWER8 also without the 501 - * KVM_CAP_PPC_HTM extension, so enable it here instead as 502 - * long as it's availble to userspace on the host. */ 501 + /* 502 + * KVM-HV has transactional memory on POWER8 also without 503 + * the KVM_CAP_PPC_HTM extension, so enable it here 504 + * instead as long as it's availble to userspace on the 505 + * host. 
506 + */ 503 507 if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) { 504 508 cap_htm = true; 505 509 } ··· 626 630 reg.addr = (uintptr_t)&fpscr; 627 631 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 628 632 if (ret < 0) { 629 - DPRINTF("Unable to set FPSCR to KVM: %s\n", strerror(errno)); 633 + trace_kvm_failed_fpscr_set(strerror(errno)); 630 634 return ret; 631 635 } 632 636 ··· 647 651 648 652 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 649 653 if (ret < 0) { 650 - DPRINTF("Unable to set %s%d to KVM: %s\n", vsx ? "VSR" : "FPR", 651 - i, strerror(errno)); 654 + trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i, 655 + strerror(errno)); 652 656 return ret; 653 657 } 654 658 } ··· 659 663 reg.addr = (uintptr_t)&env->vscr; 660 664 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 661 665 if (ret < 0) { 662 - DPRINTF("Unable to set VSCR to KVM: %s\n", strerror(errno)); 666 + trace_kvm_failed_vscr_set(strerror(errno)); 663 667 return ret; 664 668 } 665 669 ··· 668 672 reg.addr = (uintptr_t)cpu_avr_ptr(env, i); 669 673 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 670 674 if (ret < 0) { 671 - DPRINTF("Unable to set VR%d to KVM: %s\n", i, strerror(errno)); 675 + trace_kvm_failed_vr_set(i, strerror(errno)); 672 676 return ret; 673 677 } 674 678 } ··· 693 697 reg.addr = (uintptr_t)&fpscr; 694 698 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 695 699 if (ret < 0) { 696 - DPRINTF("Unable to get FPSCR from KVM: %s\n", strerror(errno)); 700 + trace_kvm_failed_fpscr_get(strerror(errno)); 697 701 return ret; 698 702 } else { 699 703 env->fpscr = fpscr; ··· 709 713 710 714 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 711 715 if (ret < 0) { 712 - DPRINTF("Unable to get %s%d from KVM: %s\n", 713 - vsx ? "VSR" : "FPR", i, strerror(errno)); 716 + trace_kvm_failed_fp_get(vsx ? 
"VSR" : "FPR", i, 717 + strerror(errno)); 714 718 return ret; 715 719 } else { 716 720 #ifdef HOST_WORDS_BIGENDIAN ··· 733 737 reg.addr = (uintptr_t)&env->vscr; 734 738 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 735 739 if (ret < 0) { 736 - DPRINTF("Unable to get VSCR from KVM: %s\n", strerror(errno)); 740 + trace_kvm_failed_vscr_get(strerror(errno)); 737 741 return ret; 738 742 } 739 743 ··· 742 746 reg.addr = (uintptr_t)cpu_avr_ptr(env, i); 743 747 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 744 748 if (ret < 0) { 745 - DPRINTF("Unable to get VR%d from KVM: %s\n", 746 - i, strerror(errno)); 749 + trace_kvm_failed_vr_get(i, strerror(errno)); 747 750 return ret; 748 751 } 749 752 } ··· 764 767 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr; 765 768 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 766 769 if (ret < 0) { 767 - DPRINTF("Unable to get VPA address from KVM: %s\n", strerror(errno)); 770 + trace_kvm_failed_vpa_addr_get(strerror(errno)); 768 771 return ret; 769 772 } 770 773 ··· 774 777 reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr; 775 778 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 776 779 if (ret < 0) { 777 - DPRINTF("Unable to get SLB shadow state from KVM: %s\n", 778 - strerror(errno)); 780 + trace_kvm_failed_slb_get(strerror(errno)); 779 781 return ret; 780 782 } 781 783 ··· 785 787 reg.addr = (uintptr_t)&spapr_cpu->dtl_addr; 786 788 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg); 787 789 if (ret < 0) { 788 - DPRINTF("Unable to get dispatch trace log state from KVM: %s\n", 789 - strerror(errno)); 790 + trace_kvm_failed_dtl_get(strerror(errno)); 790 791 return ret; 791 792 } 792 793 ··· 800 801 struct kvm_one_reg reg; 801 802 int ret; 802 803 803 - /* SLB shadow or DTL can't be registered unless a master VPA is 804 + /* 805 + * SLB shadow or DTL can't be registered unless a master VPA is 804 806 * registered. That means when restoring state, if a VPA *is* 805 807 * registered, we need to set that up first. 
If not, we need to 806 - * deregister the others before deregistering the master VPA */ 808 + * deregister the others before deregistering the master VPA 809 + */ 807 810 assert(spapr_cpu->vpa_addr 808 811 || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr)); 809 812 ··· 812 815 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr; 813 816 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 814 817 if (ret < 0) { 815 - DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno)); 818 + trace_kvm_failed_vpa_addr_set(strerror(errno)); 816 819 return ret; 817 820 } 818 821 } ··· 823 826 reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr; 824 827 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 825 828 if (ret < 0) { 826 - DPRINTF("Unable to set SLB shadow state to KVM: %s\n", strerror(errno)); 829 + trace_kvm_failed_slb_set(strerror(errno)); 827 830 return ret; 828 831 } 829 832 ··· 833 836 reg.addr = (uintptr_t)&spapr_cpu->dtl_addr; 834 837 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 835 838 if (ret < 0) { 836 - DPRINTF("Unable to set dispatch trace log state to KVM: %s\n", 837 - strerror(errno)); 839 + trace_kvm_failed_dtl_set(strerror(errno)); 838 840 return ret; 839 841 } 840 842 ··· 843 845 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr; 844 846 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg); 845 847 if (ret < 0) { 846 - DPRINTF("Unable to set VPA address to KVM: %s\n", strerror(errno)); 848 + trace_kvm_failed_null_vpa_addr_set(strerror(errno)); 847 849 return ret; 848 850 } 849 851 } ··· 929 931 930 932 regs.pid = env->spr[SPR_BOOKE_PID]; 931 933 932 - for (i = 0;i < 32; i++) 934 + for (i = 0; i < 32; i++) { 933 935 regs.gpr[i] = env->gpr[i]; 936 + } 934 937 935 938 regs.cr = 0; 936 939 for (i = 0; i < 8; i++) { ··· 938 941 } 939 942 940 943 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs); 941 - if (ret < 0) 944 + if (ret < 0) { 942 945 return ret; 946 + } 943 947 944 948 kvm_put_fp(cs); 945 949 ··· 962 966 if (cap_one_reg) { 963 967 int i; 964 968 965 - /* We 
deliberately ignore errors here, for kernels which have 969 + /* 970 + * We deliberately ignore errors here, for kernels which have 966 971 * the ONE_REG calls, but don't support the specific 967 972 * registers, there's a reasonable chance things will still 968 - * work, at least until we try to migrate. */ 973 + * work, at least until we try to migrate. 974 + */ 969 975 for (i = 0; i < 1024; i++) { 970 976 uint64_t id = env->spr_cb[i].one_reg_id; 971 977 ··· 996 1002 997 1003 if (cap_papr) { 998 1004 if (kvm_put_vpa(cs) < 0) { 999 - DPRINTF("Warning: Unable to set VPA information to KVM\n"); 1005 + trace_kvm_failed_put_vpa(); 1000 1006 } 1001 1007 } 1002 1008 ··· 1207 1213 int i, ret; 1208 1214 1209 1215 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs); 1210 - if (ret < 0) 1216 + if (ret < 0) { 1211 1217 return ret; 1218 + } 1212 1219 1213 1220 cr = regs.cr; 1214 1221 for (i = 7; i >= 0; i--) { ··· 1236 1243 1237 1244 env->spr[SPR_BOOKE_PID] = regs.pid; 1238 1245 1239 - for (i = 0;i < 32; i++) 1246 + for (i = 0; i < 32; i++) { 1240 1247 env->gpr[i] = regs.gpr[i]; 1248 + } 1241 1249 1242 1250 kvm_get_fp(cs); 1243 1251 ··· 1262 1270 if (cap_one_reg) { 1263 1271 int i; 1264 1272 1265 - /* We deliberately ignore errors here, for kernels which have 1273 + /* 1274 + * We deliberately ignore errors here, for kernels which have 1266 1275 * the ONE_REG calls, but don't support the specific 1267 1276 * registers, there's a reasonable chance things will still 1268 - * work, at least until we try to migrate. */ 1277 + * work, at least until we try to migrate. 
1278 + */ 1269 1279 for (i = 0; i < 1024; i++) { 1270 1280 uint64_t id = env->spr_cb[i].one_reg_id; 1271 1281 ··· 1296 1306 1297 1307 if (cap_papr) { 1298 1308 if (kvm_get_vpa(cs) < 0) { 1299 - DPRINTF("Warning: Unable to get VPA information from KVM\n"); 1309 + trace_kvm_failed_get_vpa(); 1300 1310 } 1301 1311 } 1302 1312 ··· 1339 1349 1340 1350 qemu_mutex_lock_iothread(); 1341 1351 1342 - /* PowerPC QEMU tracks the various core input pins (interrupt, critical 1343 - * interrupt, reset, etc) in PPC-specific env->irq_input_state. */ 1352 + /* 1353 + * PowerPC QEMU tracks the various core input pins (interrupt, 1354 + * critical interrupt, reset, etc) in PPC-specific 1355 + * env->irq_input_state. 1356 + */ 1344 1357 if (!cap_interrupt_level && 1345 1358 run->ready_for_interrupt_injection && 1346 1359 (cs->interrupt_request & CPU_INTERRUPT_HARD) && 1347 - (env->irq_input_state & (1<<PPC_INPUT_INT))) 1360 + (env->irq_input_state & (1 << PPC_INPUT_INT))) 1348 1361 { 1349 - /* For now KVM disregards the 'irq' argument. However, in the 1350 - * future KVM could cache it in-kernel to avoid a heavyweight exit 1351 - * when reading the UIC. 1362 + /* 1363 + * For now KVM disregards the 'irq' argument. However, in the 1364 + * future KVM could cache it in-kernel to avoid a heavyweight 1365 + * exit when reading the UIC. 1352 1366 */ 1353 1367 irq = KVM_INTERRUPT_SET; 1354 1368 1355 - DPRINTF("injected interrupt %d\n", irq); 1369 + trace_kvm_injected_interrupt(irq); 1356 1370 r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq); 1357 1371 if (r < 0) { 1358 1372 printf("cpu %d fail inject %x\n", cs->cpu_index, irq); ··· 1363 1377 (NANOSECONDS_PER_SECOND / 50)); 1364 1378 } 1365 1379 1366 - /* We don't know if there are more interrupts pending after this. However, 1367 - * the guest will return to userspace in the course of handling this one 1368 - * anyways, so we will get a chance to deliver the rest. 
*/ 1380 + /* 1381 + * We don't know if there are more interrupts pending after 1382 + * this. However, the guest will return to userspace in the course 1383 + * of handling this one anyways, so we will get a chance to 1384 + * deliver the rest. 1385 + */ 1369 1386 1370 1387 qemu_mutex_unlock_iothread(); 1371 1388 } ··· 1394 1411 } 1395 1412 1396 1413 /* map dcr access to existing qemu dcr emulation */ 1397 - static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data) 1414 + static int kvmppc_handle_dcr_read(CPUPPCState *env, 1415 + uint32_t dcrn, uint32_t *data) 1398 1416 { 1399 - if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) 1417 + if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) { 1400 1418 fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn); 1419 + } 1401 1420 1402 1421 return 0; 1403 1422 } 1404 1423 1405 - static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data) 1424 + static int kvmppc_handle_dcr_write(CPUPPCState *env, 1425 + uint32_t dcrn, uint32_t data) 1406 1426 { 1407 - if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) 1427 + if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) { 1408 1428 fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn); 1429 + } 1409 1430 1410 1431 return 0; 1411 1432 } ··· 1697 1718 switch (run->exit_reason) { 1698 1719 case KVM_EXIT_DCR: 1699 1720 if (run->dcr.is_write) { 1700 - DPRINTF("handle dcr write\n"); 1721 + trace_kvm_handle_dcr_write(); 1701 1722 ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data); 1702 1723 } else { 1703 - DPRINTF("handle dcr read\n"); 1724 + trace_kvm_handle_drc_read(); 1704 1725 ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data); 1705 1726 } 1706 1727 break; 1707 1728 case KVM_EXIT_HLT: 1708 - DPRINTF("handle halt\n"); 1729 + trace_kvm_handle_halt(); 1709 1730 ret = kvmppc_handle_halt(cpu); 1710 1731 break; 1711 1732 #if defined(TARGET_PPC64) 1712 1733 case KVM_EXIT_PAPR_HCALL: 1713 - DPRINTF("handle PAPR hypercall\n"); 
1734 + trace_kvm_handle_papr_hcall(); 1714 1735 run->papr_hcall.ret = spapr_hypercall(cpu, 1715 1736 run->papr_hcall.nr, 1716 1737 run->papr_hcall.args); ··· 1718 1739 break; 1719 1740 #endif 1720 1741 case KVM_EXIT_EPR: 1721 - DPRINTF("handle epr\n"); 1742 + trace_kvm_handle_epr(); 1722 1743 run->epr.epr = ldl_phys(cs->as, env->mpic_iack); 1723 1744 ret = 0; 1724 1745 break; 1725 1746 case KVM_EXIT_WATCHDOG: 1726 - DPRINTF("handle watchdog expiry\n"); 1747 + trace_kvm_handle_watchdog_expiry(); 1727 1748 watchdog_perform_action(); 1728 1749 ret = 0; 1729 1750 break; 1730 1751 1731 1752 case KVM_EXIT_DEBUG: 1732 - DPRINTF("handle debug exception\n"); 1753 + trace_kvm_handle_debug_exception(); 1733 1754 if (kvm_handle_debug(cpu, run)) { 1734 1755 ret = EXCP_DEBUG; 1735 1756 break; ··· 1832 1853 ret = 0; 1833 1854 break; 1834 1855 } 1835 - } while(*line); 1856 + } while (*line); 1836 1857 1837 1858 fclose(f); 1838 1859 ··· 1849 1870 return retval; 1850 1871 } 1851 1872 1852 - if (!(ns = strchr(line, ':'))) { 1873 + ns = strchr(line, ':'); 1874 + if (!ns) { 1853 1875 return retval; 1854 1876 } 1855 1877 ··· 1875 1897 struct dirent *dirp; 1876 1898 DIR *dp; 1877 1899 1878 - if ((dp = opendir(PROC_DEVTREE_CPU)) == NULL) { 1900 + dp = opendir(PROC_DEVTREE_CPU); 1901 + if (!dp) { 1879 1902 printf("Can't open directory " PROC_DEVTREE_CPU "\n"); 1880 1903 return -1; 1881 1904 } ··· 1929 1952 return 0; 1930 1953 } 1931 1954 1932 - /* Read a CPU node property from the host device tree that's a single 1955 + /* 1956 + * Read a CPU node property from the host device tree that's a single 1933 1957 * integer (32-bit or 64-bit). 
Returns 0 if anything goes wrong 1934 - * (can't find or open the property, or doesn't understand the 1935 - * format) */ 1958 + * (can't find or open the property, or doesn't understand the format) 1959 + */ 1936 1960 static uint64_t kvmppc_read_int_cpu_dt(const char *propname) 1937 1961 { 1938 1962 char buf[PATH_MAX], *tmp; ··· 1991 2015 1992 2016 int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len) 1993 2017 { 1994 - uint32_t *hc = (uint32_t*)buf; 2018 + uint32_t *hc = (uint32_t *)buf; 1995 2019 struct kvm_ppc_pvinfo pvinfo; 1996 2020 1997 2021 if (!kvmppc_get_pvinfo(env, &pvinfo)) { ··· 2064 2088 exit(1); 2065 2089 } 2066 2090 2067 - /* Update the capability flag so we sync the right information 2068 - * with kvm */ 2091 + /* 2092 + * Update the capability flag so we sync the right information 2093 + * with kvm 2094 + */ 2069 2095 cap_papr = 1; 2070 2096 } 2071 2097 ··· 2133 2159 long rampagesize, best_page_shift; 2134 2160 int i; 2135 2161 2136 - /* Find the largest hardware supported page size that's less than 2137 - * or equal to the (logical) backing page size of guest RAM */ 2162 + /* 2163 + * Find the largest hardware supported page size that's less than 2164 + * or equal to the (logical) backing page size of guest RAM 2165 + */ 2138 2166 kvm_get_smmu_info(&info, &error_fatal); 2139 2167 rampagesize = qemu_minrampagesize(); 2140 2168 best_page_shift = 0; ··· 2184 2212 int fd; 2185 2213 void *table; 2186 2214 2187 - /* Must set fd to -1 so we don't try to munmap when called for 2215 + /* 2216 + * Must set fd to -1 so we don't try to munmap when called for 2188 2217 * destroying the table, which the upper layers -will- do 2189 2218 */ 2190 2219 *pfd = -1; ··· 2229 2258 len = nb_table * sizeof(uint64_t); 2230 2259 /* FIXME: round this up to page size */ 2231 2260 2232 - table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); 2261 + table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); 2233 2262 if (table == 
MAP_FAILED) { 2234 2263 fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n", 2235 2264 liobn); ··· 2272 2301 int ret; 2273 2302 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift); 2274 2303 if (ret == -ENOTTY) { 2275 - /* At least some versions of PR KVM advertise the 2304 + /* 2305 + * At least some versions of PR KVM advertise the 2276 2306 * capability, but don't implement the ioctl(). Oops. 2277 2307 * Return 0 so that we allocate the htab in qemu, as is 2278 - * correct for PR. */ 2308 + * correct for PR. 2309 + */ 2279 2310 return 0; 2280 2311 } else if (ret < 0) { 2281 2312 return ret; ··· 2283 2314 return shift; 2284 2315 } 2285 2316 2286 - /* We have a kernel that predates the htab reset calls. For PR 2317 + /* 2318 + * We have a kernel that predates the htab reset calls. For PR 2287 2319 * KVM, we need to allocate the htab ourselves, for an HV KVM of 2288 - * this era, it has allocated a 16MB fixed size hash table already. */ 2320 + * this era, it has allocated a 16MB fixed size hash table 2321 + * already. 2322 + */ 2289 2323 if (kvmppc_is_pr(kvm_state)) { 2290 2324 /* PR - tell caller to allocate htab */ 2291 2325 return 0; ··· 2667 2701 } 2668 2702 } 2669 2703 } while ((rc != 0) 2670 - && ((max_ns < 0) 2671 - || ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns))); 2704 + && ((max_ns < 0) || 2705 + ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns))); 2672 2706 2673 2707 return (rc == 0) ? 
1 : 0; 2674 2708 } ··· 2677 2711 uint16_t n_valid, uint16_t n_invalid) 2678 2712 { 2679 2713 struct kvm_get_htab_header *buf; 2680 - size_t chunksize = sizeof(*buf) + n_valid*HASH_PTE_SIZE_64; 2714 + size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64; 2681 2715 ssize_t rc; 2682 2716 2683 2717 buf = alloca(chunksize); ··· 2685 2719 buf->n_valid = n_valid; 2686 2720 buf->n_invalid = n_invalid; 2687 2721 2688 - qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64*n_valid); 2722 + qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid); 2689 2723 2690 2724 rc = write(fd, buf, chunksize); 2691 2725 if (rc < 0) {
+2 -1
target/ppc/kvm_ppc.h
··· 117 117 return 0; 118 118 } 119 119 120 - static inline int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len) 120 + static inline int kvmppc_get_hypercall(CPUPPCState *env, 121 + uint8_t *buf, int buf_len) 121 122 { 122 123 return -1; 123 124 }
+63 -43
target/ppc/machine.c
··· 24 24 #endif 25 25 target_ulong xer; 26 26 27 - for (i = 0; i < 32; i++) 27 + for (i = 0; i < 32; i++) { 28 28 qemu_get_betls(f, &env->gpr[i]); 29 + } 29 30 #if !defined(TARGET_PPC64) 30 - for (i = 0; i < 32; i++) 31 + for (i = 0; i < 32; i++) { 31 32 qemu_get_betls(f, &env->gprh[i]); 33 + } 32 34 #endif 33 35 qemu_get_betls(f, &env->lr); 34 36 qemu_get_betls(f, &env->ctr); 35 - for (i = 0; i < 8; i++) 37 + for (i = 0; i < 8; i++) { 36 38 qemu_get_be32s(f, &env->crf[i]); 39 + } 37 40 qemu_get_betls(f, &xer); 38 41 cpu_write_xer(env, xer); 39 42 qemu_get_betls(f, &env->reserve_addr); 40 43 qemu_get_betls(f, &env->msr); 41 - for (i = 0; i < 4; i++) 44 + for (i = 0; i < 4; i++) { 42 45 qemu_get_betls(f, &env->tgpr[i]); 46 + } 43 47 for (i = 0; i < 32; i++) { 44 48 union { 45 49 float64 d; ··· 56 60 qemu_get_sbe32s(f, &slb_nr); 57 61 #endif 58 62 qemu_get_betls(f, &sdr1); 59 - for (i = 0; i < 32; i++) 63 + for (i = 0; i < 32; i++) { 60 64 qemu_get_betls(f, &env->sr[i]); 61 - for (i = 0; i < 2; i++) 62 - for (j = 0; j < 8; j++) 65 + } 66 + for (i = 0; i < 2; i++) { 67 + for (j = 0; j < 8; j++) { 63 68 qemu_get_betls(f, &env->DBAT[i][j]); 64 - for (i = 0; i < 2; i++) 65 - for (j = 0; j < 8; j++) 69 + } 70 + } 71 + for (i = 0; i < 2; i++) { 72 + for (j = 0; j < 8; j++) { 66 73 qemu_get_betls(f, &env->IBAT[i][j]); 74 + } 75 + } 67 76 qemu_get_sbe32s(f, &env->nb_tlb); 68 77 qemu_get_sbe32s(f, &env->tlb_per_way); 69 78 qemu_get_sbe32s(f, &env->nb_ways); ··· 71 80 qemu_get_sbe32s(f, &env->id_tlbs); 72 81 qemu_get_sbe32s(f, &env->nb_pids); 73 82 if (env->tlb.tlb6) { 74 - // XXX assumes 6xx 83 + /* XXX assumes 6xx */ 75 84 for (i = 0; i < env->nb_tlb; i++) { 76 85 qemu_get_betls(f, &env->tlb.tlb6[i].pte0); 77 86 qemu_get_betls(f, &env->tlb.tlb6[i].pte1); 78 87 qemu_get_betls(f, &env->tlb.tlb6[i].EPN); 79 88 } 80 89 } 81 - for (i = 0; i < 4; i++) 90 + for (i = 0; i < 4; i++) { 82 91 qemu_get_betls(f, &env->pb[i]); 83 - for (i = 0; i < 1024; i++) 92 + } 93 + for (i = 0; i < 
1024; i++) { 84 94 qemu_get_betls(f, &env->spr[i]); 95 + } 85 96 if (!cpu->vhyp) { 86 97 ppc_store_sdr1(env, sdr1); 87 98 } ··· 94 105 qemu_get_sbe32s(f, &env->error_code); 95 106 qemu_get_be32s(f, &env->pending_interrupts); 96 107 qemu_get_be32s(f, &env->irq_input_state); 97 - for (i = 0; i < POWERPC_EXCP_NB; i++) 108 + for (i = 0; i < POWERPC_EXCP_NB; i++) { 98 109 qemu_get_betls(f, &env->excp_vectors[i]); 110 + } 99 111 qemu_get_betls(f, &env->excp_prefix); 100 112 qemu_get_betls(f, &env->ivor_mask); 101 113 qemu_get_betls(f, &env->ivpr_mask); ··· 253 265 env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr; 254 266 255 267 for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { 256 - env->spr[SPR_DBAT0U + 2*i] = env->DBAT[0][i]; 257 - env->spr[SPR_DBAT0U + 2*i + 1] = env->DBAT[1][i]; 258 - env->spr[SPR_IBAT0U + 2*i] = env->IBAT[0][i]; 259 - env->spr[SPR_IBAT0U + 2*i + 1] = env->IBAT[1][i]; 268 + env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i]; 269 + env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i]; 270 + env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i]; 271 + env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i]; 260 272 } 261 - for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) { 262 - env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4]; 263 - env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4]; 264 - env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4]; 265 - env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4]; 273 + for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { 274 + env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4]; 275 + env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4]; 276 + env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4]; 277 + env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4]; 266 278 } 267 279 268 280 /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */ 269 281 if (cpu->pre_2_8_migration) { 270 - /* Mask out bits that got added to msr_mask since the versions 271 - * which stupidly included it in the migration stream. 
*/ 282 + /* 283 + * Mask out bits that got added to msr_mask since the versions 284 + * which stupidly included it in the migration stream. 285 + */ 272 286 target_ulong metamask = 0 273 287 #if defined(TARGET_PPC64) 274 288 | (1ULL << MSR_TS0) ··· 277 291 ; 278 292 cpu->mig_msr_mask = env->msr_mask & ~metamask; 279 293 cpu->mig_insns_flags = env->insns_flags & insns_compat_mask; 280 - /* CPU models supported by old machines all have PPC_MEM_TLBIE, 281 - * so we set it unconditionally to allow backward migration from 282 - * a POWER9 host to a POWER8 host. 294 + /* 295 + * CPU models supported by old machines all have 296 + * PPC_MEM_TLBIE, so we set it unconditionally to allow 297 + * backward migration from a POWER9 host to a POWER8 host. 283 298 */ 284 299 cpu->mig_insns_flags |= PPC_MEM_TLBIE; 285 300 cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2; ··· 379 394 env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR]; 380 395 381 396 for (i = 0; (i < 4) && (i < env->nb_BATs); i++) { 382 - env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i]; 383 - env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1]; 384 - env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i]; 385 - env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1]; 397 + env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i]; 398 + env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1]; 399 + env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i]; 400 + env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1]; 386 401 } 387 - for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) { 388 - env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i]; 389 - env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1]; 390 - env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i]; 391 - env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1]; 402 + for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) { 403 + env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i]; 404 + env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1]; 405 + env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i]; 406 + env->IBAT[1][i 
+ 4] = env->spr[SPR_IBAT4U + 2 * i + 1]; 392 407 } 393 408 394 409 if (!cpu->vhyp) { 395 410 ppc_store_sdr1(env, env->spr[SPR_SDR1]); 396 411 } 397 412 398 - /* Invalidate all supported msr bits except MSR_TGPR/MSR_HVB before restoring */ 413 + /* 414 + * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB 415 + * before restoring 416 + */ 399 417 msr = env->msr; 400 418 env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB); 401 419 ppc_store_msr(env, msr); ··· 409 427 { 410 428 PowerPCCPU *cpu = opaque; 411 429 412 - return (cpu->env.insns_flags & PPC_FLOAT); 430 + return cpu->env.insns_flags & PPC_FLOAT; 413 431 } 414 432 415 433 static const VMStateDescription vmstate_fpu = { ··· 428 446 { 429 447 PowerPCCPU *cpu = opaque; 430 448 431 - return (cpu->env.insns_flags & PPC_ALTIVEC); 449 + return cpu->env.insns_flags & PPC_ALTIVEC; 432 450 } 433 451 434 452 static int get_vscr(QEMUFile *f, void *opaque, size_t size, ··· 483 501 { 484 502 PowerPCCPU *cpu = opaque; 485 503 486 - return (cpu->env.insns_flags2 & PPC2_VSX); 504 + return cpu->env.insns_flags2 & PPC2_VSX; 487 505 } 488 506 489 507 static const VMStateDescription vmstate_vsx = { ··· 591 609 PowerPCCPU *cpu = opaque; 592 610 593 611 /* We don't support any of the old segment table based 64-bit CPUs */ 594 - return (cpu->env.mmu_model & POWERPC_MMU_64); 612 + return cpu->env.mmu_model & POWERPC_MMU_64; 595 613 } 596 614 597 615 static int slb_post_load(void *opaque, int version_id) ··· 600 618 CPUPPCState *env = &cpu->env; 601 619 int i; 602 620 603 - /* We've pulled in the raw esid and vsid values from the migration 604 - * stream, but we need to recompute the page size pointers */ 621 + /* 622 + * We've pulled in the raw esid and vsid values from the migration 623 + * stream, but we need to recompute the page size pointers 624 + */ 605 625 for (i = 0; i < cpu->hash64_opts->slb_size; i++) { 606 626 if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) { 607 627 /* Migration source 
had bad values in its SLB */
+19 -14
target/ppc/mem_helper.c
··· 27 27 #include "internal.h" 28 28 #include "qemu/atomic128.h" 29 29 30 - //#define DEBUG_OP 30 + /* #define DEBUG_OP */ 31 31 32 32 static inline bool needs_byteswap(const CPUPPCState *env) 33 33 { ··· 103 103 do_lsw(env, addr, nb, reg, GETPC()); 104 104 } 105 105 106 - /* PPC32 specification says we must generate an exception if 107 - * rA is in the range of registers to be loaded. 108 - * In an other hand, IBM says this is valid, but rA won't be loaded. 109 - * For now, I'll follow the spec... 106 + /* 107 + * PPC32 specification says we must generate an exception if rA is in 108 + * the range of registers to be loaded. In an other hand, IBM says 109 + * this is valid, but rA won't be loaded. For now, I'll follow the 110 + * spec... 110 111 */ 111 112 void helper_lswx(CPUPPCState *env, target_ulong addr, uint32_t reg, 112 113 uint32_t ra, uint32_t rb) ··· 199 200 void helper_icbi(CPUPPCState *env, target_ulong addr) 200 201 { 201 202 addr &= ~(env->dcache_line_size - 1); 202 - /* Invalidate one cache line : 203 + /* 204 + * Invalidate one cache line : 203 205 * PowerPC specification says this is to be treated like a load 204 206 * (not a fetch) by the MMU. To be sure it will be so, 205 207 * do the load "by hand". ··· 346 348 #define LO_IDX 0 347 349 #endif 348 350 349 - /* We use msr_le to determine index ordering in a vector. However, 350 - byteswapping is not simply controlled by msr_le. We also need to take 351 - into account endianness of the target. This is done for the little-endian 352 - PPC64 user-mode target. */ 351 + /* 352 + * We use msr_le to determine index ordering in a vector. However, 353 + * byteswapping is not simply controlled by msr_le. We also need to 354 + * take into account endianness of the target. This is done for the 355 + * little-endian PPC64 user-mode target. 
356 + */ 353 357 354 358 #define LVE(name, access, swap, element) \ 355 359 void helper_##name(CPUPPCState *env, ppc_avr_t *r, \ 356 360 target_ulong addr) \ 357 361 { \ 358 362 size_t n_elems = ARRAY_SIZE(r->element); \ 359 - int adjust = HI_IDX*(n_elems - 1); \ 363 + int adjust = HI_IDX * (n_elems - 1); \ 360 364 int sh = sizeof(r->element[0]) >> 1; \ 361 365 int index = (addr & 0xf) >> sh; \ 362 366 if (msr_le) { \ ··· 476 480 477 481 void helper_tbegin(CPUPPCState *env) 478 482 { 479 - /* As a degenerate implementation, always fail tbegin. The reason 483 + /* 484 + * As a degenerate implementation, always fail tbegin. The reason 480 485 * given is "Nesting overflow". The "persistent" bit is set, 481 486 * providing a hint to the error handler to not retry. The TFIAR 482 487 * captures the address of the failure, which is this tbegin 483 - * instruction. Instruction execution will continue with the 484 - * next instruction in memory, which is precisely what we want. 488 + * instruction. Instruction execution will continue with the next 489 + * instruction in memory, which is precisely what we want. 485 490 */ 486 491 487 492 env->spr[SPR_TEXASR] =
+1 -2
target/ppc/mfrom_table.inc.c
··· 1 - static const uint8_t mfrom_ROM_table[602] = 2 - { 1 + static const uint8_t mfrom_ROM_table[602] = { 3 2 77, 77, 76, 76, 75, 75, 74, 74, 4 3 73, 73, 72, 72, 71, 71, 70, 70, 5 4 69, 69, 68, 68, 68, 67, 67, 66,
+5 -3
target/ppc/mfrom_table_gen.c
··· 2 2 #include "qemu/osdep.h" 3 3 #include <math.h> 4 4 5 - int main (void) 5 + int main(void) 6 6 { 7 7 double d; 8 8 uint8_t n; ··· 10 10 11 11 printf("static const uint8_t mfrom_ROM_table[602] =\n{\n "); 12 12 for (i = 0; i < 602; i++) { 13 - /* Extremely decomposed: 13 + /* 14 + * Extremely decomposed: 14 15 * -T0 / 256 15 16 * T0 = 256 * log10(10 + 1.0) + 0.5 16 17 */ ··· 23 24 d += 0.5; 24 25 n = d; 25 26 printf("%3d, ", n); 26 - if ((i & 7) == 7) 27 + if ((i & 7) == 7) { 27 28 printf("\n "); 29 + } 28 30 } 29 31 printf("\n};\n"); 30 32
+5 -4
target/ppc/misc_helper.c
··· 210 210 hreg_store_msr(env, value, 0); 211 211 } 212 212 213 - /* This code is lifted from MacOnLinux. It is called whenever 214 - * THRM1,2 or 3 is read an fixes up the values in such a way 215 - * that will make MacOS not hang. These registers exist on some 216 - * 75x and 74xx processors. 213 + /* 214 + * This code is lifted from MacOnLinux. It is called whenever THRM1,2 215 + * or 3 is read an fixes up the values in such a way that will make 216 + * MacOS not hang. These registers exist on some 75x and 74xx 217 + * processors. 217 218 */ 218 219 void helper_fixup_thrm(CPUPPCState *env) 219 220 {
+41 -18
target/ppc/mmu-hash32.c
··· 27 27 #include "mmu-hash32.h" 28 28 #include "exec/log.h" 29 29 30 - //#define DEBUG_BAT 30 + /* #define DEBUG_BAT */ 31 31 32 32 #ifdef DEBUG_BATS 33 33 # define LOG_BATS(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) ··· 228 228 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 229 229 230 230 if ((sr & 0x1FF00000) >> 20 == 0x07f) { 231 - /* Memory-forced I/O controller interface access */ 232 - /* If T=1 and BUID=x'07F', the 601 performs a memory access 231 + /* 232 + * Memory-forced I/O controller interface access 233 + * 234 + * If T=1 and BUID=x'07F', the 601 performs a memory access 233 235 * to SR[28-31] LA[4-31], bypassing all protection mechanisms. 234 236 */ 235 237 *raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); ··· 265 267 } 266 268 return 1; 267 269 case ACCESS_CACHE: 268 - /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ 269 - /* Should make the instruction do no-op. 270 - * As it already do no-op, it's quite easy :-) 270 + /* 271 + * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 272 + * 273 + * Should make the instruction do no-op. 
As it already do 274 + * no-op, it's quite easy :-) 271 275 */ 272 276 *raddr = eaddr; 273 277 return 0; ··· 341 345 return -1; 342 346 } 343 347 348 + static void ppc_hash32_set_r(PowerPCCPU *cpu, hwaddr pte_offset, uint32_t pte1) 349 + { 350 + target_ulong base = ppc_hash32_hpt_base(cpu); 351 + hwaddr offset = pte_offset + 6; 352 + 353 + /* The HW performs a non-atomic byte update */ 354 + stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); 355 + } 356 + 357 + static void ppc_hash32_set_c(PowerPCCPU *cpu, hwaddr pte_offset, uint64_t pte1) 358 + { 359 + target_ulong base = ppc_hash32_hpt_base(cpu); 360 + hwaddr offset = pte_offset + 7; 361 + 362 + /* The HW performs a non-atomic byte update */ 363 + stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); 364 + } 365 + 344 366 static hwaddr ppc_hash32_htab_lookup(PowerPCCPU *cpu, 345 367 target_ulong sr, target_ulong eaddr, 346 368 ppc_hash_pte32_t *pte) ··· 399 421 hwaddr pte_offset; 400 422 ppc_hash_pte32_t pte; 401 423 int prot; 402 - uint32_t new_pte1; 403 424 const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; 404 425 hwaddr raddr; 405 426 ··· 515 536 516 537 /* 8. 
Update PTE referenced and changed bits if necessary */ 517 538 518 - new_pte1 = pte.pte1 | HPTE32_R_R; /* set referenced bit */ 519 - if (rwx == 1) { 520 - new_pte1 |= HPTE32_R_C; /* set changed (dirty) bit */ 521 - } else { 522 - /* Treat the page as read-only for now, so that a later write 523 - * will pass through this function again to set the C bit */ 524 - prot &= ~PAGE_WRITE; 525 - } 526 - 527 - if (new_pte1 != pte.pte1) { 528 - ppc_hash32_store_hpte1(cpu, pte_offset, new_pte1); 539 + if (!(pte.pte1 & HPTE32_R_R)) { 540 + ppc_hash32_set_r(cpu, pte_offset, pte.pte1); 529 541 } 542 + if (!(pte.pte1 & HPTE32_R_C)) { 543 + if (rwx == 1) { 544 + ppc_hash32_set_c(cpu, pte_offset, pte.pte1); 545 + } else { 546 + /* 547 + * Treat the page as read-only for now, so that a later write 548 + * will pass through this function again to set the C bit 549 + */ 550 + prot &= ~PAGE_WRITE; 551 + } 552 + } 530 553 531 554 /* 9. Determine the real address from the PTE */ 532 555
+83 -53
target/ppc/mmu-hash64.c
··· 30 30 #include "hw/hw.h" 31 31 #include "mmu-book3s-v3.h" 32 32 33 - //#define DEBUG_SLB 33 + /* #define DEBUG_SLB */ 34 34 35 35 #ifdef DEBUG_SLB 36 36 # define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__) ··· 58 58 59 59 LOG_SLB("%s: slot %d %016" PRIx64 " %016" 60 60 PRIx64 "\n", __func__, n, slb->esid, slb->vsid); 61 - /* We check for 1T matches on all MMUs here - if the MMU 61 + /* 62 + * We check for 1T matches on all MMUs here - if the MMU 62 63 * doesn't have 1T segment support, we will have prevented 1T 63 - * entries from being inserted in the slbmte code. */ 64 + * entries from being inserted in the slbmte code. 65 + */ 64 66 if (((slb->esid == esid_256M) && 65 67 ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M)) 66 68 || ((slb->esid == esid_1T) && ··· 103 105 104 106 if (slb->esid & SLB_ESID_V) { 105 107 slb->esid &= ~SLB_ESID_V; 106 - /* XXX: given the fact that segment size is 256 MB or 1TB, 108 + /* 109 + * XXX: given the fact that segment size is 256 MB or 1TB, 107 110 * and we still don't have a tlb_flush_mask(env, n, mask) 108 111 * in QEMU, we just invalidate all TLBs 109 112 */ ··· 126 129 if (slb->esid & SLB_ESID_V) { 127 130 slb->esid &= ~SLB_ESID_V; 128 131 129 - /* XXX: given the fact that segment size is 256 MB or 1TB, 132 + /* 133 + * XXX: given the fact that segment size is 256 MB or 1TB, 130 134 * and we still don't have a tlb_flush_mask(env, n, mask) 131 135 * in QEMU, we just invalidate all TLBs 132 136 */ ··· 306 310 { 307 311 CPUPPCState *env = &cpu->env; 308 312 unsigned pp, key; 309 - /* Some pp bit combinations have undefined behaviour, so default 310 - * to no access in those cases */ 313 + /* 314 + * Some pp bit combinations have undefined behaviour, so default 315 + * to no access in those cases 316 + */ 311 317 int prot = 0; 312 318 313 319 key = !!(msr_pr ? 
(slb->vsid & SLB_VSID_KP) ··· 376 382 } 377 383 378 384 key = HPTE64_R_KEY(pte.pte1); 379 - amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3; 385 + amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3; 380 386 381 387 /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */ 382 388 /* env->spr[SPR_AMR]); */ ··· 547 553 if (*pshift == 0) { 548 554 continue; 549 555 } 550 - /* We don't do anything with pshift yet as qemu TLB only deals 551 - * with 4K pages anyway 556 + /* 557 + * We don't do anything with pshift yet as qemu TLB only 558 + * deals with 4K pages anyway 552 559 */ 553 560 pte->pte0 = pte0; 554 561 pte->pte1 = pte1; ··· 572 579 uint64_t vsid, epnmask, epn, ptem; 573 580 const PPCHash64SegmentPageSizes *sps = slb->sps; 574 581 575 - /* The SLB store path should prevent any bad page size encodings 576 - * getting in there, so: */ 582 + /* 583 + * The SLB store path should prevent any bad page size encodings 584 + * getting in there, so: 585 + */ 577 586 assert(sps); 578 587 579 588 /* If ISL is set in LPCR we need to clamp the page size to 4K */ ··· 716 725 } 717 726 718 727 728 + static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) 729 + { 730 + hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16; 731 + 732 + if (cpu->vhyp) { 733 + PPCVirtualHypervisorClass *vhc = 734 + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 735 + vhc->hpte_set_r(cpu->vhyp, ptex, pte1); 736 + return; 737 + } 738 + base = ppc_hash64_hpt_base(cpu); 739 + 740 + 741 + /* The HW performs a non-atomic byte update */ 742 + stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01); 743 + } 744 + 745 + static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) 746 + { 747 + hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15; 748 + 749 + if (cpu->vhyp) { 750 + PPCVirtualHypervisorClass *vhc = 751 + PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 752 + vhc->hpte_set_c(cpu->vhyp, ptex, pte1); 753 + return; 754 + } 755 + base = 
ppc_hash64_hpt_base(cpu); 756 + 757 + /* The HW performs a non-atomic byte update */ 758 + stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80); 759 + } 760 + 719 761 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, 720 762 int rwx, int mmu_idx) 721 763 { ··· 726 768 hwaddr ptex; 727 769 ppc_hash_pte64_t pte; 728 770 int exec_prot, pp_prot, amr_prot, prot; 729 - uint64_t new_pte1; 730 771 const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC}; 731 772 hwaddr raddr; 732 773 733 774 assert((rwx == 0) || (rwx == 1) || (rwx == 2)); 734 775 735 - /* Note on LPCR usage: 970 uses HID4, but our special variant 736 - * of store_spr copies relevant fields into env->spr[SPR_LPCR]. 737 - * Similarily we filter unimplemented bits when storing into 738 - * LPCR depending on the MMU version. This code can thus just 739 - * use the LPCR "as-is". 776 + /* 777 + * Note on LPCR usage: 970 uses HID4, but our special variant of 778 + * store_spr copies relevant fields into env->spr[SPR_LPCR]. 779 + * Similarily we filter unimplemented bits when storing into LPCR 780 + * depending on the MMU version. This code can thus just use the 781 + * LPCR "as-is". 740 782 */ 741 783 742 784 /* 1. Handle real mode accesses */ 743 785 if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { 744 - /* Translation is supposedly "off" */ 745 - /* In real mode the top 4 effective address bits are (mostly) ignored */ 786 + /* 787 + * Translation is supposedly "off", but in real mode the top 4 788 + * effective address bits are (mostly) ignored 789 + */ 746 790 raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; 747 791 748 792 /* In HV mode, add HRMOR if top EA bit is clear */ ··· 871 915 872 916 /* 6. 
Update PTE referenced and changed bits if necessary */ 873 917 874 - new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */ 875 - if (rwx == 1) { 876 - new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */ 877 - } else { 878 - /* Treat the page as read-only for now, so that a later write 879 - * will pass through this function again to set the C bit */ 880 - prot &= ~PAGE_WRITE; 918 + if (!(pte.pte1 & HPTE64_R_R)) { 919 + ppc_hash64_set_r(cpu, ptex, pte.pte1); 881 920 } 882 - 883 - if (new_pte1 != pte.pte1) { 884 - ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1); 921 + if (!(pte.pte1 & HPTE64_R_C)) { 922 + if (rwx == 1) { 923 + ppc_hash64_set_c(cpu, ptex, pte.pte1); 924 + } else { 925 + /* 926 + * Treat the page as read-only for now, so that a later write 927 + * will pass through this function again to set the C bit 928 + */ 929 + prot &= ~PAGE_WRITE; 930 + } 885 931 } 886 932 887 933 /* 7. Determine the real address from the PTE */ ··· 940 986 & TARGET_PAGE_MASK; 941 987 } 942 988 943 - void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 944 - uint64_t pte0, uint64_t pte1) 945 - { 946 - hwaddr base; 947 - hwaddr offset = ptex * HASH_PTE_SIZE_64; 948 - 949 - if (cpu->vhyp) { 950 - PPCVirtualHypervisorClass *vhc = 951 - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); 952 - vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1); 953 - return; 954 - } 955 - base = ppc_hash64_hpt_base(cpu); 956 - 957 - stq_phys(CPU(cpu)->as, base + offset, pte0); 958 - stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1); 959 - } 960 - 961 989 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex, 962 990 target_ulong pte0, target_ulong pte1) 963 991 { ··· 1023 1051 return; 1024 1052 } 1025 1053 1026 - /* Make one up. Mostly ignore the ESID which will not be 1027 - * needed for translation 1054 + /* 1055 + * Make one up. 
Mostly ignore the ESID which will not be needed 1056 + * for translation 1028 1057 */ 1029 1058 vsid = SLB_VSID_VRMA; 1030 1059 vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT; ··· 1080 1109 } 1081 1110 env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26; 1082 1111 1083 - /* XXX We could also write LPID from HID4 here 1112 + /* 1113 + * XXX We could also write LPID from HID4 here 1084 1114 * but since we don't tag any translation on it 1085 1115 * it doesn't actually matter 1086 - */ 1087 - /* XXX For proper emulation of 970 we also need 1116 + * 1117 + * XXX For proper emulation of 970 we also need 1088 1118 * to dig HRMOR out of HID5 1089 1119 */ 1090 1120 break;
-2
target/ppc/mmu-hash64.h
··· 10 10 hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr); 11 11 int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw, 12 12 int mmu_idx); 13 - void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex, 14 - uint64_t pte0, uint64_t pte1); 15 13 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, 16 14 target_ulong pte_index, 17 15 target_ulong pte0, target_ulong pte1);
+13 -3
target/ppc/mmu-radix64.c
··· 228 228 ppc_v3_pate_t pate; 229 229 230 230 assert((rwx == 0) || (rwx == 1) || (rwx == 2)); 231 - assert(ppc64_use_proc_tbl(cpu)); 232 231 233 - /* Real Mode Access */ 234 - if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) { 232 + /* HV or virtual hypervisor Real Mode Access */ 233 + if ((msr_hv || cpu->vhyp) && 234 + (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0)))) { 235 235 /* In real mode top 4 effective addr bits (mostly) ignored */ 236 236 raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL; 237 237 ··· 239 239 PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, 240 240 TARGET_PAGE_SIZE); 241 241 return 0; 242 + } 243 + 244 + /* 245 + * Check UPRT (we avoid the check in real mode to deal with 246 + * transitional states during kexec. 247 + */ 248 + if (!ppc64_use_proc_tbl(cpu)) { 249 + qemu_log_mask(LOG_GUEST_ERROR, 250 + "LPCR:UPRT not set in radix mode ! LPCR=" 251 + TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); 242 252 } 243 253 244 254 /* Virtual Mode Access - get the fully qualified address */
+91 -53
target/ppc/mmu_helper.c
··· 33 33 #include "mmu-book3s-v3.h" 34 34 #include "mmu-radix64.h" 35 35 36 - //#define DEBUG_MMU 37 - //#define DEBUG_BATS 38 - //#define DEBUG_SOFTWARE_TLB 39 - //#define DUMP_PAGE_TABLES 40 - //#define FLUSH_ALL_TLBS 36 + /* #define DEBUG_MMU */ 37 + /* #define DEBUG_BATS */ 38 + /* #define DEBUG_SOFTWARE_TLB */ 39 + /* #define DUMP_PAGE_TABLES */ 40 + /* #define FLUSH_ALL_TLBS */ 41 41 42 42 #ifdef DEBUG_MMU 43 43 # define LOG_MMU_STATE(cpu) log_cpu_state_mask(CPU_LOG_MMU, (cpu), 0) ··· 152 152 } 153 153 154 154 static inline int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, 155 - target_ulong pte1, int h, int rw, int type) 155 + target_ulong pte1, int h, 156 + int rw, int type) 156 157 { 157 158 target_ulong ptem, mmask; 158 159 int access, ret, pteh, ptev, pp; ··· 332 333 pte_is_valid(tlb->pte0) ? "valid" : "inval", 333 334 tlb->EPN, eaddr, tlb->pte1, 334 335 rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D'); 335 - switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw, access_type)) { 336 + switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 337 + 0, rw, access_type)) { 336 338 case -3: 337 339 /* TLB inconsistency */ 338 340 return -1; ··· 347 349 break; 348 350 case 0: 349 351 /* access granted */ 350 - /* XXX: we should go on looping to check all TLBs consistency 351 - * but we can speed-up the whole thing as the 352 - * result would be undefined if TLBs are not consistent. 352 + /* 353 + * XXX: we should go on looping to check all TLBs 354 + * consistency but we can speed-up the whole thing as 355 + * the result would be undefined if TLBs are not 356 + * consistent. 353 357 */ 354 358 ret = 0; 355 359 best = nr; ··· 550 554 qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); 551 555 /* Direct-store segment : absolutely *BUGGY* for now */ 552 556 553 - /* Direct-store implies a 32-bit MMU. 557 + /* 558 + * Direct-store implies a 32-bit MMU. 554 559 * Check the Segment Register's bus unit ID (BUID). 
555 560 */ 556 561 sr = env->sr[eaddr >> 28]; 557 562 if ((sr & 0x1FF00000) >> 20 == 0x07f) { 558 - /* Memory-forced I/O controller interface access */ 559 - /* If T=1 and BUID=x'07F', the 601 performs a memory access 560 - * to SR[28-31] LA[4-31], bypassing all protection mechanisms. 563 + /* 564 + * Memory-forced I/O controller interface access 565 + * 566 + * If T=1 and BUID=x'07F', the 601 performs a memory 567 + * access to SR[28-31] LA[4-31], bypassing all protection 568 + * mechanisms. 561 569 */ 562 570 ctx->raddr = ((sr & 0xF) << 28) | (eaddr & 0x0FFFFFFF); 563 571 ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; ··· 578 586 /* lwarx, ldarx or srwcx. */ 579 587 return -4; 580 588 case ACCESS_CACHE: 581 - /* dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi */ 582 - /* Should make the instruction do no-op. 583 - * As it already do no-op, it's quite easy :-) 589 + /* 590 + * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi 591 + * 592 + * Should make the instruction do no-op. As it already do 593 + * no-op, it's quite easy :-) 584 594 */ 585 595 ctx->raddr = eaddr; 586 596 return 0; ··· 942 952 return esr; 943 953 } 944 954 945 - /* Get EPID register given the mmu_idx. If this is regular load, 946 - * construct the EPID access bits from current processor state */ 947 - 948 - /* Get the effective AS and PR bits and the PID. The PID is returned only if 949 - * EPID load is requested, otherwise the caller must detect the correct EPID. 950 - * Return true if valid EPID is returned. */ 955 + /* 956 + * Get EPID register given the mmu_idx. If this is regular load, 957 + * construct the EPID access bits from current processor state 958 + * 959 + * Get the effective AS and PR bits and the PID. The PID is returned 960 + * only if EPID load is requested, otherwise the caller must detect 961 + * the correct EPID. Return true if valid EPID is returned. 
962 + */ 951 963 static bool mmubooke206_get_as(CPUPPCState *env, 952 964 int mmu_idx, uint32_t *epid_out, 953 965 bool *as_out, bool *pr_out) ··· 1369 1381 1370 1382 case POWERPC_MMU_SOFT_4xx_Z: 1371 1383 if (unlikely(msr_pe != 0)) { 1372 - /* 403 family add some particular protections, 1373 - * using PBL/PBU registers for accesses with no translation. 1384 + /* 1385 + * 403 family add some particular protections, using 1386 + * PBL/PBU registers for accesses with no translation. 1374 1387 */ 1375 1388 in_plb = 1376 1389 /* Check PLB validity */ ··· 1453 1466 if (real_mode) { 1454 1467 ret = check_physical(env, ctx, eaddr, rw); 1455 1468 } else { 1456 - cpu_abort(CPU(cpu), "PowerPC in real mode do not do any translation\n"); 1469 + cpu_abort(CPU(cpu), 1470 + "PowerPC in real mode do not do any translation\n"); 1457 1471 } 1458 1472 return -1; 1459 1473 default: ··· 1498 1512 1499 1513 if (unlikely(get_physical_address(env, &ctx, addr, 0, ACCESS_INT) != 0)) { 1500 1514 1501 - /* Some MMUs have separate TLBs for code and data. If we only try an 1502 - * ACCESS_INT, we may not be able to read instructions mapped by code 1503 - * TLBs, so we also try a ACCESS_CODE. 1515 + /* 1516 + * Some MMUs have separate TLBs for code and data. If we only 1517 + * try an ACCESS_INT, we may not be able to read instructions 1518 + * mapped by code TLBs, so we also try a ACCESS_CODE. 
1504 1519 */ 1505 1520 if (unlikely(get_physical_address(env, &ctx, addr, 0, 1506 1521 ACCESS_CODE) != 0)) { ··· 1805 1820 1806 1821 base = BATu & ~0x0001FFFF; 1807 1822 end = base + mask + 0x00020000; 1823 + if (((end - base) >> TARGET_PAGE_BITS) > 1024) { 1824 + /* Flushing 1024 4K pages is slower than a complete flush */ 1825 + LOG_BATS("Flush all BATs\n"); 1826 + tlb_flush(CPU(cs)); 1827 + LOG_BATS("Flush done\n"); 1828 + return; 1829 + } 1808 1830 LOG_BATS("Flush BAT from " TARGET_FMT_lx " to " TARGET_FMT_lx " (" 1809 1831 TARGET_FMT_lx ")\n", base, end, mask); 1810 1832 for (page = base; page != end; page += TARGET_PAGE_SIZE) { ··· 1834 1856 #if !defined(FLUSH_ALL_TLBS) 1835 1857 do_invalidate_BAT(env, env->IBAT[0][nr], mask); 1836 1858 #endif 1837 - /* When storing valid upper BAT, mask BEPI and BRPN 1838 - * and invalidate all TLBs covered by this BAT 1859 + /* 1860 + * When storing valid upper BAT, mask BEPI and BRPN and 1861 + * invalidate all TLBs covered by this BAT 1839 1862 */ 1840 1863 mask = (value << 15) & 0x0FFE0000UL; 1841 1864 env->IBAT[0][nr] = (value & 0x00001FFFUL) | ··· 1865 1888 1866 1889 dump_store_bat(env, 'D', 0, nr, value); 1867 1890 if (env->DBAT[0][nr] != value) { 1868 - /* When storing valid upper BAT, mask BEPI and BRPN 1869 - * and invalidate all TLBs covered by this BAT 1891 + /* 1892 + * When storing valid upper BAT, mask BEPI and BRPN and 1893 + * invalidate all TLBs covered by this BAT 1870 1894 */ 1871 1895 mask = (value << 15) & 0x0FFE0000UL; 1872 1896 #if !defined(FLUSH_ALL_TLBS) ··· 1913 1937 do_inval = 1; 1914 1938 #endif 1915 1939 } 1916 - /* When storing valid upper BAT, mask BEPI and BRPN 1917 - * and invalidate all TLBs covered by this BAT 1940 + /* 1941 + * When storing valid upper BAT, mask BEPI and BRPN and 1942 + * invalidate all TLBs covered by this BAT 1918 1943 */ 1919 1944 env->IBAT[0][nr] = (value & 0x00001FFFUL) | 1920 1945 (value & ~0x0001FFFFUL & ~mask); ··· 2027 2052 #if defined(TARGET_PPC64) 2028 2053 if 
(env->mmu_model & POWERPC_MMU_64) { 2029 2054 /* tlbie invalidate TLBs for all segments */ 2030 - /* XXX: given the fact that there are too many segments to invalidate, 2055 + /* 2056 + * XXX: given the fact that there are too many segments to invalidate, 2031 2057 * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU, 2032 2058 * we just invalidate all TLBs 2033 2059 */ ··· 2044 2070 break; 2045 2071 case POWERPC_MMU_32B: 2046 2072 case POWERPC_MMU_601: 2047 - /* Actual CPUs invalidate entire congruence classes based on the 2048 - * geometry of their TLBs and some OSes take that into account, 2049 - * we just mark the TLB to be flushed later (context synchronizing 2050 - * event or sync instruction on 32-bit). 2073 + /* 2074 + * Actual CPUs invalidate entire congruence classes based on 2075 + * the geometry of their TLBs and some OSes take that into 2076 + * account, we just mark the TLB to be flushed later (context 2077 + * synchronizing event or sync instruction on 32-bit). 2051 2078 */ 2052 2079 env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH; 2053 2080 break; ··· 2152 2179 #endif 2153 2180 if (env->sr[srnum] != value) { 2154 2181 env->sr[srnum] = value; 2155 - /* Invalidating 256MB of virtual memory in 4kB pages is way longer than 2156 - flusing the whole TLB. */ 2182 + /* 2183 + * Invalidating 256MB of virtual memory in 4kB pages is way 2184 + * longer than flusing the whole TLB. 2185 + */ 2157 2186 #if !defined(FLUSH_ALL_TLBS) && 0 2158 2187 { 2159 2188 target_ulong page, end; ··· 2264 2293 int nb_BATs; 2265 2294 target_ulong ret = 0; 2266 2295 2267 - /* We don't have to generate many instances of this instruction, 2296 + /* 2297 + * We don't have to generate many instances of this instruction, 2268 2298 * as rac is supervisor only. 
2299 + * 2300 + * XXX: FIX THIS: Pretend we have no BAT 2269 2301 */ 2270 - /* XXX: FIX THIS: Pretend we have no BAT */ 2271 2302 nb_BATs = env->nb_BATs; 2272 2303 env->nb_BATs = 0; 2273 2304 if (get_physical_address(env, &ctx, addr, 0, ACCESS_INT) == 0) { ··· 2422 2453 } 2423 2454 tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT) 2424 2455 & PPC4XX_TLBHI_SIZE_MASK); 2425 - /* We cannot handle TLB size < TARGET_PAGE_SIZE. 2456 + /* 2457 + * We cannot handle TLB size < TARGET_PAGE_SIZE. 2426 2458 * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY 2427 2459 */ 2428 2460 if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) { ··· 2742 2774 } 2743 2775 2744 2776 if (tlb->mas1 & MAS1_VALID) { 2745 - /* Invalidate the page in QEMU TLB if it was a valid entry. 2777 + /* 2778 + * Invalidate the page in QEMU TLB if it was a valid entry. 2746 2779 * 2747 2780 * In "PowerPC e500 Core Family Reference Manual, Rev. 1", 2748 2781 * Section "12.4.2 TLB Write Entry (tlbwe) Instruction": ··· 2751 2784 * "Note that when an L2 TLB entry is written, it may be displacing an 2752 2785 * already valid entry in the same L2 TLB location (a victim). If a 2753 2786 * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1 2754 - * TLB entry is automatically invalidated." */ 2787 + * TLB entry is automatically invalidated." 2788 + */ 2755 2789 flush_page(env, tlb); 2756 2790 } 2757 2791 ··· 2777 2811 mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E; 2778 2812 2779 2813 if (!msr_cm) { 2780 - /* Executing a tlbwe instruction in 32-bit mode will set 2781 - * bits 0:31 of the TLB EPN field to zero. 2814 + /* 2815 + * Executing a tlbwe instruction in 32-bit mode will set bits 2816 + * 0:31 of the TLB EPN field to zero. 
2782 2817 */ 2783 2818 mask &= 0xffffffff; 2784 2819 } ··· 3022 3057 3023 3058 /*****************************************************************************/ 3024 3059 3025 - /* try to fill the TLB and return an exception if error. If retaddr is 3026 - NULL, it means that the function was called in C code (i.e. not 3027 - from generated code or from helper.c) */ 3028 - /* XXX: fix it to restore all registers */ 3060 + /* 3061 + * try to fill the TLB and return an exception if error. If retaddr is 3062 + * NULL, it means that the function was called in C code (i.e. not 3063 + * from generated code or from helper.c) 3064 + * 3065 + * XXX: fix it to restore all registers 3066 + */ 3029 3067 void tlb_fill(CPUState *cs, target_ulong addr, int size, 3030 3068 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) 3031 3069 {
+6 -5
target/ppc/monitor.c
··· 27 27 #include "monitor/hmp-target.h" 28 28 #include "hmp.h" 29 29 30 - static target_long monitor_get_ccr (const struct MonitorDef *md, int val) 30 + static target_long monitor_get_ccr(const struct MonitorDef *md, int val) 31 31 { 32 32 CPUArchState *env = mon_get_cpu_env(); 33 33 unsigned int u; 34 34 int i; 35 35 36 36 u = 0; 37 - for (i = 0; i < 8; i++) 37 + for (i = 0; i < 8; i++) { 38 38 u |= env->crf[i] << (32 - (4 * (i + 1))); 39 + } 39 40 40 41 return u; 41 42 } 42 43 43 - static target_long monitor_get_decr (const struct MonitorDef *md, int val) 44 + static target_long monitor_get_decr(const struct MonitorDef *md, int val) 44 45 { 45 46 CPUArchState *env = mon_get_cpu_env(); 46 47 return cpu_ppc_load_decr(env); 47 48 } 48 49 49 - static target_long monitor_get_tbu (const struct MonitorDef *md, int val) 50 + static target_long monitor_get_tbu(const struct MonitorDef *md, int val) 50 51 { 51 52 CPUArchState *env = mon_get_cpu_env(); 52 53 return cpu_ppc_load_tbu(env); 53 54 } 54 55 55 - static target_long monitor_get_tbl (const struct MonitorDef *md, int val) 56 + static target_long monitor_get_tbl(const struct MonitorDef *md, int val) 56 57 { 57 58 CPUArchState *env = mon_get_cpu_env(); 58 59 return cpu_ppc_load_tbl(env);
+27 -2
target/ppc/trace-events
··· 1 1 # See docs/devel/tracing.txt for syntax documentation. 2 2 3 3 # kvm.c 4 - kvm_failed_spr_set(int str, const char *msg) "Warning: Unable to set SPR %d to KVM: %s" 5 - kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s" 4 + kvm_failed_spr_set(int spr, const char *msg) "Warning: Unable to set SPR %d to KVM: %s" 5 + kvm_failed_spr_get(int spr, const char *msg) "Warning: Unable to retrieve SPR %d from KVM: %s" 6 + kvm_failed_fpscr_set(const char *msg) "Unable to set FPSCR to KVM: %s" 7 + kvm_failed_fp_set(const char *fpname, int fpnum, const char *msg) "Unable to set %s%d to KVM: %s" 8 + kvm_failed_vscr_set(const char *msg) "Unable to set VSCR to KVM: %s" 9 + kvm_failed_vr_set(int vr, const char *msg) "Unable to set VR%d to KVM: %s" 10 + kvm_failed_fpscr_get(const char *msg) "Unable to get FPSCR from KVM: %s" 11 + kvm_failed_fp_get(const char *fpname, int fpnum, const char *msg) "Unable to get %s%d from KVM: %s" 12 + kvm_failed_vscr_get(const char *msg) "Unable to get VSCR from KVM: %s" 13 + kvm_failed_vr_get(int vr, const char *msg) "Unable to get VR%d from KVM: %s" 14 + kvm_failed_vpa_addr_get(const char *msg) "Unable to get VPA address from KVM: %s" 15 + kvm_failed_slb_get(const char *msg) "Unable to get SLB shadow state from KVM: %s" 16 + kvm_failed_dtl_get(const char *msg) "Unable to get dispatch trace log state from KVM: %s" 17 + kvm_failed_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s" 18 + kvm_failed_slb_set(const char *msg) "Unable to set SLB shadow state to KVM: %s" 19 + kvm_failed_dtl_set(const char *msg) "Unable to set dispatch trace log state to KVM: %s" 20 + kvm_failed_null_vpa_addr_set(const char *msg) "Unable to set VPA address to KVM: %s" 21 + kvm_failed_put_vpa(void) "Warning: Unable to set VPA information to KVM" 22 + kvm_failed_get_vpa(void) "Warning: Unable to get VPA information from KVM" 23 + kvm_injected_interrupt(int irq) "injected interrupt %d" 24 + 
kvm_handle_dcr_write(void) "handle dcr write" 25 + kvm_handle_drc_read(void) "handle dcr read" 26 + kvm_handle_halt(void) "handle halt" 27 + kvm_handle_papr_hcall(void) "handle PAPR hypercall" 28 + kvm_handle_epr(void) "handle epr" 29 + kvm_handle_watchdog_expiry(void) "handle watchdog expiry" 30 + kvm_handle_debug_exception(void) "handle debug exception"
+313 -191
target/ppc/translate.c
··· 42 42 #define GDBSTUB_SINGLE_STEP 0x4 43 43 44 44 /* Include definitions for instructions classes and implementations flags */ 45 - //#define PPC_DEBUG_DISAS 46 - //#define DO_PPC_STATISTICS 45 + /* #define PPC_DEBUG_DISAS */ 46 + /* #define DO_PPC_STATISTICS */ 47 47 48 48 #ifdef PPC_DEBUG_DISAS 49 49 # define LOG_DISAS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__) ··· 54 54 /* Code translation helpers */ 55 55 56 56 /* global register indexes */ 57 - static char cpu_reg_names[10*3 + 22*4 /* GPR */ 58 - + 10*4 + 22*5 /* SPE GPRh */ 59 - + 8*5 /* CRF */]; 57 + static char cpu_reg_names[10 * 3 + 22 * 4 /* GPR */ 58 + + 10 * 4 + 22 * 5 /* SPE GPRh */ 59 + + 8 * 5 /* CRF */]; 60 60 static TCGv cpu_gpr[32]; 61 61 static TCGv cpu_gprh[32]; 62 62 static TCGv_i32 cpu_crf[8]; ··· 78 78 void ppc_translate_init(void) 79 79 { 80 80 int i; 81 - char* p; 81 + char *p; 82 82 size_t cpu_reg_names_size; 83 83 84 84 p = cpu_reg_names; ··· 146 146 offsetof(CPUPPCState, fpscr), "fpscr"); 147 147 148 148 cpu_access_type = tcg_global_mem_new_i32(cpu_env, 149 - offsetof(CPUPPCState, access_type), "access_type"); 149 + offsetof(CPUPPCState, access_type), 150 + "access_type"); 150 151 } 151 152 152 153 /* internal defines */ ··· 246 247 { 247 248 TCGv_i32 t0, t1; 248 249 249 - /* These are all synchronous exceptions, we set the PC back to 250 - * the faulting instruction 250 + /* 251 + * These are all synchronous exceptions, we set the PC back to the 252 + * faulting instruction 251 253 */ 252 254 if (ctx->exception == POWERPC_EXCP_NONE) { 253 255 gen_update_nip(ctx, ctx->base.pc_next - 4); ··· 264 266 { 265 267 TCGv_i32 t0; 266 268 267 - /* These are all synchronous exceptions, we set the PC back to 268 - * the faulting instruction 269 + /* 270 + * These are all synchronous exceptions, we set the PC back to the 271 + * faulting instruction 269 272 */ 270 273 if (ctx->exception == POWERPC_EXCP_NONE) { 271 274 gen_update_nip(ctx, ctx->base.pc_next - 4); ··· 320 323 { 321 324 
TCGv_i32 t0; 322 325 323 - /* These are all synchronous exceptions, we set the PC back to 324 - * the faulting instruction 326 + /* 327 + * These are all synchronous exceptions, we set the PC back to the 328 + * faulting instruction 325 329 */ 326 330 if ((ctx->exception != POWERPC_EXCP_BRANCH) && 327 331 (ctx->exception != POWERPC_EXCP_SYNC)) { ··· 602 606 603 607 tcg_gen_movi_tl(t0, CRF_EQ); 604 608 tcg_gen_movi_tl(t1, CRF_LT); 605 - tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), t0, arg0, arg1, t1, t0); 609 + tcg_gen_movcond_tl((s ? TCG_COND_LT : TCG_COND_LTU), 610 + t0, arg0, arg1, t1, t0); 606 611 tcg_gen_movi_tl(t1, CRF_GT); 607 - tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), t0, arg0, arg1, t1, t0); 612 + tcg_gen_movcond_tl((s ? TCG_COND_GT : TCG_COND_GTU), 613 + t0, arg0, arg1, t1, t0); 608 614 609 615 tcg_gen_trunc_tl_i32(t, t0); 610 616 tcg_gen_trunc_tl_i32(cpu_crf[crf], cpu_so); ··· 840 846 841 847 if (compute_ca) { 842 848 if (NARROW_MODE(ctx)) { 843 - /* Caution: a non-obvious corner case of the spec is that we 844 - must produce the *entire* 64-bit addition, but produce the 845 - carry into bit 32. */ 849 + /* 850 + * Caution: a non-obvious corner case of the spec is that 851 + * we must produce the *entire* 64-bit addition, but 852 + * produce the carry into bit 32. 
853 + */ 846 854 TCGv t1 = tcg_temp_new(); 847 855 tcg_gen_xor_tl(t1, arg1, arg2); /* add without carry */ 848 856 tcg_gen_add_tl(t0, arg1, arg2); ··· 1017 1025 tcg_temp_free_i32(t2); 1018 1026 tcg_temp_free_i32(t3); 1019 1027 1020 - if (unlikely(Rc(ctx->opcode) != 0)) 1028 + if (unlikely(Rc(ctx->opcode) != 0)) { 1021 1029 gen_set_Rc0(ctx, ret); 1030 + } 1022 1031 } 1023 1032 /* Div functions */ 1024 1033 #define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ 1025 - static void glue(gen_, name)(DisasContext *ctx) \ 1034 + static void glue(gen_, name)(DisasContext *ctx) \ 1026 1035 { \ 1027 1036 gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ 1028 1037 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ ··· 1091 1100 tcg_temp_free_i64(t2); 1092 1101 tcg_temp_free_i64(t3); 1093 1102 1094 - if (unlikely(Rc(ctx->opcode) != 0)) 1103 + if (unlikely(Rc(ctx->opcode) != 0)) { 1095 1104 gen_set_Rc0(ctx, ret); 1105 + } 1096 1106 } 1097 1107 1098 1108 #define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ 1099 - static void glue(gen_, name)(DisasContext *ctx) \ 1109 + static void glue(gen_, name)(DisasContext *ctx) \ 1100 1110 { \ 1101 1111 gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \ 1102 1112 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ ··· 1219 1229 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); 1220 1230 tcg_temp_free_i32(t0); 1221 1231 tcg_temp_free_i32(t1); 1222 - if (unlikely(Rc(ctx->opcode) != 0)) 1232 + if (unlikely(Rc(ctx->opcode) != 0)) { 1223 1233 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1234 + } 1224 1235 } 1225 1236 1226 1237 /* mulhwu mulhwu. */ ··· 1235 1246 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); 1236 1247 tcg_temp_free_i32(t0); 1237 1248 tcg_temp_free_i32(t1); 1238 - if (unlikely(Rc(ctx->opcode) != 0)) 1249 + if (unlikely(Rc(ctx->opcode) != 0)) { 1239 1250 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1251 + } 1240 1252 } 1241 1253 1242 1254 /* mullw mullw. 
*/ ··· 1255 1267 tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1256 1268 cpu_gpr[rB(ctx->opcode)]); 1257 1269 #endif 1258 - if (unlikely(Rc(ctx->opcode) != 0)) 1270 + if (unlikely(Rc(ctx->opcode) != 0)) { 1259 1271 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1272 + } 1260 1273 } 1261 1274 1262 1275 /* mullwo mullwo. */ ··· 1284 1297 1285 1298 tcg_temp_free_i32(t0); 1286 1299 tcg_temp_free_i32(t1); 1287 - if (unlikely(Rc(ctx->opcode) != 0)) 1300 + if (unlikely(Rc(ctx->opcode) != 0)) { 1288 1301 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1302 + } 1289 1303 } 1290 1304 1291 1305 /* mulli */ ··· 1325 1339 { 1326 1340 tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1327 1341 cpu_gpr[rB(ctx->opcode)]); 1328 - if (unlikely(Rc(ctx->opcode) != 0)) 1342 + if (unlikely(Rc(ctx->opcode) != 0)) { 1329 1343 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 1344 + } 1330 1345 } 1331 1346 1332 1347 /* mulldo mulldo. */ ··· 1369 1384 if (compute_ca) { 1370 1385 /* dest = ~arg1 + arg2 [+ ca]. */ 1371 1386 if (NARROW_MODE(ctx)) { 1372 - /* Caution: a non-obvious corner case of the spec is that we 1373 - must produce the *entire* 64-bit addition, but produce the 1374 - carry into bit 32. */ 1387 + /* 1388 + * Caution: a non-obvious corner case of the spec is that 1389 + * we must produce the *entire* 64-bit addition, but 1390 + * produce the carry into bit 32. 1391 + */ 1375 1392 TCGv inv1 = tcg_temp_new(); 1376 1393 TCGv t1 = tcg_temp_new(); 1377 1394 tcg_gen_not_tl(inv1, arg1); ··· 1404 1421 gen_op_arith_compute_ca32(ctx, t0, arg1, arg2, cpu_ca32, 1); 1405 1422 } 1406 1423 } else if (add_ca) { 1407 - /* Since we're ignoring carry-out, we can simplify the 1408 - standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. */ 1424 + /* 1425 + * Since we're ignoring carry-out, we can simplify the 1426 + * standard ~arg1 + arg2 + ca to arg2 - arg1 + ca - 1. 
1427 + */ 1409 1428 tcg_gen_sub_tl(t0, arg2, arg1); 1410 1429 tcg_gen_add_tl(t0, t0, cpu_ca); 1411 1430 tcg_gen_subi_tl(t0, t0, 1); ··· 1493 1512 1494 1513 /*** Integer logical ***/ 1495 1514 #define GEN_LOGICAL2(name, tcg_op, opc, type) \ 1496 - static void glue(gen_, name)(DisasContext *ctx) \ 1515 + static void glue(gen_, name)(DisasContext *ctx) \ 1497 1516 { \ 1498 1517 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \ 1499 1518 cpu_gpr[rB(ctx->opcode)]); \ ··· 1502 1521 } 1503 1522 1504 1523 #define GEN_LOGICAL1(name, tcg_op, opc, type) \ 1505 - static void glue(gen_, name)(DisasContext *ctx) \ 1524 + static void glue(gen_, name)(DisasContext *ctx) \ 1506 1525 { \ 1507 1526 tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \ 1508 1527 if (unlikely(Rc(ctx->opcode) != 0)) \ ··· 1517 1536 /* andi. */ 1518 1537 static void gen_andi_(DisasContext *ctx) 1519 1538 { 1520 - tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode)); 1539 + tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 1540 + UIMM(ctx->opcode)); 1521 1541 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 1522 1542 } 1523 1543 1524 1544 /* andis. */ 1525 1545 static void gen_andis_(DisasContext *ctx) 1526 1546 { 1527 - tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], UIMM(ctx->opcode) << 16); 1547 + tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 1548 + UIMM(ctx->opcode) << 16); 1528 1549 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 1529 1550 } 1530 1551 ··· 1538 1559 tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); 1539 1560 tcg_temp_free_i32(t); 1540 1561 1541 - if (unlikely(Rc(ctx->opcode) != 0)) 1562 + if (unlikely(Rc(ctx->opcode) != 0)) { 1542 1563 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 1564 + } 1543 1565 } 1544 1566 1545 1567 /* cnttzw */ ··· 1591 1613 rb = rB(ctx->opcode); 1592 1614 /* Optimisation for mr. 
ri case */ 1593 1615 if (rs != ra || rs != rb) { 1594 - if (rs != rb) 1616 + if (rs != rb) { 1595 1617 tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]); 1596 - else 1618 + } else { 1597 1619 tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]); 1598 - if (unlikely(Rc(ctx->opcode) != 0)) 1620 + } 1621 + if (unlikely(Rc(ctx->opcode) != 0)) { 1599 1622 gen_set_Rc0(ctx, cpu_gpr[ra]); 1623 + } 1600 1624 } else if (unlikely(Rc(ctx->opcode) != 0)) { 1601 1625 gen_set_Rc0(ctx, cpu_gpr[rs]); 1602 1626 #if defined(TARGET_PPC64) ··· 1654 1678 tcg_temp_free(t0); 1655 1679 } 1656 1680 #if !defined(CONFIG_USER_ONLY) 1657 - /* Pause out of TCG otherwise spin loops with smt_low eat too much 1658 - * CPU and the kernel hangs. This applies to all encodings other 1659 - * than no-op, e.g., miso(rs=26), yield(27), mdoio(29), mdoom(30), 1660 - * and all currently undefined. 1681 + /* 1682 + * Pause out of TCG otherwise spin loops with smt_low eat too 1683 + * much CPU and the kernel hangs. This applies to all 1684 + * encodings other than no-op, e.g., miso(rs=26), yield(27), 1685 + * mdoio(29), mdoom(30), and all currently undefined. 
1661 1686 */ 1662 1687 gen_pause(ctx); 1663 1688 #endif ··· 1671 1696 static void gen_xor(DisasContext *ctx) 1672 1697 { 1673 1698 /* Optimisation for "set to zero" case */ 1674 - if (rS(ctx->opcode) != rB(ctx->opcode)) 1675 - tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 1676 - else 1699 + if (rS(ctx->opcode) != rB(ctx->opcode)) { 1700 + tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 1701 + cpu_gpr[rB(ctx->opcode)]); 1702 + } else { 1677 1703 tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0); 1678 - if (unlikely(Rc(ctx->opcode) != 0)) 1704 + } 1705 + if (unlikely(Rc(ctx->opcode) != 0)) { 1679 1706 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 1707 + } 1680 1708 } 1681 1709 1682 1710 /* ori */ ··· 1699 1727 /* NOP */ 1700 1728 return; 1701 1729 } 1702 - tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16); 1730 + tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 1731 + uimm << 16); 1703 1732 } 1704 1733 1705 1734 /* xori */ ··· 1723 1752 /* NOP */ 1724 1753 return; 1725 1754 } 1726 - tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm << 16); 1755 + tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 1756 + uimm << 16); 1727 1757 } 1728 1758 1729 1759 /* popcntb : PowerPC 2.03 specification */ ··· 1798 1828 static void gen_cntlzd(DisasContext *ctx) 1799 1829 { 1800 1830 tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); 1801 - if (unlikely(Rc(ctx->opcode) != 0)) 1831 + if (unlikely(Rc(ctx->opcode) != 0)) { 1802 1832 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 1833 + } 1803 1834 } 1804 1835 1805 1836 /* cnttzd */ ··· 1838 1869 uint32_t mb = MB(ctx->opcode); 1839 1870 uint32_t me = ME(ctx->opcode); 1840 1871 1841 - if (sh == (31-me) && mb <= me) { 1872 + if (sh == (31 - me) && mb <= me) { 1842 1873 tcg_gen_deposit_tl(t_ra, t_ra, t_rs, sh, me - mb + 1); 1843 1874 } else { 1844 1875 target_ulong mask; ··· 
2141 2172 tcg_temp_free(t1); 2142 2173 tcg_temp_free(t0); 2143 2174 tcg_gen_ext32u_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 2144 - if (unlikely(Rc(ctx->opcode) != 0)) 2175 + if (unlikely(Rc(ctx->opcode) != 0)) { 2145 2176 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2177 + } 2146 2178 } 2147 2179 2148 2180 /* sraw & sraw. */ ··· 2150 2182 { 2151 2183 gen_helper_sraw(cpu_gpr[rA(ctx->opcode)], cpu_env, 2152 2184 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 2153 - if (unlikely(Rc(ctx->opcode) != 0)) 2185 + if (unlikely(Rc(ctx->opcode) != 0)) { 2154 2186 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2187 + } 2155 2188 } 2156 2189 2157 2190 /* srawi & srawi. */ ··· 2206 2239 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 2207 2240 tcg_temp_free(t1); 2208 2241 tcg_temp_free(t0); 2209 - if (unlikely(Rc(ctx->opcode) != 0)) 2242 + if (unlikely(Rc(ctx->opcode) != 0)) { 2210 2243 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2244 + } 2211 2245 } 2212 2246 2213 2247 #if defined(TARGET_PPC64) ··· 2226 2260 tcg_gen_shl_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 2227 2261 tcg_temp_free(t1); 2228 2262 tcg_temp_free(t0); 2229 - if (unlikely(Rc(ctx->opcode) != 0)) 2263 + if (unlikely(Rc(ctx->opcode) != 0)) { 2230 2264 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2265 + } 2231 2266 } 2232 2267 2233 2268 /* srad & srad. */ ··· 2235 2270 { 2236 2271 gen_helper_srad(cpu_gpr[rA(ctx->opcode)], cpu_env, 2237 2272 cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 2238 - if (unlikely(Rc(ctx->opcode) != 0)) 2273 + if (unlikely(Rc(ctx->opcode) != 0)) { 2239 2274 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2275 + } 2240 2276 } 2241 2277 /* sradi & sradi. 
*/ 2242 2278 static inline void gen_sradi(DisasContext *ctx, int n) ··· 2317 2353 tcg_gen_shr_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 2318 2354 tcg_temp_free(t1); 2319 2355 tcg_temp_free(t0); 2320 - if (unlikely(Rc(ctx->opcode) != 0)) 2356 + if (unlikely(Rc(ctx->opcode) != 0)) { 2321 2357 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 2358 + } 2322 2359 } 2323 2360 #endif 2324 2361 ··· 2463 2500 #endif 2464 2501 2465 2502 #define GEN_LD(name, ldop, opc, type) \ 2466 - static void glue(gen_, name)(DisasContext *ctx) \ 2503 + static void glue(gen_, name)(DisasContext *ctx) \ 2467 2504 { \ 2468 2505 TCGv EA; \ 2469 2506 gen_set_access_type(ctx, ACCESS_INT); \ ··· 2474 2511 } 2475 2512 2476 2513 #define GEN_LDU(name, ldop, opc, type) \ 2477 - static void glue(gen_, name##u)(DisasContext *ctx) \ 2514 + static void glue(gen_, name##u)(DisasContext *ctx) \ 2478 2515 { \ 2479 2516 TCGv EA; \ 2480 2517 if (unlikely(rA(ctx->opcode) == 0 || \ ··· 2494 2531 } 2495 2532 2496 2533 #define GEN_LDUX(name, ldop, opc2, opc3, type) \ 2497 - static void glue(gen_, name##ux)(DisasContext *ctx) \ 2534 + static void glue(gen_, name##ux)(DisasContext *ctx) \ 2498 2535 { \ 2499 2536 TCGv EA; \ 2500 2537 if (unlikely(rA(ctx->opcode) == 0 || \ ··· 2598 2635 /* ld - ldu */ 2599 2636 gen_qemu_ld64_i64(ctx, cpu_gpr[rD(ctx->opcode)], EA); 2600 2637 } 2601 - if (Rc(ctx->opcode)) 2638 + if (Rc(ctx->opcode)) { 2602 2639 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); 2640 + } 2603 2641 tcg_temp_free(EA); 2604 2642 } 2605 2643 ··· 2669 2707 2670 2708 /*** Integer store ***/ 2671 2709 #define GEN_ST(name, stop, opc, type) \ 2672 - static void glue(gen_, name)(DisasContext *ctx) \ 2710 + static void glue(gen_, name)(DisasContext *ctx) \ 2673 2711 { \ 2674 2712 TCGv EA; \ 2675 2713 gen_set_access_type(ctx, ACCESS_INT); \ ··· 2680 2718 } 2681 2719 2682 2720 #define GEN_STU(name, stop, opc, type) \ 2683 - static void glue(gen_, stop##u)(DisasContext *ctx) \ 2721 + static void glue(gen_, stop##u)(DisasContext 
*ctx) \ 2684 2722 { \ 2685 2723 TCGv EA; \ 2686 2724 if (unlikely(rA(ctx->opcode) == 0)) { \ ··· 2699 2737 } 2700 2738 2701 2739 #define GEN_STUX(name, stop, opc2, opc3, type) \ 2702 - static void glue(gen_, name##ux)(DisasContext *ctx) \ 2740 + static void glue(gen_, name##ux)(DisasContext *ctx) \ 2703 2741 { \ 2704 2742 TCGv EA; \ 2705 2743 if (unlikely(rA(ctx->opcode) == 0)) { \ ··· 2847 2885 EA = tcg_temp_new(); 2848 2886 gen_addr_imm_index(ctx, EA, 0x03); 2849 2887 gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA); 2850 - if (Rc(ctx->opcode)) 2888 + if (Rc(ctx->opcode)) { 2851 2889 tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); 2890 + } 2852 2891 tcg_temp_free(EA); 2853 2892 } 2854 2893 } ··· 2916 2955 /*** Integer load and store strings ***/ 2917 2956 2918 2957 /* lswi */ 2919 - /* PowerPC32 specification says we must generate an exception if 2920 - * rA is in the range of registers to be loaded. 2921 - * In an other hand, IBM says this is valid, but rA won't be loaded. 2922 - * For now, I'll follow the spec... 2958 + /* 2959 + * PowerPC32 specification says we must generate an exception if rA is 2960 + * in the range of registers to be loaded. In an other hand, IBM says 2961 + * this is valid, but rA won't be loaded. For now, I'll follow the 2962 + * spec... 
2923 2963 */ 2924 2964 static void gen_lswi(DisasContext *ctx) 2925 2965 { ··· 2934 2974 gen_align_no_le(ctx); 2935 2975 return; 2936 2976 } 2937 - if (nb == 0) 2977 + if (nb == 0) { 2938 2978 nb = 32; 2979 + } 2939 2980 nr = DIV_ROUND_UP(nb, 4); 2940 2981 if (unlikely(lsw_reg_in_range(start, nr, ra))) { 2941 2982 gen_inval_exception(ctx, POWERPC_EXCP_INVAL_LSWX); ··· 2989 3030 gen_set_access_type(ctx, ACCESS_INT); 2990 3031 t0 = tcg_temp_new(); 2991 3032 gen_addr_register(ctx, t0); 2992 - if (nb == 0) 3033 + if (nb == 0) { 2993 3034 nb = 32; 3035 + } 2994 3036 t1 = tcg_const_i32(nb); 2995 3037 t2 = tcg_const_i32(rS(ctx->opcode)); 2996 3038 gen_helper_stsw(cpu_env, t0, t1, t2); ··· 3363 3405 3364 3406 gen_set_label(l1); 3365 3407 3366 - /* Address mismatch implies failure. But we still need to provide the 3367 - memory barrier semantics of the instruction. */ 3408 + /* 3409 + * Address mismatch implies failure. But we still need to provide 3410 + * the memory barrier semantics of the instruction. 
3411 + */ 3368 3412 tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); 3369 3413 tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); 3370 3414 ··· 3639 3683 static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) 3640 3684 { 3641 3685 #if defined(TARGET_PPC64) 3642 - if (ctx->has_cfar) 3686 + if (ctx->has_cfar) { 3643 3687 tcg_gen_movi_tl(cpu_cfar, nip); 3688 + } 3644 3689 #endif 3645 3690 } 3646 3691 ··· 3732 3777 3733 3778 if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) { 3734 3779 target = tcg_temp_local_new(); 3735 - if (type == BCOND_CTR) 3780 + if (type == BCOND_CTR) { 3736 3781 tcg_gen_mov_tl(target, cpu_ctr); 3737 - else if (type == BCOND_TAR) 3782 + } else if (type == BCOND_TAR) { 3738 3783 gen_load_spr(target, SPR_TAR); 3739 - else 3784 + } else { 3740 3785 tcg_gen_mov_tl(target, cpu_lr); 3786 + } 3741 3787 } else { 3742 3788 target = NULL; 3743 3789 } 3744 - if (LK(ctx->opcode)) 3790 + if (LK(ctx->opcode)) { 3745 3791 gen_setlr(ctx, ctx->base.pc_next); 3792 + } 3746 3793 l1 = gen_new_label(); 3747 3794 if ((bo & 0x4) == 0) { 3748 3795 /* Decrement and test CTR */ ··· 3857 3904 3858 3905 /*** Condition register logical ***/ 3859 3906 #define GEN_CRLOGIC(name, tcg_op, opc) \ 3860 - static void glue(gen_, name)(DisasContext *ctx) \ 3907 + static void glue(gen_, name)(DisasContext *ctx) \ 3861 3908 { \ 3862 3909 uint8_t bitmask; \ 3863 3910 int sh; \ ··· 3918 3965 #if defined(CONFIG_USER_ONLY) 3919 3966 GEN_PRIV; 3920 3967 #else 3921 - /* This instruction doesn't exist anymore on 64-bit server 3968 + /* 3969 + * This instruction doesn't exist anymore on 64-bit server 3922 3970 * processors compliant with arch 2.x 3923 3971 */ 3924 3972 if (is_book3s_arch2x(ctx)) { ··· 4157 4205 if (likely(ctx->opcode & 0x00100000)) { 4158 4206 crm = CRM(ctx->opcode); 4159 4207 if (likely(crm && ((crm & (crm - 1)) == 0))) { 4160 - crn = ctz32 (crm); 4208 + crn = ctz32(crm); 4161 4209 tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], cpu_crf[7 - crn]); 4162 4210 
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], 4163 4211 cpu_gpr[rD(ctx->opcode)], crn * 4); ··· 4222 4270 (*read_cb)(ctx, rD(ctx->opcode), sprn); 4223 4271 } else { 4224 4272 /* Privilege exception */ 4225 - /* This is a hack to avoid warnings when running Linux: 4273 + /* 4274 + * This is a hack to avoid warnings when running Linux: 4226 4275 * this OS breaks the PowerPC virtualisation model, 4227 4276 * allowing userland application to read the PVR 4228 4277 */ ··· 4245 4294 "Trying to read invalid spr %d (0x%03x) at " 4246 4295 TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 4247 4296 4248 - /* The behaviour depends on MSR:PR and SPR# bit 0x10, 4249 - * it can generate a priv, a hv emu or a no-op 4297 + /* 4298 + * The behaviour depends on MSR:PR and SPR# bit 0x10, it can 4299 + * generate a priv, a hv emu or a no-op 4250 4300 */ 4251 4301 if (sprn & 0x10) { 4252 4302 if (ctx->pr) { ··· 4280 4330 if (likely((ctx->opcode & 0x00100000))) { 4281 4331 if (crm && ((crm & (crm - 1)) == 0)) { 4282 4332 TCGv_i32 temp = tcg_temp_new_i32(); 4283 - crn = ctz32 (crm); 4333 + crn = ctz32(crm); 4284 4334 tcg_gen_trunc_tl_i32(temp, cpu_gpr[rS(ctx->opcode)]); 4285 4335 tcg_gen_shri_i32(temp, temp, crn * 4); 4286 4336 tcg_gen_andi_i32(cpu_crf[7 - crn], temp, 0xf); ··· 4309 4359 if (ctx->opcode & 0x00010000) { 4310 4360 /* Special form that does not need any synchronisation */ 4311 4361 TCGv t0 = tcg_temp_new(); 4312 - tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE)); 4313 - tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); 4362 + tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], 4363 + (1 << MSR_RI) | (1 << MSR_EE)); 4364 + tcg_gen_andi_tl(cpu_msr, cpu_msr, 4365 + ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); 4314 4366 tcg_gen_or_tl(cpu_msr, cpu_msr, t0); 4315 4367 tcg_temp_free(t0); 4316 4368 } else { 4317 - /* XXX: we need to update nip before the store 4318 - * if we enter power saving mode, we will exit the loop 
4319 - * directly from ppc_store_msr 4369 + /* 4370 + * XXX: we need to update nip before the store if we enter 4371 + * power saving mode, we will exit the loop directly from 4372 + * ppc_store_msr 4320 4373 */ 4321 4374 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 4322 4375 gen_io_start(); ··· 4342 4395 if (ctx->opcode & 0x00010000) { 4343 4396 /* Special form that does not need any synchronisation */ 4344 4397 TCGv t0 = tcg_temp_new(); 4345 - tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], (1 << MSR_RI) | (1 << MSR_EE)); 4346 - tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); 4398 + tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], 4399 + (1 << MSR_RI) | (1 << MSR_EE)); 4400 + tcg_gen_andi_tl(cpu_msr, cpu_msr, 4401 + ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); 4347 4402 tcg_gen_or_tl(cpu_msr, cpu_msr, t0); 4348 4403 tcg_temp_free(t0); 4349 4404 } else { 4350 4405 TCGv msr = tcg_temp_new(); 4351 4406 4352 - /* XXX: we need to update nip before the store 4353 - * if we enter power saving mode, we will exit the loop 4354 - * directly from ppc_store_msr 4407 + /* 4408 + * XXX: we need to update nip before the store if we enter 4409 + * power saving mode, we will exit the loop directly from 4410 + * ppc_store_msr 4355 4411 */ 4356 4412 if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) { 4357 4413 gen_io_start(); ··· 4415 4471 TARGET_FMT_lx "\n", sprn, sprn, ctx->base.pc_next - 4); 4416 4472 4417 4473 4418 - /* The behaviour depends on MSR:PR and SPR# bit 0x10, 4419 - * it can generate a priv, a hv emu or a no-op 4474 + /* 4475 + * The behaviour depends on MSR:PR and SPR# bit 0x10, it can 4476 + * generate a priv, a hv emu or a no-op 4420 4477 */ 4421 4478 if (sprn & 0x10) { 4422 4479 if (ctx->pr) { ··· 4526 4583 /* dcbt */ 4527 4584 static void gen_dcbt(DisasContext *ctx) 4528 4585 { 4529 - /* interpreted as no-op */ 4530 - /* XXX: specification say this is treated as a load by the MMU 4531 - * but does not generate any exception 4586 + 
/* 4587 + * interpreted as no-op 4588 + * XXX: specification say this is treated as a load by the MMU but 4589 + * does not generate any exception 4532 4590 */ 4533 4591 } 4534 4592 4535 4593 /* dcbtep */ 4536 4594 static void gen_dcbtep(DisasContext *ctx) 4537 4595 { 4538 - /* interpreted as no-op */ 4539 - /* XXX: specification say this is treated as a load by the MMU 4540 - * but does not generate any exception 4596 + /* 4597 + * interpreted as no-op 4598 + * XXX: specification say this is treated as a load by the MMU but 4599 + * does not generate any exception 4541 4600 */ 4542 4601 } 4543 4602 4544 4603 /* dcbtst */ 4545 4604 static void gen_dcbtst(DisasContext *ctx) 4546 4605 { 4547 - /* interpreted as no-op */ 4548 - /* XXX: specification say this is treated as a load by the MMU 4549 - * but does not generate any exception 4606 + /* 4607 + * interpreted as no-op 4608 + * XXX: specification say this is treated as a load by the MMU but 4609 + * does not generate any exception 4550 4610 */ 4551 4611 } 4552 4612 4553 4613 /* dcbtstep */ 4554 4614 static void gen_dcbtstep(DisasContext *ctx) 4555 4615 { 4556 - /* interpreted as no-op */ 4557 - /* XXX: specification say this is treated as a load by the MMU 4558 - * but does not generate any exception 4616 + /* 4617 + * interpreted as no-op 4618 + * XXX: specification say this is treated as a load by the MMU but 4619 + * does not generate any exception 4559 4620 */ 4560 4621 } 4561 4622 ··· 4653 4714 /* dcba */ 4654 4715 static void gen_dcba(DisasContext *ctx) 4655 4716 { 4656 - /* interpreted as no-op */ 4657 - /* XXX: specification say this is treated as a store by the MMU 4717 + /* 4718 + * interpreted as no-op 4719 + * XXX: specification say this is treated as a store by the MMU 4658 4720 * but does not generate any exception 4659 4721 */ 4660 4722 } ··· 5021 5083 gen_set_label(l1); 5022 5084 tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 5023 5085 gen_set_label(l2); 5024 - if 
(unlikely(Rc(ctx->opcode) != 0)) 5086 + if (unlikely(Rc(ctx->opcode) != 0)) { 5025 5087 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5088 + } 5026 5089 } 5027 5090 5028 5091 /* abso - abso. */ ··· 5044 5107 gen_set_label(l2); 5045 5108 tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 5046 5109 gen_set_label(l3); 5047 - if (unlikely(Rc(ctx->opcode) != 0)) 5110 + if (unlikely(Rc(ctx->opcode) != 0)) { 5048 5111 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5112 + } 5049 5113 } 5050 5114 5051 5115 /* clcs */ ··· 5062 5126 { 5063 5127 gen_helper_div(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], 5064 5128 cpu_gpr[rB(ctx->opcode)]); 5065 - if (unlikely(Rc(ctx->opcode) != 0)) 5129 + if (unlikely(Rc(ctx->opcode) != 0)) { 5066 5130 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5131 + } 5067 5132 } 5068 5133 5069 5134 /* divo - divo. */ ··· 5071 5136 { 5072 5137 gen_helper_divo(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], 5073 5138 cpu_gpr[rB(ctx->opcode)]); 5074 - if (unlikely(Rc(ctx->opcode) != 0)) 5139 + if (unlikely(Rc(ctx->opcode) != 0)) { 5075 5140 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5141 + } 5076 5142 } 5077 5143 5078 5144 /* divs - divs. */ ··· 5080 5146 { 5081 5147 gen_helper_divs(cpu_gpr[rD(ctx->opcode)], cpu_env, cpu_gpr[rA(ctx->opcode)], 5082 5148 cpu_gpr[rB(ctx->opcode)]); 5083 - if (unlikely(Rc(ctx->opcode) != 0)) 5149 + if (unlikely(Rc(ctx->opcode) != 0)) { 5084 5150 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5151 + } 5085 5152 } 5086 5153 5087 5154 /* divso - divso. */ ··· 5089 5156 { 5090 5157 gen_helper_divso(cpu_gpr[rD(ctx->opcode)], cpu_env, 5091 5158 cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); 5092 - if (unlikely(Rc(ctx->opcode) != 0)) 5159 + if (unlikely(Rc(ctx->opcode) != 0)) { 5093 5160 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5161 + } 5094 5162 } 5095 5163 5096 5164 /* doz - doz. 
*/ ··· 5098 5166 { 5099 5167 TCGLabel *l1 = gen_new_label(); 5100 5168 TCGLabel *l2 = gen_new_label(); 5101 - tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1); 5102 - tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 5169 + tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], 5170 + cpu_gpr[rA(ctx->opcode)], l1); 5171 + tcg_gen_sub_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], 5172 + cpu_gpr[rA(ctx->opcode)]); 5103 5173 tcg_gen_br(l2); 5104 5174 gen_set_label(l1); 5105 5175 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); 5106 5176 gen_set_label(l2); 5107 - if (unlikely(Rc(ctx->opcode) != 0)) 5177 + if (unlikely(Rc(ctx->opcode) != 0)) { 5108 5178 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5179 + } 5109 5180 } 5110 5181 5111 5182 /* dozo - dozo. */ ··· 5118 5189 TCGv t2 = tcg_temp_new(); 5119 5190 /* Start with XER OV disabled, the most likely case */ 5120 5191 tcg_gen_movi_tl(cpu_ov, 0); 5121 - tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], l1); 5192 + tcg_gen_brcond_tl(TCG_COND_GE, cpu_gpr[rB(ctx->opcode)], 5193 + cpu_gpr[rA(ctx->opcode)], l1); 5122 5194 tcg_gen_sub_tl(t0, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 5123 5195 tcg_gen_xor_tl(t1, cpu_gpr[rB(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 5124 5196 tcg_gen_xor_tl(t2, cpu_gpr[rA(ctx->opcode)], t0); ··· 5134 5206 tcg_temp_free(t0); 5135 5207 tcg_temp_free(t1); 5136 5208 tcg_temp_free(t2); 5137 - if (unlikely(Rc(ctx->opcode) != 0)) 5209 + if (unlikely(Rc(ctx->opcode) != 0)) { 5138 5210 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5211 + } 5139 5212 } 5140 5213 5141 5214 /* dozi */ ··· 5150 5223 gen_set_label(l1); 5151 5224 tcg_gen_movi_tl(cpu_gpr[rD(ctx->opcode)], 0); 5152 5225 gen_set_label(l2); 5153 - if (unlikely(Rc(ctx->opcode) != 0)) 5226 + if (unlikely(Rc(ctx->opcode) != 0)) { 5154 5227 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5228 + } 5155 5229 } 5156 5230 5157 
5231 /* lscbx - lscbx. */ ··· 5169 5243 tcg_temp_free_i32(t3); 5170 5244 tcg_gen_andi_tl(cpu_xer, cpu_xer, ~0x7F); 5171 5245 tcg_gen_or_tl(cpu_xer, cpu_xer, t0); 5172 - if (unlikely(Rc(ctx->opcode) != 0)) 5246 + if (unlikely(Rc(ctx->opcode) != 0)) { 5173 5247 gen_set_Rc0(ctx, t0); 5248 + } 5174 5249 tcg_temp_free(t0); 5175 5250 } 5176 5251 ··· 5196 5271 tcg_temp_free(t1); 5197 5272 tcg_temp_free(t2); 5198 5273 tcg_temp_free(t3); 5199 - if (unlikely(Rc(ctx->opcode) != 0)) 5274 + if (unlikely(Rc(ctx->opcode) != 0)) { 5200 5275 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5276 + } 5201 5277 } 5202 5278 5203 5279 /* maskir - maskir. */ ··· 5210 5286 tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 5211 5287 tcg_temp_free(t0); 5212 5288 tcg_temp_free(t1); 5213 - if (unlikely(Rc(ctx->opcode) != 0)) 5289 + if (unlikely(Rc(ctx->opcode) != 0)) { 5214 5290 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5291 + } 5215 5292 } 5216 5293 5217 5294 /* mul - mul. */ ··· 5230 5307 tcg_temp_free_i64(t0); 5231 5308 tcg_temp_free_i64(t1); 5232 5309 tcg_temp_free(t2); 5233 - if (unlikely(Rc(ctx->opcode) != 0)) 5310 + if (unlikely(Rc(ctx->opcode) != 0)) { 5234 5311 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5312 + } 5235 5313 } 5236 5314 5237 5315 /* mulo - mulo. */ ··· 5258 5336 tcg_temp_free_i64(t0); 5259 5337 tcg_temp_free_i64(t1); 5260 5338 tcg_temp_free(t2); 5261 - if (unlikely(Rc(ctx->opcode) != 0)) 5339 + if (unlikely(Rc(ctx->opcode) != 0)) { 5262 5340 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5341 + } 5263 5342 } 5264 5343 5265 5344 /* nabs - nabs. */ ··· 5273 5352 gen_set_label(l1); 5274 5353 tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); 5275 5354 gen_set_label(l2); 5276 - if (unlikely(Rc(ctx->opcode) != 0)) 5355 + if (unlikely(Rc(ctx->opcode) != 0)) { 5277 5356 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5357 + } 5278 5358 } 5279 5359 5280 5360 /* nabso - nabso. 
*/ ··· 5290 5370 gen_set_label(l2); 5291 5371 /* nabs never overflows */ 5292 5372 tcg_gen_movi_tl(cpu_ov, 0); 5293 - if (unlikely(Rc(ctx->opcode) != 0)) 5373 + if (unlikely(Rc(ctx->opcode) != 0)) { 5294 5374 gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); 5375 + } 5295 5376 } 5296 5377 5297 5378 /* rlmi - rlmi. */ ··· 5303 5384 tcg_gen_andi_tl(t0, cpu_gpr[rB(ctx->opcode)], 0x1F); 5304 5385 tcg_gen_rotl_tl(t0, cpu_gpr[rS(ctx->opcode)], t0); 5305 5386 tcg_gen_andi_tl(t0, t0, MASK(mb, me)); 5306 - tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], ~MASK(mb, me)); 5387 + tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 5388 + ~MASK(mb, me)); 5307 5389 tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], t0); 5308 5390 tcg_temp_free(t0); 5309 - if (unlikely(Rc(ctx->opcode) != 0)) 5391 + if (unlikely(Rc(ctx->opcode) != 0)) { 5310 5392 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5393 + } 5311 5394 } 5312 5395 5313 5396 /* rrib - rrib. */ ··· 5324 5407 tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 5325 5408 tcg_temp_free(t0); 5326 5409 tcg_temp_free(t1); 5327 - if (unlikely(Rc(ctx->opcode) != 0)) 5410 + if (unlikely(Rc(ctx->opcode) != 0)) { 5328 5411 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5412 + } 5329 5413 } 5330 5414 5331 5415 /* sle - sle. */ ··· 5342 5426 gen_store_spr(SPR_MQ, t1); 5343 5427 tcg_temp_free(t0); 5344 5428 tcg_temp_free(t1); 5345 - if (unlikely(Rc(ctx->opcode) != 0)) 5429 + if (unlikely(Rc(ctx->opcode) != 0)) { 5346 5430 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5431 + } 5347 5432 } 5348 5433 5349 5434 /* sleq - sleq. */ ··· 5364 5449 tcg_temp_free(t0); 5365 5450 tcg_temp_free(t1); 5366 5451 tcg_temp_free(t2); 5367 - if (unlikely(Rc(ctx->opcode) != 0)) 5452 + if (unlikely(Rc(ctx->opcode) != 0)) { 5368 5453 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5454 + } 5369 5455 } 5370 5456 5371 5457 /* sliq - sliq. 
*/ ··· 5381 5467 gen_store_spr(SPR_MQ, t1); 5382 5468 tcg_temp_free(t0); 5383 5469 tcg_temp_free(t1); 5384 - if (unlikely(Rc(ctx->opcode) != 0)) 5470 + if (unlikely(Rc(ctx->opcode) != 0)) { 5385 5471 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5472 + } 5386 5473 } 5387 5474 5388 5475 /* slliq - slliq. */ ··· 5399 5486 tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 5400 5487 tcg_temp_free(t0); 5401 5488 tcg_temp_free(t1); 5402 - if (unlikely(Rc(ctx->opcode) != 0)) 5489 + if (unlikely(Rc(ctx->opcode) != 0)) { 5403 5490 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5491 + } 5404 5492 } 5405 5493 5406 5494 /* sllq - sllq. */ ··· 5428 5516 tcg_temp_free(t0); 5429 5517 tcg_temp_free(t1); 5430 5518 tcg_temp_free(t2); 5431 - if (unlikely(Rc(ctx->opcode) != 0)) 5519 + if (unlikely(Rc(ctx->opcode) != 0)) { 5432 5520 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5521 + } 5433 5522 } 5434 5523 5435 5524 /* slq - slq. */ ··· 5451 5540 gen_set_label(l1); 5452 5541 tcg_temp_free(t0); 5453 5542 tcg_temp_free(t1); 5454 - if (unlikely(Rc(ctx->opcode) != 0)) 5543 + if (unlikely(Rc(ctx->opcode) != 0)) { 5455 5544 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5545 + } 5456 5546 } 5457 5547 5458 5548 /* sraiq - sraiq. */ ··· 5474 5564 tcg_gen_sari_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], sh); 5475 5565 tcg_temp_free(t0); 5476 5566 tcg_temp_free(t1); 5477 - if (unlikely(Rc(ctx->opcode) != 0)) 5567 + if (unlikely(Rc(ctx->opcode) != 0)) { 5478 5568 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5569 + } 5479 5570 } 5480 5571 5481 5572 /* sraq - sraq. */ ··· 5507 5598 gen_set_label(l2); 5508 5599 tcg_temp_free(t1); 5509 5600 tcg_temp_free(t2); 5510 - if (unlikely(Rc(ctx->opcode) != 0)) 5601 + if (unlikely(Rc(ctx->opcode) != 0)) { 5511 5602 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5603 + } 5512 5604 } 5513 5605 5514 5606 /* sre - sre. 
*/ ··· 5525 5617 gen_store_spr(SPR_MQ, t1); 5526 5618 tcg_temp_free(t0); 5527 5619 tcg_temp_free(t1); 5528 - if (unlikely(Rc(ctx->opcode) != 0)) 5620 + if (unlikely(Rc(ctx->opcode) != 0)) { 5529 5621 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5622 + } 5530 5623 } 5531 5624 5532 5625 /* srea - srea. */ ··· 5540 5633 tcg_gen_sar_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], t1); 5541 5634 tcg_temp_free(t0); 5542 5635 tcg_temp_free(t1); 5543 - if (unlikely(Rc(ctx->opcode) != 0)) 5636 + if (unlikely(Rc(ctx->opcode) != 0)) { 5544 5637 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5638 + } 5545 5639 } 5546 5640 5547 5641 /* sreq */ ··· 5562 5656 tcg_temp_free(t0); 5563 5657 tcg_temp_free(t1); 5564 5658 tcg_temp_free(t2); 5565 - if (unlikely(Rc(ctx->opcode) != 0)) 5659 + if (unlikely(Rc(ctx->opcode) != 0)) { 5566 5660 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5661 + } 5567 5662 } 5568 5663 5569 5664 /* sriq */ ··· 5579 5674 gen_store_spr(SPR_MQ, t1); 5580 5675 tcg_temp_free(t0); 5581 5676 tcg_temp_free(t1); 5582 - if (unlikely(Rc(ctx->opcode) != 0)) 5677 + if (unlikely(Rc(ctx->opcode) != 0)) { 5583 5678 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5679 + } 5584 5680 } 5585 5681 5586 5682 /* srliq */ ··· 5597 5693 tcg_gen_or_tl(cpu_gpr[rA(ctx->opcode)], t0, t1); 5598 5694 tcg_temp_free(t0); 5599 5695 tcg_temp_free(t1); 5600 - if (unlikely(Rc(ctx->opcode) != 0)) 5696 + if (unlikely(Rc(ctx->opcode) != 0)) { 5601 5697 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5698 + } 5602 5699 } 5603 5700 5604 5701 /* srlq */ ··· 5627 5724 tcg_temp_free(t0); 5628 5725 tcg_temp_free(t1); 5629 5726 tcg_temp_free(t2); 5630 - if (unlikely(Rc(ctx->opcode) != 0)) 5727 + if (unlikely(Rc(ctx->opcode) != 0)) { 5631 5728 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5729 + } 5632 5730 } 5633 5731 5634 5732 /* srq */ ··· 5650 5748 gen_set_label(l1); 5651 5749 tcg_temp_free(t0); 5652 5750 tcg_temp_free(t1); 5653 - if (unlikely(Rc(ctx->opcode) != 0)) 5751 + if (unlikely(Rc(ctx->opcode) != 0)) { 
5654 5752 gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); 5753 + } 5655 5754 } 5656 5755 5657 5756 /* PowerPC 602 specific instructions */ ··· 5769 5868 tcg_gen_extract_tl(t0, t0, 28, 4); 5770 5869 gen_helper_load_sr(cpu_gpr[rd], cpu_env, t0); 5771 5870 tcg_temp_free(t0); 5772 - if (ra != 0 && ra != rd) 5871 + if (ra != 0 && ra != rd) { 5773 5872 tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rd]); 5873 + } 5774 5874 #endif /* defined(CONFIG_USER_ONLY) */ 5775 5875 } 5776 5876 ··· 6147 6247 /* icbt */ 6148 6248 static void gen_icbt_40x(DisasContext *ctx) 6149 6249 { 6150 - /* interpreted as no-op */ 6151 - /* XXX: specification say this is treated as a load by the MMU 6152 - * but does not generate any exception 6250 + /* 6251 + * interpreted as no-op 6252 + * XXX: specification say this is treated as a load by the MMU but 6253 + * does not generate any exception 6153 6254 */ 6154 6255 } 6155 6256 ··· 6440 6541 t0 = tcg_temp_new(); 6441 6542 gen_addr_reg_index(ctx, t0); 6442 6543 6443 - switch((ctx->opcode >> 21) & 0x3) { 6544 + switch ((ctx->opcode >> 21) & 0x3) { 6444 6545 case 0: 6445 6546 gen_helper_booke206_tlbilx0(cpu_env, t0); 6446 6547 break; ··· 6474 6575 tcg_gen_andi_tl(cpu_msr, cpu_msr, ~(1 << MSR_EE)); 6475 6576 tcg_gen_or_tl(cpu_msr, cpu_msr, t0); 6476 6577 tcg_temp_free(t0); 6477 - /* Stop translation to have a chance to raise an exception 6478 - * if we just set msr_ee to 1 6578 + /* 6579 + * Stop translation to have a chance to raise an exception if we 6580 + * just set msr_ee to 1 6479 6581 */ 6480 6582 gen_stop_exception(ctx); 6481 6583 #endif /* defined(CONFIG_USER_ONLY) */ ··· 6529 6631 /* icbt */ 6530 6632 static void gen_icbt_440(DisasContext *ctx) 6531 6633 { 6532 - /* interpreted as no-op */ 6533 - /* XXX: specification say this is treated as a load by the MMU 6534 - * but does not generate any exception 6634 + /* 6635 + * interpreted as no-op 6636 + * XXX: specification say this is treated as a load by the MMU but 6637 + * does not generate any exception 
6535 6638 */ 6536 6639 } 6537 6640 ··· 6625 6728 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ 6626 6729 return; \ 6627 6730 } \ 6628 - /* Because tbegin always fails in QEMU, these user \ 6731 + /* \ 6732 + * Because tbegin always fails in QEMU, these user \ 6629 6733 * space instructions all have a simple implementation: \ 6630 6734 * \ 6631 6735 * CR[0] = 0b0 || MSR[TS] || 0b0 \ ··· 6641 6745 GEN_TM_NOOP(tabortdc); 6642 6746 GEN_TM_NOOP(tabortdci); 6643 6747 GEN_TM_NOOP(tsr); 6748 + 6644 6749 static inline void gen_cp_abort(DisasContext *ctx) 6645 6750 { 6646 - // Do Nothing 6751 + /* Do Nothing */ 6647 6752 } 6648 6753 6649 6754 #define GEN_CP_PASTE_NOOP(name) \ 6650 6755 static inline void gen_##name(DisasContext *ctx) \ 6651 6756 { \ 6652 - /* Generate invalid exception until \ 6653 - * we have an implementation of the copy \ 6654 - * paste facility \ 6757 + /* \ 6758 + * Generate invalid exception until we have an \ 6759 + * implementation of the copy paste facility \ 6655 6760 */ \ 6656 6761 gen_invalid(ctx); \ 6657 6762 } ··· 6665 6770 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); 6666 6771 return; 6667 6772 } 6668 - /* Because tbegin always fails, the tcheck implementation 6669 - * is simple: 6773 + /* 6774 + * Because tbegin always fails, the tcheck implementation is 6775 + * simple: 6670 6776 * 6671 6777 * CR[CRF] = TDOOMED || MSR[TS] || 0b0 6672 6778 * = 0b1 || 0b00 || 0b0 ··· 6678 6784 #define GEN_TM_PRIV_NOOP(name) \ 6679 6785 static inline void gen_##name(DisasContext *ctx) \ 6680 6786 { \ 6681 - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \ 6787 + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); \ 6682 6788 } 6683 6789 6684 6790 #else ··· 6691 6797 gen_exception_err(ctx, POWERPC_EXCP_FU, FSCR_IC_TM); \ 6692 6798 return; \ 6693 6799 } \ 6694 - /* Because tbegin always fails, the implementation is \ 6800 + /* \ 6801 + * Because tbegin always fails, the implementation is \ 6695 6802 * simple: \ 6696 6803 * \ 6697 6804 * CR[0] = 0b0 
|| MSR[TS] || 0b0 \ ··· 6973 7080 GEN_HANDLER2(slbfee_, "slbfee.", 0x1F, 0x13, 0x1E, 0x001F0000, PPC_SEGMENT_64B), 6974 7081 #endif 6975 7082 GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA), 6976 - /* XXX Those instructions will need to be handled differently for 6977 - * different ISA versions */ 7083 + /* 7084 + * XXX Those instructions will need to be handled differently for 7085 + * different ISA versions 7086 + */ 6978 7087 GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE), 6979 7088 GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE), 6980 7089 GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300), ··· 7444 7553 ); 7445 7554 #endif 7446 7555 for (i = 0; i < 32; i++) { 7447 - if ((i & (RGPL - 1)) == 0) 7556 + if ((i & (RGPL - 1)) == 0) { 7448 7557 qemu_fprintf(f, "GPR%02d", i); 7558 + } 7449 7559 qemu_fprintf(f, " %016" PRIx64, ppc_dump_gpr(env, i)); 7450 - if ((i & (RGPL - 1)) == (RGPL - 1)) 7560 + if ((i & (RGPL - 1)) == (RGPL - 1)) { 7451 7561 qemu_fprintf(f, "\n"); 7562 + } 7452 7563 } 7453 7564 qemu_fprintf(f, "CR "); 7454 7565 for (i = 0; i < 8; i++) ··· 7456 7567 qemu_fprintf(f, " ["); 7457 7568 for (i = 0; i < 8; i++) { 7458 7569 char a = '-'; 7459 - if (env->crf[i] & 0x08) 7570 + if (env->crf[i] & 0x08) { 7460 7571 a = 'L'; 7461 - else if (env->crf[i] & 0x04) 7572 + } else if (env->crf[i] & 0x04) { 7462 7573 a = 'G'; 7463 - else if (env->crf[i] & 0x02) 7574 + } else if (env->crf[i] & 0x02) { 7464 7575 a = 'E'; 7576 + } 7465 7577 qemu_fprintf(f, " %c%c", a, env->crf[i] & 0x01 ? 
'O' : ' '); 7466 7578 } 7467 7579 qemu_fprintf(f, " ] RES " TARGET_FMT_lx "\n", ··· 7543 7655 } 7544 7656 #endif 7545 7657 7546 - if (env->spr_cb[SPR_LPCR].name) 7658 + if (env->spr_cb[SPR_LPCR].name) { 7547 7659 qemu_fprintf(f, " LPCR " TARGET_FMT_lx "\n", env->spr[SPR_LPCR]); 7660 + } 7548 7661 7549 7662 switch (env->mmu_model) { 7550 7663 case POWERPC_MMU_32B: ··· 7610 7723 t3 = ind_table(handler); 7611 7724 for (op3 = 0; op3 < 32; op3++) { 7612 7725 handler = t3[op3]; 7613 - if (handler->count == 0) 7726 + if (handler->count == 0) { 7614 7727 continue; 7728 + } 7615 7729 qemu_printf("%02x %02x %02x (%02x %04d) %16s: " 7616 7730 "%016" PRIx64 " %" PRId64 "\n", 7617 7731 op1, op2, op3, op1, (op3 << 5) | op2, ··· 7619 7733 handler->count, handler->count); 7620 7734 } 7621 7735 } else { 7622 - if (handler->count == 0) 7736 + if (handler->count == 0) { 7623 7737 continue; 7738 + } 7624 7739 qemu_printf("%02x %02x (%02x %04d) %16s: " 7625 7740 "%016" PRIx64 " %" PRId64 "\n", 7626 7741 op1, op2, op1, op2, handler->oname, ··· 7628 7743 } 7629 7744 } 7630 7745 } else { 7631 - if (handler->count == 0) 7746 + if (handler->count == 0) { 7632 7747 continue; 7748 + } 7633 7749 qemu_printf("%02x (%02x ) %16s: %016" PRIx64 7634 7750 " %" PRId64 "\n", 7635 7751 op1, op1, handler->oname, ··· 7669 7785 || (env->mmu_model & POWERPC_MMU_64B); 7670 7786 7671 7787 ctx->fpu_enabled = !!msr_fp; 7672 - if ((env->flags & POWERPC_FLAG_SPE) && msr_spe) 7788 + if ((env->flags & POWERPC_FLAG_SPE) && msr_spe) { 7673 7789 ctx->spe_enabled = !!msr_spe; 7674 - else 7790 + } else { 7675 7791 ctx->spe_enabled = false; 7676 - if ((env->flags & POWERPC_FLAG_VRE) && msr_vr) 7792 + } 7793 + if ((env->flags & POWERPC_FLAG_VRE) && msr_vr) { 7677 7794 ctx->altivec_enabled = !!msr_vr; 7678 - else 7795 + } else { 7679 7796 ctx->altivec_enabled = false; 7797 + } 7680 7798 if ((env->flags & POWERPC_FLAG_VSX) && msr_vsx) { 7681 7799 ctx->vsx_enabled = !!msr_vsx; 7682 7800 } else { ··· 7690 7808 } 7691 7809 
#endif 7692 7810 ctx->gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE); 7693 - if ((env->flags & POWERPC_FLAG_SE) && msr_se) 7811 + if ((env->flags & POWERPC_FLAG_SE) && msr_se) { 7694 7812 ctx->singlestep_enabled = CPU_SINGLE_STEP; 7695 - else 7813 + } else { 7696 7814 ctx->singlestep_enabled = 0; 7697 - if ((env->flags & POWERPC_FLAG_BE) && msr_be) 7815 + } 7816 + if ((env->flags & POWERPC_FLAG_BE) && msr_be) { 7698 7817 ctx->singlestep_enabled |= CPU_BRANCH_STEP; 7818 + } 7699 7819 if ((env->flags & POWERPC_FLAG_DE) && msr_de) { 7700 7820 ctx->singlestep_enabled = 0; 7701 7821 target_ulong dbcr0 = env->spr[SPR_BOOKE_DBCR0]; ··· 7710 7830 if (unlikely(ctx->base.singlestep_enabled)) { 7711 7831 ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP; 7712 7832 } 7713 - #if defined (DO_SINGLE_STEP) && 0 7833 + #if defined(DO_SINGLE_STEP) && 0 7714 7834 /* Single step trace mode */ 7715 7835 msr_se = 1; 7716 7836 #endif ··· 7735 7855 7736 7856 gen_debug_exception(ctx); 7737 7857 dcbase->is_jmp = DISAS_NORETURN; 7738 - /* The address covered by the breakpoint must be included in 7739 - [tb->pc, tb->pc + tb->size) in order to for it to be 7740 - properly cleared -- thus we increment the PC here so that 7741 - the logic setting tb->size below does the right thing. */ 7858 + /* 7859 + * The address covered by the breakpoint must be included in 7860 + * [tb->pc, tb->pc + tb->size) in order to for it to be properly 7861 + * cleared -- thus we increment the PC here so that the logic 7862 + * setting tb->size below does the right thing. 7863 + */ 7742 7864 ctx->base.pc_next += 4; 7743 7865 return true; 7744 7866 }
+32 -20
target/ppc/translate/fp-impl.inc.c
··· 585 585 shift = 4 * nibble; 586 586 tcg_gen_shri_tl(tmp, cpu_fpscr, shift); 587 587 tcg_gen_trunc_tl_i32(cpu_crf[crfD(ctx->opcode)], tmp); 588 - tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 0xf); 588 + tcg_gen_andi_i32(cpu_crf[crfD(ctx->opcode)], cpu_crf[crfD(ctx->opcode)], 589 + 0xf); 589 590 tcg_temp_free(tmp); 590 591 tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr); 591 592 /* Only the exception bits (including FX) should be cleared if read */ 592 - tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, ~((0xF << shift) & FP_EX_CLEAR_BITS)); 593 + tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, 594 + ~((0xF << shift) & FP_EX_CLEAR_BITS)); 593 595 /* FEX and VX need to be updated, so don't set fpscr directly */ 594 596 tmask = tcg_const_i32(1 << nibble); 595 597 gen_helper_store_fpscr(cpu_env, tnew_fpscr, tmask); ··· 735 737 736 738 /*** Floating-point load ***/ 737 739 #define GEN_LDF(name, ldop, opc, type) \ 738 - static void glue(gen_, name)(DisasContext *ctx) \ 740 + static void glue(gen_, name)(DisasContext *ctx) \ 739 741 { \ 740 742 TCGv EA; \ 741 743 TCGv_i64 t0; \ ··· 754 756 } 755 757 756 758 #define GEN_LDUF(name, ldop, opc, type) \ 757 - static void glue(gen_, name##u)(DisasContext *ctx) \ 759 + static void glue(gen_, name##u)(DisasContext *ctx) \ 758 760 { \ 759 761 TCGv EA; \ 760 762 TCGv_i64 t0; \ ··· 778 780 } 779 781 780 782 #define GEN_LDUXF(name, ldop, opc, type) \ 781 - static void glue(gen_, name##ux)(DisasContext *ctx) \ 783 + static void glue(gen_, name##ux)(DisasContext *ctx) \ 782 784 { \ 783 785 TCGv EA; \ 784 786 TCGv_i64 t0; \ ··· 802 804 } 803 805 804 806 #define GEN_LDXF(name, ldop, opc2, opc3, type) \ 805 - static void glue(gen_, name##x)(DisasContext *ctx) \ 807 + static void glue(gen_, name##x)(DisasContext *ctx) \ 806 808 { \ 807 809 TCGv EA; \ 808 810 TCGv_i64 t0; \ ··· 872 874 EA = tcg_temp_new(); 873 875 gen_addr_imm_index(ctx, EA, 0); 874 876 t0 = tcg_temp_new_i64(); 875 - /* We only need to swap high and low halves. 
gen_qemu_ld64_i64 does 876 - necessary 64-bit byteswap already. */ 877 + /* 878 + * We only need to swap high and low halves. gen_qemu_ld64_i64 879 + * does necessary 64-bit byteswap already. 880 + */ 877 881 if (unlikely(ctx->le_mode)) { 878 882 gen_qemu_ld64_i64(ctx, t0, EA); 879 883 set_fpr(rD(ctx->opcode) + 1, t0); ··· 904 908 EA = tcg_temp_new(); 905 909 gen_addr_reg_index(ctx, EA); 906 910 t0 = tcg_temp_new_i64(); 907 - /* We only need to swap high and low halves. gen_qemu_ld64_i64 does 908 - necessary 64-bit byteswap already. */ 911 + /* 912 + * We only need to swap high and low halves. gen_qemu_ld64_i64 913 + * does necessary 64-bit byteswap already. 914 + */ 909 915 if (unlikely(ctx->le_mode)) { 910 916 gen_qemu_ld64_i64(ctx, t0, EA); 911 917 set_fpr(rD(ctx->opcode) + 1, t0); ··· 966 972 } 967 973 /*** Floating-point store ***/ 968 974 #define GEN_STF(name, stop, opc, type) \ 969 - static void glue(gen_, name)(DisasContext *ctx) \ 975 + static void glue(gen_, name)(DisasContext *ctx) \ 970 976 { \ 971 977 TCGv EA; \ 972 978 TCGv_i64 t0; \ ··· 985 991 } 986 992 987 993 #define GEN_STUF(name, stop, opc, type) \ 988 - static void glue(gen_, name##u)(DisasContext *ctx) \ 994 + static void glue(gen_, name##u)(DisasContext *ctx) \ 989 995 { \ 990 996 TCGv EA; \ 991 997 TCGv_i64 t0; \ ··· 1009 1015 } 1010 1016 1011 1017 #define GEN_STUXF(name, stop, opc, type) \ 1012 - static void glue(gen_, name##ux)(DisasContext *ctx) \ 1018 + static void glue(gen_, name##ux)(DisasContext *ctx) \ 1013 1019 { \ 1014 1020 TCGv EA; \ 1015 1021 TCGv_i64 t0; \ ··· 1033 1039 } 1034 1040 1035 1041 #define GEN_STXF(name, stop, opc2, opc3, type) \ 1036 - static void glue(gen_, name##x)(DisasContext *ctx) \ 1042 + static void glue(gen_, name##x)(DisasContext *ctx) \ 1037 1043 { \ 1038 1044 TCGv EA; \ 1039 1045 TCGv_i64 t0; \ ··· 1103 1109 EA = tcg_temp_new(); 1104 1110 t0 = tcg_temp_new_i64(); 1105 1111 gen_addr_imm_index(ctx, EA, 0); 1106 - /* We only need to swap high and low halves. 
gen_qemu_st64_i64 does 1107 - necessary 64-bit byteswap already. */ 1112 + /* 1113 + * We only need to swap high and low halves. gen_qemu_st64_i64 1114 + * does necessary 64-bit byteswap already. 1115 + */ 1108 1116 if (unlikely(ctx->le_mode)) { 1109 1117 get_fpr(t0, rD(ctx->opcode) + 1); 1110 1118 gen_qemu_st64_i64(ctx, t0, EA); ··· 1135 1143 EA = tcg_temp_new(); 1136 1144 t0 = tcg_temp_new_i64(); 1137 1145 gen_addr_reg_index(ctx, EA); 1138 - /* We only need to swap high and low halves. gen_qemu_st64_i64 does 1139 - necessary 64-bit byteswap already. */ 1146 + /* 1147 + * We only need to swap high and low halves. gen_qemu_st64_i64 1148 + * does necessary 64-bit byteswap already. 1149 + */ 1140 1150 if (unlikely(ctx->le_mode)) { 1141 1151 get_fpr(t0, rD(ctx->opcode) + 1); 1142 1152 gen_qemu_st64_i64(ctx, t0, EA); ··· 1204 1214 gen_addr_add(ctx, t1, t0, 8); 1205 1215 gen_qemu_ld64_i64(ctx, t2, t1); 1206 1216 set_fpr((rd + 1) % 32, t2); 1207 - if (ra != 0) 1217 + if (ra != 0) { 1208 1218 tcg_gen_mov_tl(cpu_gpr[ra], t0); 1219 + } 1209 1220 tcg_temp_free(t0); 1210 1221 tcg_temp_free(t1); 1211 1222 tcg_temp_free_i64(t2); ··· 1229 1240 gen_qemu_ld64_i64(ctx, t2, t1); 1230 1241 set_fpr((rd + 1) % 32, t2); 1231 1242 tcg_temp_free(t1); 1232 - if (ra != 0) 1243 + if (ra != 0) { 1233 1244 tcg_gen_mov_tl(cpu_gpr[ra], t0); 1245 + } 1234 1246 tcg_temp_free(t0); 1235 1247 tcg_temp_free_i64(t2); 1236 1248 }
+9 -5
target/ppc/translate/spe-impl.inc.c
··· 18 18 TCGv_i64 tmp = tcg_temp_new_i64(); 19 19 20 20 /* tmp := rA_lo + rA_hi << 32 */ 21 - tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], cpu_gprh[rA(ctx->opcode)]); 21 + tcg_gen_concat_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)], 22 + cpu_gprh[rA(ctx->opcode)]); 22 23 23 24 /* spe_acc := tmp */ 24 25 tcg_gen_st_i64(tmp, cpu_env, offsetof(CPUPPCState, spe_acc)); ··· 780 781 } 781 782 782 783 #define GEN_SPEOP_LDST(name, opc2, sh) \ 783 - static void glue(gen_, name)(DisasContext *ctx) \ 784 + static void glue(gen_, name)(DisasContext *ctx) \ 784 785 { \ 785 786 TCGv t0; \ 786 787 if (unlikely(!ctx->spe_enabled)) { \ ··· 1089 1090 gen_exception(ctx, POWERPC_EXCP_SPEU); 1090 1091 return; 1091 1092 } 1092 - tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], (target_long)~0x80000000LL); 1093 + tcg_gen_andi_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1094 + (target_long)~0x80000000LL); 1093 1095 } 1094 1096 static inline void gen_efsnabs(DisasContext *ctx) 1095 1097 { ··· 1097 1099 gen_exception(ctx, POWERPC_EXCP_SPEU); 1098 1100 return; 1099 1101 } 1100 - tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); 1102 + tcg_gen_ori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1103 + 0x80000000); 1101 1104 } 1102 1105 static inline void gen_efsneg(DisasContext *ctx) 1103 1106 { ··· 1105 1108 gen_exception(ctx, POWERPC_EXCP_SPEU); 1106 1109 return; 1107 1110 } 1108 - tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 0x80000000); 1111 + tcg_gen_xori_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 1112 + 0x80000000); 1109 1113 } 1110 1114 1111 1115 /* Conversion */
+15 -11
target/ppc/translate/vmx-impl.inc.c
··· 15 15 } 16 16 17 17 #define GEN_VR_LDX(name, opc2, opc3) \ 18 - static void glue(gen_, name)(DisasContext *ctx) \ 18 + static void glue(gen_, name)(DisasContext *ctx) \ 19 19 { \ 20 20 TCGv EA; \ 21 21 TCGv_i64 avr; \ ··· 28 28 EA = tcg_temp_new(); \ 29 29 gen_addr_reg_index(ctx, EA); \ 30 30 tcg_gen_andi_tl(EA, EA, ~0xf); \ 31 - /* We only need to swap high and low halves. gen_qemu_ld64_i64 does \ 32 - necessary 64-bit byteswap already. */ \ 31 + /* \ 32 + * We only need to swap high and low halves. gen_qemu_ld64_i64 \ 33 + * does necessary 64-bit byteswap already. \ 34 + */ \ 33 35 if (ctx->le_mode) { \ 34 36 gen_qemu_ld64_i64(ctx, avr, EA); \ 35 37 set_avr64(rD(ctx->opcode), avr, false); \ ··· 61 63 EA = tcg_temp_new(); \ 62 64 gen_addr_reg_index(ctx, EA); \ 63 65 tcg_gen_andi_tl(EA, EA, ~0xf); \ 64 - /* We only need to swap high and low halves. gen_qemu_st64_i64 does \ 65 - necessary 64-bit byteswap already. */ \ 66 + /* \ 67 + * We only need to swap high and low halves. gen_qemu_st64_i64 \ 68 + * does necessary 64-bit byteswap already. 
\ 69 + */ \ 66 70 if (ctx->le_mode) { \ 67 71 get_avr64(avr, rD(ctx->opcode), false); \ 68 72 gen_qemu_st64_i64(ctx, avr, EA); \ ··· 296 300 GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21); 297 301 298 302 #define GEN_VXFORM(name, opc2, opc3) \ 299 - static void glue(gen_, name)(DisasContext *ctx) \ 303 + static void glue(gen_, name)(DisasContext *ctx) \ 300 304 { \ 301 305 TCGv_ptr ra, rb, rd; \ 302 306 if (unlikely(!ctx->altivec_enabled)) { \ ··· 306 310 ra = gen_avr_ptr(rA(ctx->opcode)); \ 307 311 rb = gen_avr_ptr(rB(ctx->opcode)); \ 308 312 rd = gen_avr_ptr(rD(ctx->opcode)); \ 309 - gen_helper_##name (rd, ra, rb); \ 313 + gen_helper_##name(rd, ra, rb); \ 310 314 tcg_temp_free_ptr(ra); \ 311 315 tcg_temp_free_ptr(rb); \ 312 316 tcg_temp_free_ptr(rd); \ ··· 758 762 GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14); 759 763 760 764 #define GEN_VXFORM_NOA(name, opc2, opc3) \ 761 - static void glue(gen_, name)(DisasContext *ctx) \ 765 + static void glue(gen_, name)(DisasContext *ctx) \ 762 766 { \ 763 767 TCGv_ptr rb, rd; \ 764 768 if (unlikely(!ctx->altivec_enabled)) { \ ··· 767 771 } \ 768 772 rb = gen_avr_ptr(rB(ctx->opcode)); \ 769 773 rd = gen_avr_ptr(rD(ctx->opcode)); \ 770 - gen_helper_##name (rd, rb); \ 774 + gen_helper_##name(rd, rb); \ 771 775 tcg_temp_free_ptr(rb); \ 772 - tcg_temp_free_ptr(rd); \ 776 + tcg_temp_free_ptr(rd); \ 773 777 } 774 778 775 779 #define GEN_VXFORM_NOA_ENV(name, opc2, opc3) \ ··· 943 947 rb = gen_avr_ptr(rB(ctx->opcode)); 944 948 rd = gen_avr_ptr(rD(ctx->opcode)); 945 949 sh = tcg_const_i32(VSH(ctx->opcode)); 946 - gen_helper_vsldoi (rd, ra, rb, sh); 950 + gen_helper_vsldoi(rd, ra, rb, sh); 947 951 tcg_temp_free_ptr(ra); 948 952 tcg_temp_free_ptr(rb); 949 953 tcg_temp_free_ptr(rd);
+8 -7
target/ppc/translate/vsx-impl.inc.c
··· 751 751 #define SGN_MASK_SP 0x8000000080000000ull 752 752 753 753 #define VSX_SCALAR_MOVE(name, op, sgn_mask) \ 754 - static void glue(gen_, name)(DisasContext * ctx) \ 754 + static void glue(gen_, name)(DisasContext *ctx) \ 755 755 { \ 756 756 TCGv_i64 xb, sgm; \ 757 757 if (unlikely(!ctx->vsx_enabled)) { \ ··· 848 848 VSX_SCALAR_MOVE_QP(xscpsgnqp, OP_CPSGN, SGN_MASK_DP) 849 849 850 850 #define VSX_VECTOR_MOVE(name, op, sgn_mask) \ 851 - static void glue(gen_, name)(DisasContext * ctx) \ 851 + static void glue(gen_, name)(DisasContext *ctx) \ 852 852 { \ 853 853 TCGv_i64 xbh, xbl, sgm; \ 854 854 if (unlikely(!ctx->vsx_enabled)) { \ ··· 910 910 VSX_VECTOR_MOVE(xvcpsgnsp, OP_CPSGN, SGN_MASK_SP) 911 911 912 912 #define GEN_VSX_HELPER_2(name, op1, op2, inval, type) \ 913 - static void gen_##name(DisasContext * ctx) \ 913 + static void gen_##name(DisasContext *ctx) \ 914 914 { \ 915 915 TCGv_i32 opc; \ 916 916 if (unlikely(!ctx->vsx_enabled)) { \ ··· 923 923 } 924 924 925 925 #define GEN_VSX_HELPER_XT_XB_ENV(name, op1, op2, inval, type) \ 926 - static void gen_##name(DisasContext * ctx) \ 926 + static void gen_##name(DisasContext *ctx) \ 927 927 { \ 928 928 TCGv_i64 t0; \ 929 929 TCGv_i64 t1; \ ··· 1230 1230 } 1231 1231 1232 1232 #define VSX_LOGICAL(name, vece, tcg_op) \ 1233 - static void glue(gen_, name)(DisasContext * ctx) \ 1233 + static void glue(gen_, name)(DisasContext *ctx) \ 1234 1234 { \ 1235 1235 if (unlikely(!ctx->vsx_enabled)) { \ 1236 1236 gen_exception(ctx, POWERPC_EXCP_VSXU); \ ··· 1251 1251 VSX_LOGICAL(xxlorc, MO_64, tcg_gen_gvec_orc) 1252 1252 1253 1253 #define VSX_XXMRG(name, high) \ 1254 - static void glue(gen_, name)(DisasContext * ctx) \ 1254 + static void glue(gen_, name)(DisasContext *ctx) \ 1255 1255 { \ 1256 1256 TCGv_i64 a0, a1, b0, b1, tmp; \ 1257 1257 if (unlikely(!ctx->vsx_enabled)) { \ ··· 1444 1444 xb = tcg_const_tl(xB(ctx->opcode)); \ 1445 1445 t0 = tcg_temp_new_i32(); \ 1446 1446 t1 = tcg_temp_new_i64(); \ 1447 - /* uimm > 15 out 
of bound and for \ 1447 + /* \ 1448 + * uimm > 15 out of bound and for \ 1448 1449 * uimm > 12 handle as per hardware in helper \ 1449 1450 */ \ 1450 1451 if (uimm > 15) { \
+148 -95
target/ppc/translate_init.inc.c
··· 41 41 #include "fpu/softfloat.h" 42 42 #include "qapi/qapi-commands-target.h" 43 43 44 - //#define PPC_DUMP_CPU 45 - //#define PPC_DEBUG_SPR 46 - //#define PPC_DUMP_SPR_ACCESSES 44 + /* #define PPC_DUMP_CPU */ 45 + /* #define PPC_DEBUG_SPR */ 46 + /* #define PPC_DUMP_SPR_ACCESSES */ 47 47 /* #define USE_APPLE_GDB */ 48 48 49 - /* Generic callbacks: 49 + /* 50 + * Generic callbacks: 50 51 * do nothing but store/retrieve spr value 51 52 */ 52 53 static void spr_load_dump_spr(int sprn) ··· 58 59 #endif 59 60 } 60 61 61 - static void spr_read_generic (DisasContext *ctx, int gprn, int sprn) 62 + static void spr_read_generic(DisasContext *ctx, int gprn, int sprn) 62 63 { 63 64 gen_load_spr(cpu_gpr[gprn], sprn); 64 65 spr_load_dump_spr(sprn); ··· 230 231 } 231 232 } 232 233 233 - __attribute__ (( unused )) 234 + ATTRIBUTE_UNUSED 234 235 static void spr_read_atbl(DisasContext *ctx, int gprn, int sprn) 235 236 { 236 237 gen_helper_load_atbl(cpu_gpr[gprn], cpu_env); 237 238 } 238 239 239 - __attribute__ (( unused )) 240 + ATTRIBUTE_UNUSED 240 241 static void spr_read_atbu(DisasContext *ctx, int gprn, int sprn) 241 242 { 242 243 gen_helper_load_atbu(cpu_gpr[gprn], cpu_env); ··· 267 268 } 268 269 } 269 270 270 - __attribute__ (( unused )) 271 + ATTRIBUTE_UNUSED 271 272 static void spr_write_atbl(DisasContext *ctx, int sprn, int gprn) 272 273 { 273 274 gen_helper_store_atbl(cpu_env, cpu_gpr[gprn]); 274 275 } 275 276 276 - __attribute__ (( unused )) 277 + ATTRIBUTE_UNUSED 277 278 static void spr_write_atbu(DisasContext *ctx, int sprn, int gprn) 278 279 { 279 280 gen_helper_store_atbu(cpu_env, cpu_gpr[gprn]); 280 281 } 281 282 282 283 #if defined(TARGET_PPC64) 283 - __attribute__ (( unused )) 284 + ATTRIBUTE_UNUSED 284 285 static void spr_read_purr(DisasContext *ctx, int gprn, int sprn) 285 286 { 286 287 gen_helper_load_purr(cpu_gpr[gprn], cpu_env); ··· 319 320 /* IBAT0L...IBAT7L */ 320 321 static void spr_read_ibat(DisasContext *ctx, int gprn, int sprn) 321 322 { 322 - 
tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); 323 + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, 324 + offsetof(CPUPPCState, 325 + IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); 323 326 } 324 327 325 328 static void spr_read_ibat_h(DisasContext *ctx, int gprn, int sprn) 326 329 { 327 - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4])); 330 + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, 331 + offsetof(CPUPPCState, 332 + IBAT[sprn & 1][((sprn - SPR_IBAT4U) / 2) + 4])); 328 333 } 329 334 330 335 static void spr_write_ibatu(DisasContext *ctx, int sprn, int gprn) ··· 359 364 /* DBAT0L...DBAT7L */ 360 365 static void spr_read_dbat(DisasContext *ctx, int gprn, int sprn) 361 366 { 362 - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2])); 367 + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, 368 + offsetof(CPUPPCState, 369 + DBAT[sprn & 1][(sprn - SPR_DBAT0U) / 2])); 363 370 } 364 371 365 372 static void spr_read_dbat_h(DisasContext *ctx, int gprn, int sprn) 366 373 { 367 - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4])); 374 + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, 375 + offsetof(CPUPPCState, 376 + DBAT[sprn & 1][((sprn - SPR_DBAT4U) / 2) + 4])); 368 377 } 369 378 370 379 static void spr_write_dbatu(DisasContext *ctx, int sprn, int gprn) ··· 473 482 #if !defined(CONFIG_USER_ONLY) 474 483 static void spr_read_601_ubat(DisasContext *ctx, int gprn, int sprn) 475 484 { 476 - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); 485 + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, 486 + offsetof(CPUPPCState, 487 + IBAT[sprn & 1][(sprn - SPR_IBAT0U) / 2])); 477 488 } 478 489 479 490 static void spr_write_601_ubatu(DisasContext *ctx, int sprn, int gprn) ··· 532 543 #if !defined(CONFIG_USER_ONLY) 533 544 static void spr_read_403_pbr(DisasContext 
*ctx, int gprn, int sprn) 534 545 { 535 - tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1])); 546 + tcg_gen_ld_tl(cpu_gpr[gprn], cpu_env, 547 + offsetof(CPUPPCState, pb[sprn - SPR_403_PBL1])); 536 548 } 537 549 538 550 static void spr_write_403_pbr(DisasContext *ctx, int sprn, int gprn) ··· 661 673 662 674 static inline void _spr_register(CPUPPCState *env, int num, 663 675 const char *name, 664 - void (*uea_read)(DisasContext *ctx, int gprn, int sprn), 665 - void (*uea_write)(DisasContext *ctx, int sprn, int gprn), 676 + void (*uea_read)(DisasContext *ctx, 677 + int gprn, int sprn), 678 + void (*uea_write)(DisasContext *ctx, 679 + int sprn, int gprn), 666 680 #if !defined(CONFIG_USER_ONLY) 667 681 668 - void (*oea_read)(DisasContext *ctx, int gprn, int sprn), 669 - void (*oea_write)(DisasContext *ctx, int sprn, int gprn), 670 - void (*hea_read)(DisasContext *opaque, int gprn, int sprn), 671 - void (*hea_write)(DisasContext *opaque, int sprn, int gprn), 682 + void (*oea_read)(DisasContext *ctx, 683 + int gprn, int sprn), 684 + void (*oea_write)(DisasContext *ctx, 685 + int sprn, int gprn), 686 + void (*hea_read)(DisasContext *opaque, 687 + int gprn, int sprn), 688 + void (*hea_write)(DisasContext *opaque, 689 + int sprn, int gprn), 672 690 #endif 673 691 #if defined(CONFIG_KVM) 674 692 uint64_t one_reg_id, ··· 678 696 ppc_spr_t *spr; 679 697 680 698 spr = &env->spr_cb[num]; 681 - if (spr->name != NULL ||env-> spr[num] != 0x00000000 || 699 + if (spr->name != NULL || env->spr[num] != 0x00000000 || 682 700 #if !defined(CONFIG_USER_ONLY) 683 701 spr->oea_read != NULL || spr->oea_write != NULL || 684 702 #endif ··· 774 792 { 775 793 #ifndef CONFIG_USER_ONLY 776 794 if (env->has_hv_mode) { 777 - /* SDR1 is a hypervisor resource on CPUs which have a 778 - * hypervisor mode */ 795 + /* 796 + * SDR1 is a hypervisor resource on CPUs which have a 797 + * hypervisor mode 798 + */ 779 799 spr_register_hv(env, SPR_SDR1, "SDR1", 780 800 
SPR_NOACCESS, SPR_NOACCESS, 781 801 SPR_NOACCESS, SPR_NOACCESS, ··· 1123 1143 TCGv t1 = tcg_temp_new(); 1124 1144 TCGv t2 = tcg_temp_new(); 1125 1145 1126 - /* Note, the HV=1 PR=0 case is handled earlier by simply using 1146 + /* 1147 + * Note, the HV=1 PR=0 case is handled earlier by simply using 1127 1148 * spr_write_generic for HV mode in the SPR table 1128 1149 */ 1129 1150 ··· 1157 1178 TCGv t1 = tcg_temp_new(); 1158 1179 TCGv t2 = tcg_temp_new(); 1159 1180 1160 - /* Note, the HV=1 case is handled earlier by simply using 1181 + /* 1182 + * Note, the HV=1 case is handled earlier by simply using 1161 1183 * spr_write_generic for HV mode in the SPR table 1162 1184 */ 1163 1185 ··· 1187 1209 TCGv t1 = tcg_temp_new(); 1188 1210 TCGv t2 = tcg_temp_new(); 1189 1211 1190 - /* Note, the HV=1 case is handled earlier by simply using 1212 + /* 1213 + * Note, the HV=1 case is handled earlier by simply using 1191 1214 * spr_write_generic for HV mode in the SPR table 1192 1215 */ 1193 1216 ··· 1215 1238 static void gen_spr_amr(CPUPPCState *env) 1216 1239 { 1217 1240 #ifndef CONFIG_USER_ONLY 1218 - /* Virtual Page Class Key protection */ 1219 - /* The AMR is accessible either via SPR 13 or SPR 29. 13 is 1241 + /* 1242 + * Virtual Page Class Key protection 1243 + * 1244 + * The AMR is accessible either via SPR 13 or SPR 29. 13 is 1220 1245 * userspace accessible, 29 is privileged. 
So we only need to set 1221 - * the kvm ONE_REG id on one of them, we use 29 */ 1246 + * the kvm ONE_REG id on one of them, we use 29 1247 + */ 1222 1248 spr_register(env, SPR_UAMR, "UAMR", 1223 1249 &spr_read_generic, &spr_write_amr, 1224 1250 &spr_read_generic, &spr_write_amr, ··· 1902 1928 /* TLB assist registers */ 1903 1929 /* XXX : not implemented */ 1904 1930 for (i = 0; i < 8; i++) { 1905 - void (*uea_write)(DisasContext *ctx, int sprn, int gprn) = &spr_write_generic32; 1931 + void (*uea_write)(DisasContext *ctx, int sprn, int gprn) = 1932 + &spr_write_generic32; 1906 1933 if (i == 2 && (mas_mask & (1 << i)) && (env->insns_flags & PPC_64B)) { 1907 1934 uea_write = &spr_write_generic; 1908 1935 } ··· 2798 2825 0x00000000); 2799 2826 } 2800 2827 2801 - // XXX: TODO 2802 2828 /* 2803 2829 * AMR => SPR 29 (Power 2.04) 2804 2830 * CTRL => SPR 136 (Power 2.04) ··· 3344 3370 3345 3371 static int check_pow_hid0(CPUPPCState *env) 3346 3372 { 3347 - if (env->spr[SPR_HID0] & 0x00E00000) 3373 + if (env->spr[SPR_HID0] & 0x00E00000) { 3348 3374 return 1; 3375 + } 3349 3376 3350 3377 return 0; 3351 3378 } 3352 3379 3353 3380 static int check_pow_hid0_74xx(CPUPPCState *env) 3354 3381 { 3355 - if (env->spr[SPR_HID0] & 0x00600000) 3382 + if (env->spr[SPR_HID0] & 0x00600000) { 3356 3383 return 1; 3384 + } 3357 3385 3358 3386 return 0; 3359 3387 } ··· 4602 4630 dc->desc = "e200 core"; 4603 4631 pcc->init_proc = init_proc_e200; 4604 4632 pcc->check_pow = check_pow_hid0; 4605 - /* XXX: unimplemented instructions: 4633 + /* 4634 + * XXX: unimplemented instructions: 4606 4635 * dcblc 4607 4636 * dcbtlst 4608 4637 * dcbtstls ··· 4797 4826 * gen_spr_BookE(env, 0x0000000F0000FD7FULL); 4798 4827 */ 4799 4828 switch (version) { 4800 - case fsl_e500v1: 4801 - case fsl_e500v2: 4802 - default: 4803 - ivor_mask = 0x0000000F0000FFFFULL; 4804 - break; 4805 - case fsl_e500mc: 4806 - case fsl_e5500: 4807 - ivor_mask = 0x000003FE0000FFFFULL; 4808 - break; 4809 - case fsl_e6500: 4810 - ivor_mask 
= 0x000003FF0000FFFFULL; 4811 - break; 4829 + case fsl_e500v1: 4830 + case fsl_e500v2: 4831 + default: 4832 + ivor_mask = 0x0000000F0000FFFFULL; 4833 + break; 4834 + case fsl_e500mc: 4835 + case fsl_e5500: 4836 + ivor_mask = 0x000003FE0000FFFFULL; 4837 + break; 4838 + case fsl_e6500: 4839 + ivor_mask = 0x000003FF0000FFFFULL; 4840 + break; 4812 4841 } 4813 4842 gen_spr_BookE(env, ivor_mask); 4814 4843 gen_spr_usprg3(env); ··· 4848 4877 tlbncfg[1] = 0x40028040; 4849 4878 break; 4850 4879 default: 4851 - cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]); 4880 + cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", 4881 + env->spr[SPR_PVR]); 4852 4882 } 4853 4883 #endif 4854 4884 /* Cache sizes */ ··· 4872 4902 l1cfg1 |= 0x0B83820; 4873 4903 break; 4874 4904 default: 4875 - cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", env->spr[SPR_PVR]); 4905 + cpu_abort(CPU(cpu), "Unknown CPU: " TARGET_FMT_lx "\n", 4906 + env->spr[SPR_PVR]); 4876 4907 } 4877 4908 gen_spr_BookE206(env, 0x000000DF, tlbncfg, mmucfg); 4878 4909 /* XXX : not implemented */ ··· 5252 5283 0x00000000); 5253 5284 /* Memory management */ 5254 5285 init_excp_601(env); 5255 - /* XXX: beware that dcache line size is 64 5286 + /* 5287 + * XXX: beware that dcache line size is 64 5256 5288 * but dcbz uses 32 bytes "sectors" 5257 5289 * XXX: this breaks clcs instruction ! 
5258 5290 */ ··· 5789 5821 0x00000000); 5790 5822 /* Memory management */ 5791 5823 gen_low_BATs(env); 5792 - /* XXX: high BATs are also present but are known to be bugged on 5824 + /* 5825 + * XXX: high BATs are also present but are known to be bugged on 5793 5826 * die version 1.x 5794 5827 */ 5795 5828 init_excp_7x0(env); ··· 5971 6004 dc->desc = "PowerPC 750 CL"; 5972 6005 pcc->init_proc = init_proc_750cl; 5973 6006 pcc->check_pow = check_pow_hid0; 5974 - /* XXX: not implemented: 6007 + /* 6008 + * XXX: not implemented: 5975 6009 * cache lock instructions: 5976 6010 * dcbz_l 5977 6011 * floating point paired instructions ··· 7569 7603 &spr_read_generic, &spr_write_generic, 7570 7604 KVM_REG_PPC_VRSAVE, 0x00000000); 7571 7605 7572 - /* Can't find information on what this should be on reset. This 7573 - * value is the one used by 74xx processors. */ 7606 + /* 7607 + * Can't find information on what this should be on reset. This 7608 + * value is the one used by 74xx processors. 7609 + */ 7574 7610 vscr_init(env, 0x00010000); 7575 7611 } 7576 7612 ··· 8975 9011 8976 9012 env->irq_inputs = NULL; 8977 9013 /* Set all exception vectors to an invalid address */ 8978 - for (i = 0; i < POWERPC_EXCP_NB; i++) 9014 + for (i = 0; i < POWERPC_EXCP_NB; i++) { 8979 9015 env->excp_vectors[i] = (target_ulong)(-1ULL); 9016 + } 8980 9017 env->ivor_mask = 0x00000000; 8981 9018 env->ivpr_mask = 0x00000000; 8982 9019 /* Default MMU definitions */ ··· 9108 9145 #if !defined(CONFIG_USER_ONLY) 9109 9146 if (env->nb_tlb != 0) { 9110 9147 int nb_tlb = env->nb_tlb; 9111 - if (env->id_tlbs != 0) 9148 + if (env->id_tlbs != 0) { 9112 9149 nb_tlb *= 2; 9150 + } 9113 9151 switch (env->tlb_type) { 9114 9152 case TLB_6XX: 9115 9153 env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb); ··· 9201 9239 { 9202 9240 int i; 9203 9241 9204 - for (i = 0; i < len; i++) 9242 + for (i = 0; i < len; i++) { 9205 9243 table[i] = &invalid_handler; 9244 + } 9206 9245 } 9207 9246 9208 9247 static int 
create_new_table(opc_handler_t **table, unsigned char idx) ··· 9219 9258 static int insert_in_table(opc_handler_t **table, unsigned char idx, 9220 9259 opc_handler_t *handler) 9221 9260 { 9222 - if (table[idx] != &invalid_handler) 9261 + if (table[idx] != &invalid_handler) { 9223 9262 return -1; 9263 + } 9224 9264 table[idx] = handler; 9225 9265 9226 9266 return 0; ··· 9341 9381 } 9342 9382 } else { 9343 9383 if (register_dblind_insn(ppc_opcodes, insn->opc1, insn->opc2, 9344 - insn->opc3, &insn->handler) < 0) 9384 + insn->opc3, &insn->handler) < 0) { 9345 9385 return -1; 9386 + } 9346 9387 } 9347 9388 } else { 9348 9389 if (register_ind_insn(ppc_opcodes, insn->opc1, 9349 - insn->opc2, &insn->handler) < 0) 9390 + insn->opc2, &insn->handler) < 0) { 9350 9391 return -1; 9392 + } 9351 9393 } 9352 9394 } else { 9353 - if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) 9395 + if (register_direct_insn(ppc_opcodes, insn->opc1, &insn->handler) < 0) { 9354 9396 return -1; 9397 + } 9355 9398 } 9356 9399 9357 9400 return 0; ··· 9363 9406 9364 9407 for (i = 0, count = 0; i < len; i++) { 9365 9408 /* Consistency fixup */ 9366 - if (table[i] == NULL) 9409 + if (table[i] == NULL) { 9367 9410 table[i] = &invalid_handler; 9411 + } 9368 9412 if (table[i] != &invalid_handler) { 9369 9413 if (is_indirect_opcode(table[i])) { 9370 9414 tmp = test_opcode_table(ind_table(table[i]), ··· 9386 9430 9387 9431 static void fix_opcode_tables(opc_handler_t **ppc_opcodes) 9388 9432 { 9389 - if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) 9433 + if (test_opcode_table(ppc_opcodes, PPC_CPU_OPCODES_LEN) == 0) { 9390 9434 printf("*** WARNING: no opcode defined !\n"); 9435 + } 9391 9436 } 9392 9437 9393 9438 /*****************************************************************************/ ··· 9726 9771 { 9727 9772 CPUPPCState *env = &cpu->env; 9728 9773 9729 - /* TCG doesn't (yet) emulate some groups of instructions that 9730 - * are implemented on some otherwise supported 
CPUs (e.g. VSX 9731 - * and decimal floating point instructions on POWER7). We 9732 - * remove unsupported instruction groups from the cpu state's 9733 - * instruction masks and hope the guest can cope. For at 9734 - * least the pseries machine, the unavailability of these 9735 - * instructions can be advertised to the guest via the device 9736 - * tree. */ 9774 + /* 9775 + * TCG doesn't (yet) emulate some groups of instructions that are 9776 + * implemented on some otherwise supported CPUs (e.g. VSX and 9777 + * decimal floating point instructions on POWER7). We remove 9778 + * unsupported instruction groups from the cpu state's instruction 9779 + * masks and hope the guest can cope. For at least the pseries 9780 + * machine, the unavailability of these instructions can be 9781 + * advertised to the guest via the device tree. 9782 + */ 9737 9783 if ((env->insns_flags & ~PPC_TCG_INSNS) 9738 9784 || (env->insns_flags2 & ~PPC_TCG_INSNS2)) { 9739 9785 warn_report("Disabling some instructions which are not " ··· 9928 9974 " Bus model : %s\n", 9929 9975 excp_model, bus_model); 9930 9976 printf(" MSR features :\n"); 9931 - if (env->flags & POWERPC_FLAG_SPE) 9977 + if (env->flags & POWERPC_FLAG_SPE) { 9932 9978 printf(" signal processing engine enable" 9933 9979 "\n"); 9934 - else if (env->flags & POWERPC_FLAG_VRE) 9980 + } else if (env->flags & POWERPC_FLAG_VRE) { 9935 9981 printf(" vector processor enable\n"); 9936 - if (env->flags & POWERPC_FLAG_TGPR) 9982 + } 9983 + if (env->flags & POWERPC_FLAG_TGPR) { 9937 9984 printf(" temporary GPRs\n"); 9938 - else if (env->flags & POWERPC_FLAG_CE) 9985 + } else if (env->flags & POWERPC_FLAG_CE) { 9939 9986 printf(" critical input enable\n"); 9940 - if (env->flags & POWERPC_FLAG_SE) 9987 + } 9988 + if (env->flags & POWERPC_FLAG_SE) { 9941 9989 printf(" single-step trace mode\n"); 9942 - else if (env->flags & POWERPC_FLAG_DWE) 9990 + } else if (env->flags & POWERPC_FLAG_DWE) { 9943 9991 printf(" debug wait enable\n"); 9944 - else 
if (env->flags & POWERPC_FLAG_UBLE) 9992 + } else if (env->flags & POWERPC_FLAG_UBLE) { 9945 9993 printf(" user BTB lock enable\n"); 9946 - if (env->flags & POWERPC_FLAG_BE) 9994 + } 9995 + if (env->flags & POWERPC_FLAG_BE) { 9947 9996 printf(" branch-step trace mode\n"); 9948 - else if (env->flags & POWERPC_FLAG_DE) 9997 + } else if (env->flags & POWERPC_FLAG_DE) { 9949 9998 printf(" debug interrupt enable\n"); 9950 - if (env->flags & POWERPC_FLAG_PX) 9999 + } 10000 + if (env->flags & POWERPC_FLAG_PX) { 9951 10001 printf(" inclusive protection\n"); 9952 - else if (env->flags & POWERPC_FLAG_PMM) 10002 + } else if (env->flags & POWERPC_FLAG_PMM) { 9953 10003 printf(" performance monitor mark\n"); 9954 - if (env->flags == POWERPC_FLAG_NONE) 10004 + } 10005 + if (env->flags == POWERPC_FLAG_NONE) { 9955 10006 printf(" none\n"); 10007 + } 9956 10008 printf(" Time-base/decrementer clock source: %s\n", 9957 10009 env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock"); 9958 10010 dump_ppc_insns(env); ··· 10094 10146 const char *p; 10095 10147 unsigned long pvr; 10096 10148 10097 - /* Lookup by PVR if cpu_model is valid 8 digit hex number 10098 - * (excl: 0x prefix if present) 10149 + /* 10150 + * Lookup by PVR if cpu_model is valid 8 digit hex number (excl: 10151 + * 0x prefix if present) 10099 10152 */ 10100 10153 if (!qemu_strtoul(name, &p, 16, &pvr)) { 10101 10154 int len = p - name; ··· 10439 10492 env->bfd_mach = pcc->bfd_mach; 10440 10493 env->check_pow = pcc->check_pow; 10441 10494 10442 - /* Mark HV mode as supported if the CPU has an MSR_HV bit 10443 - * in the msr_mask. The mask can later be cleared by PAPR 10444 - * mode but the hv mode support will remain, thus enforcing 10445 - * that we cannot use priv. instructions in guest in PAPR 10446 - * mode. For 970 we currently simply don't set HV in msr_mask 10447 - * thus simulating an "Apple mode" 970. 
If we ever want to 10448 - * support 970 HV mode, we'll have to add a processor attribute 10449 - * of some sort. 10495 + /* 10496 + * Mark HV mode as supported if the CPU has an MSR_HV bit in the 10497 + * msr_mask. The mask can later be cleared by PAPR mode but the hv 10498 + * mode support will remain, thus enforcing that we cannot use 10499 + * priv. instructions in guest in PAPR mode. For 970 we currently 10500 + * simply don't set HV in msr_mask thus simulating an "Apple mode" 10501 + * 970. If we ever want to support 970 HV mode, we'll have to add 10502 + * a processor attribute of some sort. 10450 10503 */ 10451 10504 #if !defined(CONFIG_USER_ONLY) 10452 10505 env->has_hv_mode = !!(env->msr_mask & MSR_HVB); ··· 10573 10626 cc->tcg_initialize = ppc_translate_init; 10574 10627 #endif 10575 10628 cc->disas_set_info = ppc_disas_set_info; 10576 - 10629 + 10577 10630 dc->fw_name = "PowerPC,UNKNOWN"; 10578 10631 } 10579 10632