qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

ppc/pnv: Add models for POWER9 PHB4 PCIe Host bridge

These changes introduce models for the PCIe Host Bridge (PHB4) of the
POWER9 processor. It includes the PowerBus logic interface (PBCQ),
IOMMU support, a single PCIe Gen.4 Root Complex, and support for MSI
and LSI interrupt sources as found on a POWER9 system using the XIVE
interrupt controller.

POWER9 processor comes with 3 PHB4 PEC (PCI Express Controller) and
each PEC can have several PHBs. By default,

* PEC0 provides 1 PHB (PHB0)
* PEC1 provides 2 PHBs (PHB1 and PHB2)
* PEC2 provides 3 PHBs (PHB3, PHB4 and PHB5)

Each PEC has a set of "global" registers and some "per-stack" (per-PHB)
registers. Those are organized in two XSCOM ranges, the "Nest" range
and the "PCI" range, each range contains both some "PEC" registers and
some "per-stack" registers.

No default device layout is provided and PCI devices can be added on
any of the available PCIe Root Port (pcie.0 .. 2 of a Power9 chip)
with address 0x0 as the firmware (skiboot) only accepts a single
device per root port. To run a simple system with a network adapter
and a storage adapter, use command line options such as:

-device e1000e,netdev=net0,mac=C0:FF:EE:00:00:02,bus=pcie.0,addr=0x0
-netdev bridge,id=net0,helper=/usr/libexec/qemu-bridge-helper,br=virbr0,id=hostnet0

-device megasas,id=scsi0,bus=pcie.1,addr=0x0
-drive file=$disk,if=none,id=drive-scsi0-0-0-0,format=qcow2,cache=none
-device scsi-hd,bus=scsi0.0,channel=0,scsi-id=0,lun=0,drive=drive-scsi0-0-0-0,id=scsi0-0-0-0,bootindex=2

If more are needed, include a bridge.

Multi-chip is supported, with each chip adding its set of PHB4 controllers
and its PCI busses. The model doesn't emulate the EEH error handling.

This model is not ready for hotplug yet.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[ clg: - numerous cleanups
- commit log
- fix for broken LSI support
- PHB pic printinfo
- large QOM rework ]
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20200127144506.11132-2-clg@kaod.org>
[dwg: Use device_class_set_props()]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

authored by

Benjamin Herrenschmidt and committed by
David Gibson
4f9924c4 6e8a3ff6

+2943
+1
hw/pci-host/Makefile.objs
··· 20 20 common-obj-$(CONFIG_PCI_EXPRESS_XILINX) += xilinx-pcie.o 21 21 22 22 common-obj-$(CONFIG_PCI_EXPRESS_DESIGNWARE) += designware.o 23 + obj-$(CONFIG_POWERNV) += pnv_phb4.o pnv_phb4_pec.o
+1438
hw/pci-host/pnv_phb4.c
··· 1 + /* 2 + * QEMU PowerPC PowerNV (POWER9) PHB4 model 3 + * 4 + * Copyright (c) 2018-2020, IBM Corporation. 5 + * 6 + * This code is licensed under the GPL version 2 or later. See the 7 + * COPYING file in the top-level directory. 8 + */ 9 + #include "qemu/osdep.h" 10 + #include "qemu/log.h" 11 + #include "qapi/visitor.h" 12 + #include "qapi/error.h" 13 + #include "qemu-common.h" 14 + #include "monitor/monitor.h" 15 + #include "target/ppc/cpu.h" 16 + #include "hw/pci-host/pnv_phb4_regs.h" 17 + #include "hw/pci-host/pnv_phb4.h" 18 + #include "hw/pci/pcie_host.h" 19 + #include "hw/pci/pcie_port.h" 20 + #include "hw/ppc/pnv.h" 21 + #include "hw/ppc/pnv_xscom.h" 22 + #include "hw/irq.h" 23 + #include "hw/qdev-properties.h" 24 + 25 + #define phb_error(phb, fmt, ...) \ 26 + qemu_log_mask(LOG_GUEST_ERROR, "phb4[%d:%d]: " fmt "\n", \ 27 + (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__) 28 + 29 + /* 30 + * QEMU version of the GETFIELD/SETFIELD macros 31 + * 32 + * These are common with the PnvXive model. 33 + */ 34 + static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) 35 + { 36 + return (word & mask) >> ctz64(mask); 37 + } 38 + 39 + static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, 40 + uint64_t value) 41 + { 42 + return (word & ~mask) | ((value << ctz64(mask)) & mask); 43 + } 44 + 45 + static PCIDevice *pnv_phb4_find_cfg_dev(PnvPHB4 *phb) 46 + { 47 + PCIHostState *pci = PCI_HOST_BRIDGE(phb); 48 + uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3]; 49 + uint8_t bus, devfn; 50 + 51 + if (!(addr >> 63)) { 52 + return NULL; 53 + } 54 + bus = (addr >> 52) & 0xff; 55 + devfn = (addr >> 44) & 0xff; 56 + 57 + /* We don't access the root complex this way */ 58 + if (bus == 0 && devfn == 0) { 59 + return NULL; 60 + } 61 + return pci_find_device(pci->bus, bus, devfn); 62 + } 63 + 64 + /* 65 + * The CONFIG_DATA register expects little endian accesses, but as the 66 + * region is big endian, we have to swap the value. 
67 + */ 68 + static void pnv_phb4_config_write(PnvPHB4 *phb, unsigned off, 69 + unsigned size, uint64_t val) 70 + { 71 + uint32_t cfg_addr, limit; 72 + PCIDevice *pdev; 73 + 74 + pdev = pnv_phb4_find_cfg_dev(phb); 75 + if (!pdev) { 76 + return; 77 + } 78 + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; 79 + cfg_addr |= off; 80 + limit = pci_config_size(pdev); 81 + if (limit <= cfg_addr) { 82 + /* 83 + * conventional pci device can be behind pcie-to-pci bridge. 84 + * 256 <= addr < 4K has no effects. 85 + */ 86 + return; 87 + } 88 + switch (size) { 89 + case 1: 90 + break; 91 + case 2: 92 + val = bswap16(val); 93 + break; 94 + case 4: 95 + val = bswap32(val); 96 + break; 97 + default: 98 + g_assert_not_reached(); 99 + } 100 + pci_host_config_write_common(pdev, cfg_addr, limit, val, size); 101 + } 102 + 103 + static uint64_t pnv_phb4_config_read(PnvPHB4 *phb, unsigned off, 104 + unsigned size) 105 + { 106 + uint32_t cfg_addr, limit; 107 + PCIDevice *pdev; 108 + uint64_t val; 109 + 110 + pdev = pnv_phb4_find_cfg_dev(phb); 111 + if (!pdev) { 112 + return ~0ull; 113 + } 114 + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; 115 + cfg_addr |= off; 116 + limit = pci_config_size(pdev); 117 + if (limit <= cfg_addr) { 118 + /* 119 + * conventional pci device can be behind pcie-to-pci bridge. 120 + * 256 <= addr < 4K has no effects. 121 + */ 122 + return ~0ull; 123 + } 124 + val = pci_host_config_read_common(pdev, cfg_addr, limit, size); 125 + switch (size) { 126 + case 1: 127 + return val; 128 + case 2: 129 + return bswap16(val); 130 + case 4: 131 + return bswap32(val); 132 + default: 133 + g_assert_not_reached(); 134 + } 135 + } 136 + 137 + /* 138 + * Root complex register accesses are memory mapped. 
139 + */ 140 + static void pnv_phb4_rc_config_write(PnvPHB4 *phb, unsigned off, 141 + unsigned size, uint64_t val) 142 + { 143 + PCIHostState *pci = PCI_HOST_BRIDGE(phb); 144 + PCIDevice *pdev; 145 + 146 + if (size != 4) { 147 + phb_error(phb, "rc_config_write invalid size %d\n", size); 148 + return; 149 + } 150 + 151 + pdev = pci_find_device(pci->bus, 0, 0); 152 + assert(pdev); 153 + 154 + pci_host_config_write_common(pdev, off, PHB_RC_CONFIG_SIZE, 155 + bswap32(val), 4); 156 + } 157 + 158 + static uint64_t pnv_phb4_rc_config_read(PnvPHB4 *phb, unsigned off, 159 + unsigned size) 160 + { 161 + PCIHostState *pci = PCI_HOST_BRIDGE(phb); 162 + PCIDevice *pdev; 163 + uint64_t val; 164 + 165 + if (size != 4) { 166 + phb_error(phb, "rc_config_read invalid size %d\n", size); 167 + return ~0ull; 168 + } 169 + 170 + pdev = pci_find_device(pci->bus, 0, 0); 171 + assert(pdev); 172 + 173 + val = pci_host_config_read_common(pdev, off, PHB_RC_CONFIG_SIZE, 4); 174 + return bswap32(val); 175 + } 176 + 177 + static void pnv_phb4_check_mbt(PnvPHB4 *phb, uint32_t index) 178 + { 179 + uint64_t base, start, size, mbe0, mbe1; 180 + MemoryRegion *parent; 181 + char name[64]; 182 + 183 + /* Unmap first */ 184 + if (memory_region_is_mapped(&phb->mr_mmio[index])) { 185 + /* Should we destroy it in RCU friendly way... ? 
*/ 186 + memory_region_del_subregion(phb->mr_mmio[index].container, 187 + &phb->mr_mmio[index]); 188 + } 189 + 190 + /* Get table entry */ 191 + mbe0 = phb->ioda_MBT[(index << 1)]; 192 + mbe1 = phb->ioda_MBT[(index << 1) + 1]; 193 + 194 + if (!(mbe0 & IODA3_MBT0_ENABLE)) { 195 + return; 196 + } 197 + 198 + /* Grab geometry from registers */ 199 + base = GETFIELD(IODA3_MBT0_BASE_ADDR, mbe0) << 12; 200 + size = GETFIELD(IODA3_MBT1_MASK, mbe1) << 12; 201 + size |= 0xff00000000000000ull; 202 + size = ~size + 1; 203 + 204 + /* Calculate PCI side start address based on M32/M64 window type */ 205 + if (mbe0 & IODA3_MBT0_TYPE_M32) { 206 + start = phb->regs[PHB_M32_START_ADDR >> 3]; 207 + if ((start + size) > 0x100000000ull) { 208 + phb_error(phb, "M32 set beyond 4GB boundary !"); 209 + size = 0x100000000 - start; 210 + } 211 + } else { 212 + start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]); 213 + } 214 + 215 + /* TODO: Figure out how to implemet/decode AOMASK */ 216 + 217 + /* Check if it matches an enabled MMIO region in the PEC stack */ 218 + if (memory_region_is_mapped(&phb->stack->mmbar0) && 219 + base >= phb->stack->mmio0_base && 220 + (base + size) <= (phb->stack->mmio0_base + phb->stack->mmio0_size)) { 221 + parent = &phb->stack->mmbar0; 222 + base -= phb->stack->mmio0_base; 223 + } else if (memory_region_is_mapped(&phb->stack->mmbar1) && 224 + base >= phb->stack->mmio1_base && 225 + (base + size) <= (phb->stack->mmio1_base + phb->stack->mmio1_size)) { 226 + parent = &phb->stack->mmbar1; 227 + base -= phb->stack->mmio1_base; 228 + } else { 229 + phb_error(phb, "PHB MBAR %d out of parent bounds", index); 230 + return; 231 + } 232 + 233 + /* Create alias (better name ?) 
*/ 234 + snprintf(name, sizeof(name), "phb4-mbar%d", index); 235 + memory_region_init_alias(&phb->mr_mmio[index], OBJECT(phb), name, 236 + &phb->pci_mmio, start, size); 237 + memory_region_add_subregion(parent, base, &phb->mr_mmio[index]); 238 + } 239 + 240 + static void pnv_phb4_check_all_mbt(PnvPHB4 *phb) 241 + { 242 + uint64_t i; 243 + uint32_t num_windows = phb->big_phb ? PNV_PHB4_MAX_MMIO_WINDOWS : 244 + PNV_PHB4_MIN_MMIO_WINDOWS; 245 + 246 + for (i = 0; i < num_windows; i++) { 247 + pnv_phb4_check_mbt(phb, i); 248 + } 249 + } 250 + 251 + static uint64_t *pnv_phb4_ioda_access(PnvPHB4 *phb, 252 + unsigned *out_table, unsigned *out_idx) 253 + { 254 + uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3]; 255 + unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg); 256 + unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg); 257 + unsigned int mask; 258 + uint64_t *tptr = NULL; 259 + 260 + switch (table) { 261 + case IODA3_TBL_LIST: 262 + tptr = phb->ioda_LIST; 263 + mask = 7; 264 + break; 265 + case IODA3_TBL_MIST: 266 + tptr = phb->ioda_MIST; 267 + mask = phb->big_phb ? PNV_PHB4_MAX_MIST : (PNV_PHB4_MAX_MIST >> 1); 268 + mask -= 1; 269 + break; 270 + case IODA3_TBL_RCAM: 271 + mask = phb->big_phb ? 127 : 63; 272 + break; 273 + case IODA3_TBL_MRT: 274 + mask = phb->big_phb ? 15 : 7; 275 + break; 276 + case IODA3_TBL_PESTA: 277 + case IODA3_TBL_PESTB: 278 + mask = phb->big_phb ? PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1); 279 + mask -= 1; 280 + break; 281 + case IODA3_TBL_TVT: 282 + tptr = phb->ioda_TVT; 283 + mask = phb->big_phb ? PNV_PHB4_MAX_TVEs : (PNV_PHB4_MAX_TVEs >> 1); 284 + mask -= 1; 285 + break; 286 + case IODA3_TBL_TCR: 287 + case IODA3_TBL_TDR: 288 + mask = phb->big_phb ? 1023 : 511; 289 + break; 290 + case IODA3_TBL_MBT: 291 + tptr = phb->ioda_MBT; 292 + mask = phb->big_phb ? PNV_PHB4_MAX_MBEs : (PNV_PHB4_MAX_MBEs >> 1); 293 + mask -= 1; 294 + break; 295 + case IODA3_TBL_MDT: 296 + tptr = phb->ioda_MDT; 297 + mask = phb->big_phb ? 
PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1); 298 + mask -= 1; 299 + break; 300 + case IODA3_TBL_PEEV: 301 + tptr = phb->ioda_PEEV; 302 + mask = phb->big_phb ? PNV_PHB4_MAX_PEEVs : (PNV_PHB4_MAX_PEEVs >> 1); 303 + mask -= 1; 304 + break; 305 + default: 306 + phb_error(phb, "invalid IODA table %d", table); 307 + return NULL; 308 + } 309 + index &= mask; 310 + if (out_idx) { 311 + *out_idx = index; 312 + } 313 + if (out_table) { 314 + *out_table = table; 315 + } 316 + if (tptr) { 317 + tptr += index; 318 + } 319 + if (adreg & PHB_IODA_AD_AUTOINC) { 320 + index = (index + 1) & mask; 321 + adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index); 322 + } 323 + 324 + phb->regs[PHB_IODA_ADDR >> 3] = adreg; 325 + return tptr; 326 + } 327 + 328 + static uint64_t pnv_phb4_ioda_read(PnvPHB4 *phb) 329 + { 330 + unsigned table, idx; 331 + uint64_t *tptr; 332 + 333 + tptr = pnv_phb4_ioda_access(phb, &table, &idx); 334 + if (!tptr) { 335 + /* Special PESTA case */ 336 + if (table == IODA3_TBL_PESTA) { 337 + return ((uint64_t)(phb->ioda_PEST_AB[idx] & 1)) << 63; 338 + } else if (table == IODA3_TBL_PESTB) { 339 + return ((uint64_t)(phb->ioda_PEST_AB[idx] & 2)) << 62; 340 + } 341 + /* Return 0 on unsupported tables, not ff's */ 342 + return 0; 343 + } 344 + return *tptr; 345 + } 346 + 347 + static void pnv_phb4_ioda_write(PnvPHB4 *phb, uint64_t val) 348 + { 349 + unsigned table, idx; 350 + uint64_t *tptr; 351 + 352 + tptr = pnv_phb4_ioda_access(phb, &table, &idx); 353 + if (!tptr) { 354 + /* Special PESTA case */ 355 + if (table == IODA3_TBL_PESTA) { 356 + phb->ioda_PEST_AB[idx] &= ~1; 357 + phb->ioda_PEST_AB[idx] |= (val >> 63) & 1; 358 + } else if (table == IODA3_TBL_PESTB) { 359 + phb->ioda_PEST_AB[idx] &= ~2; 360 + phb->ioda_PEST_AB[idx] |= (val >> 62) & 2; 361 + } 362 + return; 363 + } 364 + 365 + /* Handle side effects */ 366 + switch (table) { 367 + case IODA3_TBL_LIST: 368 + break; 369 + case IODA3_TBL_MIST: { 370 + /* Special mask for MIST partial write */ 371 + uint64_t adreg = 
phb->regs[PHB_IODA_ADDR >> 3]; 372 + uint32_t mmask = GETFIELD(PHB_IODA_AD_MIST_PWV, adreg); 373 + uint64_t v = *tptr; 374 + if (mmask == 0) { 375 + mmask = 0xf; 376 + } 377 + if (mmask & 8) { 378 + v &= 0x0000ffffffffffffull; 379 + v |= 0xcfff000000000000ull & val; 380 + } 381 + if (mmask & 4) { 382 + v &= 0xffff0000ffffffffull; 383 + v |= 0x0000cfff00000000ull & val; 384 + } 385 + if (mmask & 2) { 386 + v &= 0xffffffff0000ffffull; 387 + v |= 0x00000000cfff0000ull & val; 388 + } 389 + if (mmask & 1) { 390 + v &= 0xffffffffffff0000ull; 391 + v |= 0x000000000000cfffull & val; 392 + } 393 + *tptr = val; 394 + break; 395 + } 396 + case IODA3_TBL_MBT: 397 + *tptr = val; 398 + 399 + /* Copy accross the valid bit to the other half */ 400 + phb->ioda_MBT[idx ^ 1] &= 0x7fffffffffffffffull; 401 + phb->ioda_MBT[idx ^ 1] |= 0x8000000000000000ull & val; 402 + 403 + /* Update mappings */ 404 + pnv_phb4_check_mbt(phb, idx >> 1); 405 + break; 406 + default: 407 + *tptr = val; 408 + } 409 + } 410 + 411 + static void pnv_phb4_rtc_invalidate(PnvPHB4 *phb, uint64_t val) 412 + { 413 + PnvPhb4DMASpace *ds; 414 + 415 + /* Always invalidate all for now ... 
*/ 416 + QLIST_FOREACH(ds, &phb->dma_spaces, list) { 417 + ds->pe_num = PHB_INVALID_PE; 418 + } 419 + } 420 + 421 + static void pnv_phb4_update_msi_regions(PnvPhb4DMASpace *ds) 422 + { 423 + uint64_t cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3]; 424 + 425 + if (cfg & PHB_PHB4C_32BIT_MSI_EN) { 426 + if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) { 427 + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), 428 + 0xffff0000, &ds->msi32_mr); 429 + } 430 + } else { 431 + if (memory_region_is_mapped(MEMORY_REGION(&ds->msi32_mr))) { 432 + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), 433 + &ds->msi32_mr); 434 + } 435 + } 436 + 437 + if (cfg & PHB_PHB4C_64BIT_MSI_EN) { 438 + if (!memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) { 439 + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), 440 + (1ull << 60), &ds->msi64_mr); 441 + } 442 + } else { 443 + if (memory_region_is_mapped(MEMORY_REGION(&ds->msi64_mr))) { 444 + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), 445 + &ds->msi64_mr); 446 + } 447 + } 448 + } 449 + 450 + static void pnv_phb4_update_all_msi_regions(PnvPHB4 *phb) 451 + { 452 + PnvPhb4DMASpace *ds; 453 + 454 + QLIST_FOREACH(ds, &phb->dma_spaces, list) { 455 + pnv_phb4_update_msi_regions(ds); 456 + } 457 + } 458 + 459 + static void pnv_phb4_update_xsrc(PnvPHB4 *phb) 460 + { 461 + int shift, flags, i, lsi_base; 462 + XiveSource *xsrc = &phb->xsrc; 463 + 464 + /* The XIVE source characteristics can be set at run time */ 465 + if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PGSZ_64K) { 466 + shift = XIVE_ESB_64K; 467 + } else { 468 + shift = XIVE_ESB_4K; 469 + } 470 + if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_STORE_EOI) { 471 + flags = XIVE_SRC_STORE_EOI; 472 + } else { 473 + flags = 0; 474 + } 475 + 476 + phb->xsrc.esb_shift = shift; 477 + phb->xsrc.esb_flags = flags; 478 + 479 + lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]); 480 + lsi_base <<= 3; 481 + 482 + /* TODO: handle reset values of 
PHB_LSI_SRC_ID */ 483 + if (!lsi_base) { 484 + return; 485 + } 486 + 487 + /* TODO: need a xive_source_irq_reset_lsi() */ 488 + bitmap_zero(xsrc->lsi_map, xsrc->nr_irqs); 489 + 490 + for (i = 0; i < xsrc->nr_irqs; i++) { 491 + bool msi = (i < lsi_base || i >= (lsi_base + 8)); 492 + if (!msi) { 493 + xive_source_irq_set_lsi(xsrc, i); 494 + } 495 + } 496 + } 497 + 498 + static void pnv_phb4_reg_write(void *opaque, hwaddr off, uint64_t val, 499 + unsigned size) 500 + { 501 + PnvPHB4 *phb = PNV_PHB4(opaque); 502 + bool changed; 503 + 504 + /* Special case outbound configuration data */ 505 + if ((off & 0xfffc) == PHB_CONFIG_DATA) { 506 + pnv_phb4_config_write(phb, off & 0x3, size, val); 507 + return; 508 + } 509 + 510 + /* Special case RC configuration space */ 511 + if ((off & 0xf800) == PHB_RC_CONFIG_BASE) { 512 + pnv_phb4_rc_config_write(phb, off & 0x7ff, size, val); 513 + return; 514 + } 515 + 516 + /* Other registers are 64-bit only */ 517 + if (size != 8 || off & 0x7) { 518 + phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", 519 + off, size); 520 + return; 521 + } 522 + 523 + /* Handle masking */ 524 + switch (off) { 525 + case PHB_LSI_SOURCE_ID: 526 + val &= PHB_LSI_SRC_ID; 527 + break; 528 + case PHB_M64_UPPER_BITS: 529 + val &= 0xff00000000000000ull; 530 + break; 531 + /* TCE Kill */ 532 + case PHB_TCE_KILL: 533 + /* Clear top 3 bits which HW does to indicate successful queuing */ 534 + val &= ~(PHB_TCE_KILL_ALL | PHB_TCE_KILL_PE | PHB_TCE_KILL_ONE); 535 + break; 536 + case PHB_Q_DMA_R: 537 + /* 538 + * This is enough logic to make SW happy but we aren't 539 + * actually quiescing the DMAs 540 + */ 541 + if (val & PHB_Q_DMA_R_AUTORESET) { 542 + val = 0; 543 + } else { 544 + val &= PHB_Q_DMA_R_QUIESCE_DMA; 545 + } 546 + break; 547 + /* LEM stuff */ 548 + case PHB_LEM_FIR_AND_MASK: 549 + phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val; 550 + return; 551 + case PHB_LEM_FIR_OR_MASK: 552 + phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val; 553 + return; 554 + 
case PHB_LEM_ERROR_AND_MASK: 555 + phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val; 556 + return; 557 + case PHB_LEM_ERROR_OR_MASK: 558 + phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val; 559 + return; 560 + case PHB_LEM_WOF: 561 + val = 0; 562 + break; 563 + /* TODO: More regs ..., maybe create a table with masks... */ 564 + 565 + /* Read only registers */ 566 + case PHB_CPU_LOADSTORE_STATUS: 567 + case PHB_ETU_ERR_SUMMARY: 568 + case PHB_PHB4_GEN_CAP: 569 + case PHB_PHB4_TCE_CAP: 570 + case PHB_PHB4_IRQ_CAP: 571 + case PHB_PHB4_EEH_CAP: 572 + return; 573 + } 574 + 575 + /* Record whether it changed */ 576 + changed = phb->regs[off >> 3] != val; 577 + 578 + /* Store in register cache first */ 579 + phb->regs[off >> 3] = val; 580 + 581 + /* Handle side effects */ 582 + switch (off) { 583 + case PHB_PHB4_CONFIG: 584 + if (changed) { 585 + pnv_phb4_update_all_msi_regions(phb); 586 + } 587 + break; 588 + case PHB_M32_START_ADDR: 589 + case PHB_M64_UPPER_BITS: 590 + if (changed) { 591 + pnv_phb4_check_all_mbt(phb); 592 + } 593 + break; 594 + 595 + /* IODA table accesses */ 596 + case PHB_IODA_DATA0: 597 + pnv_phb4_ioda_write(phb, val); 598 + break; 599 + 600 + /* RTC invalidation */ 601 + case PHB_RTC_INVALIDATE: 602 + pnv_phb4_rtc_invalidate(phb, val); 603 + break; 604 + 605 + /* PHB Control (Affects XIVE source) */ 606 + case PHB_CTRLR: 607 + case PHB_LSI_SOURCE_ID: 608 + pnv_phb4_update_xsrc(phb); 609 + break; 610 + 611 + /* Silent simple writes */ 612 + case PHB_ASN_CMPM: 613 + case PHB_CONFIG_ADDRESS: 614 + case PHB_IODA_ADDR: 615 + case PHB_TCE_KILL: 616 + case PHB_TCE_SPEC_CTL: 617 + case PHB_PEST_BAR: 618 + case PHB_PELTV_BAR: 619 + case PHB_RTT_BAR: 620 + case PHB_LEM_FIR_ACCUM: 621 + case PHB_LEM_ERROR_MASK: 622 + case PHB_LEM_ACTION0: 623 + case PHB_LEM_ACTION1: 624 + case PHB_TCE_TAG_ENABLE: 625 + case PHB_INT_NOTIFY_ADDR: 626 + case PHB_INT_NOTIFY_INDEX: 627 + case PHB_DMARD_SYNC: 628 + break; 629 + 630 + /* Noise on anything else */ 631 + default: 632 + 
qemu_log_mask(LOG_UNIMP, "phb4: reg_write 0x%"PRIx64"=%"PRIx64"\n", 633 + off, val); 634 + } 635 + } 636 + 637 + static uint64_t pnv_phb4_reg_read(void *opaque, hwaddr off, unsigned size) 638 + { 639 + PnvPHB4 *phb = PNV_PHB4(opaque); 640 + uint64_t val; 641 + 642 + if ((off & 0xfffc) == PHB_CONFIG_DATA) { 643 + return pnv_phb4_config_read(phb, off & 0x3, size); 644 + } 645 + 646 + /* Special case RC configuration space */ 647 + if ((off & 0xf800) == PHB_RC_CONFIG_BASE) { 648 + return pnv_phb4_rc_config_read(phb, off & 0x7ff, size); 649 + } 650 + 651 + /* Other registers are 64-bit only */ 652 + if (size != 8 || off & 0x7) { 653 + phb_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", 654 + off, size); 655 + return ~0ull; 656 + } 657 + 658 + /* Default read from cache */ 659 + val = phb->regs[off >> 3]; 660 + 661 + switch (off) { 662 + case PHB_VERSION: 663 + return phb->version; 664 + 665 + /* Read-only */ 666 + case PHB_PHB4_GEN_CAP: 667 + return 0xe4b8000000000000ull; 668 + case PHB_PHB4_TCE_CAP: 669 + return phb->big_phb ? 0x4008440000000400ull : 0x2008440000000200ull; 670 + case PHB_PHB4_IRQ_CAP: 671 + return phb->big_phb ? 0x0800000000001000ull : 0x0800000000000800ull; 672 + case PHB_PHB4_EEH_CAP: 673 + return phb->big_phb ? 0x2000000000000000ull : 0x1000000000000000ull; 674 + 675 + /* IODA table accesses */ 676 + case PHB_IODA_DATA0: 677 + return pnv_phb4_ioda_read(phb); 678 + 679 + /* Link training always appears trained */ 680 + case PHB_PCIE_DLP_TRAIN_CTL: 681 + /* TODO: Do something sensible with speed ? 
*/ 682 + return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TL_LINKACT; 683 + 684 + /* DMA read sync: make it look like it's complete */ 685 + case PHB_DMARD_SYNC: 686 + return PHB_DMARD_SYNC_COMPLETE; 687 + 688 + /* Silent simple reads */ 689 + case PHB_LSI_SOURCE_ID: 690 + case PHB_CPU_LOADSTORE_STATUS: 691 + case PHB_ASN_CMPM: 692 + case PHB_PHB4_CONFIG: 693 + case PHB_M32_START_ADDR: 694 + case PHB_CONFIG_ADDRESS: 695 + case PHB_IODA_ADDR: 696 + case PHB_RTC_INVALIDATE: 697 + case PHB_TCE_KILL: 698 + case PHB_TCE_SPEC_CTL: 699 + case PHB_PEST_BAR: 700 + case PHB_PELTV_BAR: 701 + case PHB_RTT_BAR: 702 + case PHB_M64_UPPER_BITS: 703 + case PHB_CTRLR: 704 + case PHB_LEM_FIR_ACCUM: 705 + case PHB_LEM_ERROR_MASK: 706 + case PHB_LEM_ACTION0: 707 + case PHB_LEM_ACTION1: 708 + case PHB_TCE_TAG_ENABLE: 709 + case PHB_INT_NOTIFY_ADDR: 710 + case PHB_INT_NOTIFY_INDEX: 711 + case PHB_Q_DMA_R: 712 + case PHB_ETU_ERR_SUMMARY: 713 + break; 714 + 715 + /* Noise on anything else */ 716 + default: 717 + qemu_log_mask(LOG_UNIMP, "phb4: reg_read 0x%"PRIx64"=%"PRIx64"\n", 718 + off, val); 719 + } 720 + return val; 721 + } 722 + 723 + static const MemoryRegionOps pnv_phb4_reg_ops = { 724 + .read = pnv_phb4_reg_read, 725 + .write = pnv_phb4_reg_write, 726 + .valid.min_access_size = 1, 727 + .valid.max_access_size = 8, 728 + .impl.min_access_size = 1, 729 + .impl.max_access_size = 8, 730 + .endianness = DEVICE_BIG_ENDIAN, 731 + }; 732 + 733 + static uint64_t pnv_phb4_xscom_read(void *opaque, hwaddr addr, unsigned size) 734 + { 735 + PnvPHB4 *phb = PNV_PHB4(opaque); 736 + uint32_t reg = addr >> 3; 737 + uint64_t val; 738 + hwaddr offset; 739 + 740 + switch (reg) { 741 + case PHB_SCOM_HV_IND_ADDR: 742 + return phb->scom_hv_ind_addr_reg; 743 + 744 + case PHB_SCOM_HV_IND_DATA: 745 + if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) { 746 + phb_error(phb, "Invalid indirect address"); 747 + return ~0ull; 748 + } 749 + size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) 
? 4 : 8; 750 + offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg); 751 + val = pnv_phb4_reg_read(phb, offset, size); 752 + if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) { 753 + offset += size; 754 + offset &= 0x3fff; 755 + phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, 756 + phb->scom_hv_ind_addr_reg, 757 + offset); 758 + } 759 + return val; 760 + case PHB_SCOM_ETU_LEM_FIR: 761 + case PHB_SCOM_ETU_LEM_FIR_AND: 762 + case PHB_SCOM_ETU_LEM_FIR_OR: 763 + case PHB_SCOM_ETU_LEM_FIR_MSK: 764 + case PHB_SCOM_ETU_LEM_ERR_MSK_AND: 765 + case PHB_SCOM_ETU_LEM_ERR_MSK_OR: 766 + case PHB_SCOM_ETU_LEM_ACT0: 767 + case PHB_SCOM_ETU_LEM_ACT1: 768 + case PHB_SCOM_ETU_LEM_WOF: 769 + offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM; 770 + return pnv_phb4_reg_read(phb, offset, size); 771 + case PHB_SCOM_ETU_PMON_CONFIG: 772 + case PHB_SCOM_ETU_PMON_CTR0: 773 + case PHB_SCOM_ETU_PMON_CTR1: 774 + case PHB_SCOM_ETU_PMON_CTR2: 775 + case PHB_SCOM_ETU_PMON_CTR3: 776 + offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG; 777 + return pnv_phb4_reg_read(phb, offset, size); 778 + 779 + default: 780 + qemu_log_mask(LOG_UNIMP, "phb4: xscom_read 0x%"HWADDR_PRIx"\n", addr); 781 + return ~0ull; 782 + } 783 + } 784 + 785 + static void pnv_phb4_xscom_write(void *opaque, hwaddr addr, 786 + uint64_t val, unsigned size) 787 + { 788 + PnvPHB4 *phb = PNV_PHB4(opaque); 789 + uint32_t reg = addr >> 3; 790 + hwaddr offset; 791 + 792 + switch (reg) { 793 + case PHB_SCOM_HV_IND_ADDR: 794 + phb->scom_hv_ind_addr_reg = val & 0xe000000000001fff; 795 + break; 796 + case PHB_SCOM_HV_IND_DATA: 797 + if (!(phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_VALID)) { 798 + phb_error(phb, "Invalid indirect address"); 799 + break; 800 + } 801 + size = (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_4B) ? 
4 : 8; 802 + offset = GETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, phb->scom_hv_ind_addr_reg); 803 + pnv_phb4_reg_write(phb, offset, val, size); 804 + if (phb->scom_hv_ind_addr_reg & PHB_SCOM_HV_IND_ADDR_AUTOINC) { 805 + offset += size; 806 + offset &= 0x3fff; 807 + phb->scom_hv_ind_addr_reg = SETFIELD(PHB_SCOM_HV_IND_ADDR_ADDR, 808 + phb->scom_hv_ind_addr_reg, 809 + offset); 810 + } 811 + break; 812 + case PHB_SCOM_ETU_LEM_FIR: 813 + case PHB_SCOM_ETU_LEM_FIR_AND: 814 + case PHB_SCOM_ETU_LEM_FIR_OR: 815 + case PHB_SCOM_ETU_LEM_FIR_MSK: 816 + case PHB_SCOM_ETU_LEM_ERR_MSK_AND: 817 + case PHB_SCOM_ETU_LEM_ERR_MSK_OR: 818 + case PHB_SCOM_ETU_LEM_ACT0: 819 + case PHB_SCOM_ETU_LEM_ACT1: 820 + case PHB_SCOM_ETU_LEM_WOF: 821 + offset = ((reg - PHB_SCOM_ETU_LEM_FIR) << 3) + PHB_LEM_FIR_ACCUM; 822 + pnv_phb4_reg_write(phb, offset, val, size); 823 + break; 824 + case PHB_SCOM_ETU_PMON_CONFIG: 825 + case PHB_SCOM_ETU_PMON_CTR0: 826 + case PHB_SCOM_ETU_PMON_CTR1: 827 + case PHB_SCOM_ETU_PMON_CTR2: 828 + case PHB_SCOM_ETU_PMON_CTR3: 829 + offset = ((reg - PHB_SCOM_ETU_PMON_CONFIG) << 3) + PHB_PERFMON_CONFIG; 830 + pnv_phb4_reg_write(phb, offset, val, size); 831 + break; 832 + default: 833 + qemu_log_mask(LOG_UNIMP, "phb4: xscom_write 0x%"HWADDR_PRIx 834 + "=%"PRIx64"\n", addr, val); 835 + } 836 + } 837 + 838 + const MemoryRegionOps pnv_phb4_xscom_ops = { 839 + .read = pnv_phb4_xscom_read, 840 + .write = pnv_phb4_xscom_write, 841 + .valid.min_access_size = 8, 842 + .valid.max_access_size = 8, 843 + .impl.min_access_size = 8, 844 + .impl.max_access_size = 8, 845 + .endianness = DEVICE_BIG_ENDIAN, 846 + }; 847 + 848 + static int pnv_phb4_map_irq(PCIDevice *pci_dev, int irq_num) 849 + { 850 + /* Check that out properly ... */ 851 + return irq_num & 3; 852 + } 853 + 854 + static void pnv_phb4_set_irq(void *opaque, int irq_num, int level) 855 + { 856 + PnvPHB4 *phb = PNV_PHB4(opaque); 857 + uint32_t lsi_base; 858 + 859 + /* LSI only ... 
*/ 860 + if (irq_num > 3) { 861 + phb_error(phb, "IRQ %x is not an LSI", irq_num); 862 + } 863 + lsi_base = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]); 864 + lsi_base <<= 3; 865 + qemu_set_irq(phb->qirqs[lsi_base + irq_num], level); 866 + } 867 + 868 + static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds) 869 + { 870 + uint64_t rtt, addr; 871 + uint16_t rte; 872 + int bus_num; 873 + int num_PEs; 874 + 875 + /* Already resolved ? */ 876 + if (ds->pe_num != PHB_INVALID_PE) { 877 + return true; 878 + } 879 + 880 + /* We need to lookup the RTT */ 881 + rtt = ds->phb->regs[PHB_RTT_BAR >> 3]; 882 + if (!(rtt & PHB_RTT_BAR_ENABLE)) { 883 + phb_error(ds->phb, "DMA with RTT BAR disabled !"); 884 + /* Set error bits ? fence ? ... */ 885 + return false; 886 + } 887 + 888 + /* Read RTE */ 889 + bus_num = pci_bus_num(ds->bus); 890 + addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; 891 + addr += 2 * ((bus_num << 8) | ds->devfn); 892 + if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { 893 + phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); 894 + /* Set error bits ? fence ? ... */ 895 + return false; 896 + } 897 + rte = be16_to_cpu(rte); 898 + 899 + /* Fail upon reading of invalid PE# */ 900 + num_PEs = ds->phb->big_phb ? 
PNV_PHB4_MAX_PEs : (PNV_PHB4_MAX_PEs >> 1); 901 + if (rte >= num_PEs) { 902 + phb_error(ds->phb, "RTE for RID 0x%x invalid (%04x", ds->devfn, rte); 903 + rte &= num_PEs - 1; 904 + } 905 + ds->pe_num = rte; 906 + return true; 907 + } 908 + 909 + static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr, 910 + bool is_write, uint64_t tve, 911 + IOMMUTLBEntry *tlb) 912 + { 913 + uint64_t tta = GETFIELD(IODA3_TVT_TABLE_ADDR, tve); 914 + int32_t lev = GETFIELD(IODA3_TVT_NUM_LEVELS, tve); 915 + uint32_t tts = GETFIELD(IODA3_TVT_TCE_TABLE_SIZE, tve); 916 + uint32_t tps = GETFIELD(IODA3_TVT_IO_PSIZE, tve); 917 + 918 + /* Invalid levels */ 919 + if (lev > 4) { 920 + phb_error(ds->phb, "Invalid #levels in TVE %d", lev); 921 + return; 922 + } 923 + 924 + /* Invalid entry */ 925 + if (tts == 0) { 926 + phb_error(ds->phb, "Access to invalid TVE"); 927 + return; 928 + } 929 + 930 + /* IO Page Size of 0 means untranslated, else use TCEs */ 931 + if (tps == 0) { 932 + /* TODO: Handle boundaries */ 933 + 934 + /* Use 4k pages like q35 ... 
for now */ 935 + tlb->iova = addr & 0xfffffffffffff000ull; 936 + tlb->translated_addr = addr & 0x0003fffffffff000ull; 937 + tlb->addr_mask = 0xfffull; 938 + tlb->perm = IOMMU_RW; 939 + } else { 940 + uint32_t tce_shift, tbl_shift, sh; 941 + uint64_t base, taddr, tce, tce_mask; 942 + 943 + /* Address bits per bottom level TCE entry */ 944 + tce_shift = tps + 11; 945 + 946 + /* Address bits per table level */ 947 + tbl_shift = tts + 8; 948 + 949 + /* Top level table base address */ 950 + base = tta << 12; 951 + 952 + /* Total shift to first level */ 953 + sh = tbl_shift * lev + tce_shift; 954 + 955 + /* TODO: Limit to support IO page sizes */ 956 + 957 + /* TODO: Multi-level untested */ 958 + while ((lev--) >= 0) { 959 + /* Grab the TCE address */ 960 + taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3); 961 + if (dma_memory_read(&address_space_memory, taddr, &tce, 962 + sizeof(tce))) { 963 + phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr); 964 + return; 965 + } 966 + tce = be64_to_cpu(tce); 967 + 968 + /* Check permission for indirect TCE */ 969 + if ((lev >= 0) && !(tce & 3)) { 970 + phb_error(ds->phb, "Invalid indirect TCE at 0x%"PRIx64, taddr); 971 + phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, 972 + is_write ? 'W' : 'R', tve); 973 + phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", 974 + tta, lev, tts, tps); 975 + return; 976 + } 977 + sh -= tbl_shift; 978 + base = tce & ~0xfffull; 979 + } 980 + 981 + /* We exit the loop with TCE being the final TCE */ 982 + tce_mask = ~((1ull << tce_shift) - 1); 983 + tlb->iova = addr & tce_mask; 984 + tlb->translated_addr = tce & tce_mask; 985 + tlb->addr_mask = ~tce_mask; 986 + tlb->perm = tce & 3; 987 + if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) { 988 + phb_error(ds->phb, "TCE access fault at 0x%"PRIx64, taddr); 989 + phb_error(ds->phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, 990 + is_write ? 
'W' : 'R', tve); 991 + phb_error(ds->phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", 992 + tta, lev, tts, tps); 993 + } 994 + } 995 + } 996 + 997 + static IOMMUTLBEntry pnv_phb4_translate_iommu(IOMMUMemoryRegion *iommu, 998 + hwaddr addr, 999 + IOMMUAccessFlags flag, 1000 + int iommu_idx) 1001 + { 1002 + PnvPhb4DMASpace *ds = container_of(iommu, PnvPhb4DMASpace, dma_mr); 1003 + int tve_sel; 1004 + uint64_t tve, cfg; 1005 + IOMMUTLBEntry ret = { 1006 + .target_as = &address_space_memory, 1007 + .iova = addr, 1008 + .translated_addr = 0, 1009 + .addr_mask = ~(hwaddr)0, 1010 + .perm = IOMMU_NONE, 1011 + }; 1012 + 1013 + /* Resolve PE# */ 1014 + if (!pnv_phb4_resolve_pe(ds)) { 1015 + phb_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", 1016 + ds->bus, pci_bus_num(ds->bus), ds->devfn); 1017 + return ret; 1018 + } 1019 + 1020 + /* Check top bits */ 1021 + switch (addr >> 60) { 1022 + case 00: 1023 + /* DMA or 32-bit MSI ? */ 1024 + cfg = ds->phb->regs[PHB_PHB4_CONFIG >> 3]; 1025 + if ((cfg & PHB_PHB4C_32BIT_MSI_EN) && 1026 + ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) { 1027 + phb_error(ds->phb, "xlate on 32-bit MSI region"); 1028 + return ret; 1029 + } 1030 + /* Choose TVE XXX Use PHB4 Control Register */ 1031 + tve_sel = (addr >> 59) & 1; 1032 + tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel]; 1033 + pnv_phb4_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret); 1034 + break; 1035 + case 01: 1036 + phb_error(ds->phb, "xlate on 64-bit MSI region"); 1037 + break; 1038 + default: 1039 + phb_error(ds->phb, "xlate on unsupported address 0x%"PRIx64, addr); 1040 + } 1041 + return ret; 1042 + } 1043 + 1044 + #define TYPE_PNV_PHB4_IOMMU_MEMORY_REGION "pnv-phb4-iommu-memory-region" 1045 + #define PNV_PHB4_IOMMU_MEMORY_REGION(obj) \ 1046 + OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_PNV_PHB4_IOMMU_MEMORY_REGION) 1047 + 1048 + static void pnv_phb4_iommu_memory_region_class_init(ObjectClass *klass, 1049 + void *data) 1050 + { 1051 + IOMMUMemoryRegionClass 
*imrc = IOMMU_MEMORY_REGION_CLASS(klass); 1052 + 1053 + imrc->translate = pnv_phb4_translate_iommu; 1054 + } 1055 + 1056 + static const TypeInfo pnv_phb4_iommu_memory_region_info = { 1057 + .parent = TYPE_IOMMU_MEMORY_REGION, 1058 + .name = TYPE_PNV_PHB4_IOMMU_MEMORY_REGION, 1059 + .class_init = pnv_phb4_iommu_memory_region_class_init, 1060 + }; 1061 + 1062 + /* 1063 + * MSI/MSIX memory region implementation. 1064 + * The handler handles both MSI and MSIX. 1065 + */ 1066 + static void pnv_phb4_msi_write(void *opaque, hwaddr addr, 1067 + uint64_t data, unsigned size) 1068 + { 1069 + PnvPhb4DMASpace *ds = opaque; 1070 + PnvPHB4 *phb = ds->phb; 1071 + 1072 + uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f); 1073 + 1074 + /* Resolve PE# */ 1075 + if (!pnv_phb4_resolve_pe(ds)) { 1076 + phb_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", 1077 + ds->bus, pci_bus_num(ds->bus), ds->devfn); 1078 + return; 1079 + } 1080 + 1081 + /* TODO: Check it doesn't collide with LSIs */ 1082 + if (src >= phb->xsrc.nr_irqs) { 1083 + phb_error(phb, "MSI %d out of bounds", src); 1084 + return; 1085 + } 1086 + 1087 + /* TODO: check PE/MSI assignement */ 1088 + 1089 + qemu_irq_pulse(phb->qirqs[src]); 1090 + } 1091 + 1092 + /* There is no .read as the read result is undefined by PCI spec */ 1093 + static uint64_t pnv_phb4_msi_read(void *opaque, hwaddr addr, unsigned size) 1094 + { 1095 + PnvPhb4DMASpace *ds = opaque; 1096 + 1097 + phb_error(ds->phb, "Invalid MSI read @ 0x%" HWADDR_PRIx, addr); 1098 + return -1; 1099 + } 1100 + 1101 + static const MemoryRegionOps pnv_phb4_msi_ops = { 1102 + .read = pnv_phb4_msi_read, 1103 + .write = pnv_phb4_msi_write, 1104 + .endianness = DEVICE_LITTLE_ENDIAN 1105 + }; 1106 + 1107 + static PnvPhb4DMASpace *pnv_phb4_dma_find(PnvPHB4 *phb, PCIBus *bus, int devfn) 1108 + { 1109 + PnvPhb4DMASpace *ds; 1110 + 1111 + QLIST_FOREACH(ds, &phb->dma_spaces, list) { 1112 + if (ds->bus == bus && ds->devfn == devfn) { 1113 + break; 1114 + } 1115 + } 
1116 + return ds; 1117 + } 1118 + 1119 + static AddressSpace *pnv_phb4_dma_iommu(PCIBus *bus, void *opaque, int devfn) 1120 + { 1121 + PnvPHB4 *phb = opaque; 1122 + PnvPhb4DMASpace *ds; 1123 + char name[32]; 1124 + 1125 + ds = pnv_phb4_dma_find(phb, bus, devfn); 1126 + 1127 + if (ds == NULL) { 1128 + ds = g_malloc0(sizeof(PnvPhb4DMASpace)); 1129 + ds->bus = bus; 1130 + ds->devfn = devfn; 1131 + ds->pe_num = PHB_INVALID_PE; 1132 + ds->phb = phb; 1133 + snprintf(name, sizeof(name), "phb4-%d.%d-iommu", phb->chip_id, 1134 + phb->phb_id); 1135 + memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr), 1136 + TYPE_PNV_PHB4_IOMMU_MEMORY_REGION, 1137 + OBJECT(phb), name, UINT64_MAX); 1138 + address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr), 1139 + name); 1140 + memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb4_msi_ops, 1141 + ds, "msi32", 0x10000); 1142 + memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb4_msi_ops, 1143 + ds, "msi64", 0x100000); 1144 + pnv_phb4_update_msi_regions(ds); 1145 + 1146 + QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list); 1147 + } 1148 + return &ds->dma_as; 1149 + } 1150 + 1151 + static void pnv_phb4_instance_init(Object *obj) 1152 + { 1153 + PnvPHB4 *phb = PNV_PHB4(obj); 1154 + 1155 + QLIST_INIT(&phb->dma_spaces); 1156 + 1157 + /* XIVE interrupt source object */ 1158 + object_initialize_child(obj, "source", &phb->xsrc, sizeof(XiveSource), 1159 + TYPE_XIVE_SOURCE, &error_abort, NULL); 1160 + 1161 + /* Root Port */ 1162 + object_initialize_child(obj, "root", &phb->root, sizeof(phb->root), 1163 + TYPE_PNV_PHB4_ROOT_PORT, &error_abort, NULL); 1164 + 1165 + qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0)); 1166 + qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false); 1167 + } 1168 + 1169 + static void pnv_phb4_realize(DeviceState *dev, Error **errp) 1170 + { 1171 + PnvPHB4 *phb = PNV_PHB4(dev); 1172 + PCIHostState *pci = PCI_HOST_BRIDGE(dev); 1173 + XiveSource *xsrc = &phb->xsrc; 1174 + Error *local_err 
= NULL; 1175 + int nr_irqs; 1176 + char name[32]; 1177 + 1178 + assert(phb->stack); 1179 + 1180 + /* Set the "big_phb" flag */ 1181 + phb->big_phb = phb->phb_id == 0 || phb->phb_id == 3; 1182 + 1183 + /* Controller Registers */ 1184 + snprintf(name, sizeof(name), "phb4-%d.%d-regs", phb->chip_id, 1185 + phb->phb_id); 1186 + memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb4_reg_ops, phb, 1187 + name, 0x2000); 1188 + 1189 + /* 1190 + * PHB4 doesn't support IO space. However, qemu gets very upset if 1191 + * we don't have an IO region to anchor IO BARs onto so we just 1192 + * initialize one which we never hook up to anything 1193 + */ 1194 + 1195 + snprintf(name, sizeof(name), "phb4-%d.%d-pci-io", phb->chip_id, 1196 + phb->phb_id); 1197 + memory_region_init(&phb->pci_io, OBJECT(phb), name, 0x10000); 1198 + 1199 + snprintf(name, sizeof(name), "phb4-%d.%d-pci-mmio", phb->chip_id, 1200 + phb->phb_id); 1201 + memory_region_init(&phb->pci_mmio, OBJECT(phb), name, 1202 + PCI_MMIO_TOTAL_SIZE); 1203 + 1204 + pci->bus = pci_register_root_bus(dev, "root-bus", 1205 + pnv_phb4_set_irq, pnv_phb4_map_irq, phb, 1206 + &phb->pci_mmio, &phb->pci_io, 1207 + 0, 4, TYPE_PNV_PHB4_ROOT_BUS); 1208 + pci_setup_iommu(pci->bus, pnv_phb4_dma_iommu, phb); 1209 + 1210 + /* Add a single Root port */ 1211 + qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id); 1212 + qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id); 1213 + qdev_set_parent_bus(DEVICE(&phb->root), BUS(pci->bus)); 1214 + qdev_init_nofail(DEVICE(&phb->root)); 1215 + 1216 + /* Setup XIVE Source */ 1217 + if (phb->big_phb) { 1218 + nr_irqs = PNV_PHB4_MAX_INTs; 1219 + } else { 1220 + nr_irqs = PNV_PHB4_MAX_INTs >> 1; 1221 + } 1222 + object_property_set_int(OBJECT(xsrc), nr_irqs, "nr-irqs", &error_fatal); 1223 + object_property_set_link(OBJECT(xsrc), OBJECT(phb), "xive", &error_fatal); 1224 + object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err); 1225 + if (local_err) { 1226 + 
error_propagate(errp, local_err); 1227 + return; 1228 + } 1229 + 1230 + pnv_phb4_update_xsrc(phb); 1231 + 1232 + phb->qirqs = qemu_allocate_irqs(xive_source_set_irq, xsrc, xsrc->nr_irqs); 1233 + } 1234 + 1235 + static void pnv_phb4_reset(DeviceState *dev) 1236 + { 1237 + PnvPHB4 *phb = PNV_PHB4(dev); 1238 + PCIDevice *root_dev = PCI_DEVICE(&phb->root); 1239 + 1240 + /* 1241 + * Configure PCI device id at reset using a property. 1242 + */ 1243 + pci_config_set_vendor_id(root_dev->config, PCI_VENDOR_ID_IBM); 1244 + pci_config_set_device_id(root_dev->config, phb->device_id); 1245 + } 1246 + 1247 + static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge, 1248 + PCIBus *rootbus) 1249 + { 1250 + PnvPHB4 *phb = PNV_PHB4(host_bridge); 1251 + 1252 + snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x", 1253 + phb->chip_id, phb->phb_id); 1254 + return phb->bus_path; 1255 + } 1256 + 1257 + static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno) 1258 + { 1259 + PnvPHB4 *phb = PNV_PHB4(xf); 1260 + uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3]; 1261 + uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3]; 1262 + uint64_t data = XIVE_TRIGGER_PQ | offset | srcno; 1263 + MemTxResult result; 1264 + 1265 + address_space_stq_be(&address_space_memory, notif_port, data, 1266 + MEMTXATTRS_UNSPECIFIED, &result); 1267 + if (result != MEMTX_OK) { 1268 + phb_error(phb, "trigger failed @%"HWADDR_PRIx "\n", notif_port); 1269 + return; 1270 + } 1271 + } 1272 + 1273 + static Property pnv_phb4_properties[] = { 1274 + DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0), 1275 + DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0), 1276 + DEFINE_PROP_UINT64("version", PnvPHB4, version, 0), 1277 + DEFINE_PROP_UINT16("device-id", PnvPHB4, device_id, 0), 1278 + DEFINE_PROP_LINK("stack", PnvPHB4, stack, TYPE_PNV_PHB4_PEC_STACK, 1279 + PnvPhb4PecStack *), 1280 + DEFINE_PROP_END_OF_LIST(), 1281 + }; 1282 + 1283 + static void pnv_phb4_class_init(ObjectClass *klass, 
void *data) 1284 + { 1285 + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); 1286 + DeviceClass *dc = DEVICE_CLASS(klass); 1287 + XiveNotifierClass *xfc = XIVE_NOTIFIER_CLASS(klass); 1288 + 1289 + hc->root_bus_path = pnv_phb4_root_bus_path; 1290 + dc->realize = pnv_phb4_realize; 1291 + device_class_set_props(dc, pnv_phb4_properties); 1292 + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); 1293 + dc->user_creatable = true; 1294 + dc->reset = pnv_phb4_reset; 1295 + 1296 + xfc->notify = pnv_phb4_xive_notify; 1297 + } 1298 + 1299 + static const TypeInfo pnv_phb4_type_info = { 1300 + .name = TYPE_PNV_PHB4, 1301 + .parent = TYPE_PCIE_HOST_BRIDGE, 1302 + .instance_init = pnv_phb4_instance_init, 1303 + .instance_size = sizeof(PnvPHB4), 1304 + .class_init = pnv_phb4_class_init, 1305 + .interfaces = (InterfaceInfo[]) { 1306 + { TYPE_XIVE_NOTIFIER }, 1307 + { }, 1308 + } 1309 + }; 1310 + 1311 + static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data) 1312 + { 1313 + BusClass *k = BUS_CLASS(klass); 1314 + 1315 + /* 1316 + * PHB4 has only a single root complex. 
Enforce the limit on the 1317 + * parent bus 1318 + */ 1319 + k->max_dev = 1; 1320 + } 1321 + 1322 + static const TypeInfo pnv_phb4_root_bus_info = { 1323 + .name = TYPE_PNV_PHB4_ROOT_BUS, 1324 + .parent = TYPE_PCIE_BUS, 1325 + .class_init = pnv_phb4_root_bus_class_init, 1326 + .interfaces = (InterfaceInfo[]) { 1327 + { INTERFACE_PCIE_DEVICE }, 1328 + { } 1329 + }, 1330 + }; 1331 + 1332 + static void pnv_phb4_root_port_reset(DeviceState *dev) 1333 + { 1334 + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); 1335 + PCIDevice *d = PCI_DEVICE(dev); 1336 + uint8_t *conf = d->config; 1337 + 1338 + rpc->parent_reset(dev); 1339 + 1340 + pci_byte_test_and_set_mask(conf + PCI_IO_BASE, 1341 + PCI_IO_RANGE_MASK & 0xff); 1342 + pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT, 1343 + PCI_IO_RANGE_MASK & 0xff); 1344 + pci_set_word(conf + PCI_MEMORY_BASE, 0); 1345 + pci_set_word(conf + PCI_MEMORY_LIMIT, 0xfff0); 1346 + pci_set_word(conf + PCI_PREF_MEMORY_BASE, 0x1); 1347 + pci_set_word(conf + PCI_PREF_MEMORY_LIMIT, 0xfff1); 1348 + pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0x1); /* Hack */ 1349 + pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0xffffffff); 1350 + } 1351 + 1352 + static void pnv_phb4_root_port_realize(DeviceState *dev, Error **errp) 1353 + { 1354 + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); 1355 + Error *local_err = NULL; 1356 + 1357 + rpc->parent_realize(dev, &local_err); 1358 + if (local_err) { 1359 + error_propagate(errp, local_err); 1360 + return; 1361 + } 1362 + } 1363 + 1364 + static void pnv_phb4_root_port_class_init(ObjectClass *klass, void *data) 1365 + { 1366 + DeviceClass *dc = DEVICE_CLASS(klass); 1367 + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); 1368 + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); 1369 + 1370 + dc->desc = "IBM PHB4 PCIE Root Port"; 1371 + 1372 + device_class_set_parent_realize(dc, pnv_phb4_root_port_realize, 1373 + &rpc->parent_realize); 1374 + device_class_set_parent_reset(dc, pnv_phb4_root_port_reset, 1375 
+ &rpc->parent_reset); 1376 + 1377 + k->vendor_id = PCI_VENDOR_ID_IBM; 1378 + k->device_id = PNV_PHB4_DEVICE_ID; 1379 + k->revision = 0; 1380 + 1381 + rpc->exp_offset = 0x48; 1382 + rpc->aer_offset = 0x100; 1383 + 1384 + dc->reset = &pnv_phb4_root_port_reset; 1385 + } 1386 + 1387 + static const TypeInfo pnv_phb4_root_port_info = { 1388 + .name = TYPE_PNV_PHB4_ROOT_PORT, 1389 + .parent = TYPE_PCIE_ROOT_PORT, 1390 + .instance_size = sizeof(PnvPHB4RootPort), 1391 + .class_init = pnv_phb4_root_port_class_init, 1392 + }; 1393 + 1394 + static void pnv_phb4_register_types(void) 1395 + { 1396 + type_register_static(&pnv_phb4_root_bus_info); 1397 + type_register_static(&pnv_phb4_root_port_info); 1398 + type_register_static(&pnv_phb4_type_info); 1399 + type_register_static(&pnv_phb4_iommu_memory_region_info); 1400 + } 1401 + 1402 + type_init(pnv_phb4_register_types); 1403 + 1404 + void pnv_phb4_update_regions(PnvPhb4PecStack *stack) 1405 + { 1406 + PnvPHB4 *phb = &stack->phb; 1407 + 1408 + /* Unmap first always */ 1409 + if (memory_region_is_mapped(&phb->mr_regs)) { 1410 + memory_region_del_subregion(&stack->phbbar, &phb->mr_regs); 1411 + } 1412 + if (memory_region_is_mapped(&phb->xsrc.esb_mmio)) { 1413 + memory_region_del_subregion(&stack->intbar, &phb->xsrc.esb_mmio); 1414 + } 1415 + 1416 + /* Map registers if enabled */ 1417 + if (memory_region_is_mapped(&stack->phbbar)) { 1418 + memory_region_add_subregion(&stack->phbbar, 0, &phb->mr_regs); 1419 + } 1420 + 1421 + /* Map ESB if enabled */ 1422 + if (memory_region_is_mapped(&stack->intbar)) { 1423 + memory_region_add_subregion(&stack->intbar, 0, &phb->xsrc.esb_mmio); 1424 + } 1425 + 1426 + /* Check/update m32 */ 1427 + pnv_phb4_check_all_mbt(phb); 1428 + } 1429 + 1430 + void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon) 1431 + { 1432 + uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3]; 1433 + 1434 + monitor_printf(mon, "PHB4[%x:%x] Source %08x .. 
%08x\n", 1435 + phb->chip_id, phb->phb_id, 1436 + offset, offset + phb->xsrc.nr_irqs - 1); 1437 + xive_source_pic_print_info(&phb->xsrc, 0, mon); 1438 + }
+593
hw/pci-host/pnv_phb4_pec.c
··· 1 + /* 2 + * QEMU PowerPC PowerNV (POWER9) PHB4 model 3 + * 4 + * Copyright (c) 2018-2020, IBM Corporation. 5 + * 6 + * This code is licensed under the GPL version 2 or later. See the 7 + * COPYING file in the top-level directory. 8 + */ 9 + #include "qemu/osdep.h" 10 + #include "qapi/error.h" 11 + #include "qemu-common.h" 12 + #include "qemu/log.h" 13 + #include "target/ppc/cpu.h" 14 + #include "hw/ppc/fdt.h" 15 + #include "hw/pci-host/pnv_phb4_regs.h" 16 + #include "hw/pci-host/pnv_phb4.h" 17 + #include "hw/ppc/pnv_xscom.h" 18 + #include "hw/pci/pci_bridge.h" 19 + #include "hw/pci/pci_bus.h" 20 + #include "hw/ppc/pnv.h" 21 + #include "hw/qdev-properties.h" 22 + 23 + #include <libfdt.h> 24 + 25 + #define phb_pec_error(pec, fmt, ...) \ 26 + qemu_log_mask(LOG_GUEST_ERROR, "phb4_pec[%d:%d]: " fmt "\n", \ 27 + (pec)->chip_id, (pec)->index, ## __VA_ARGS__) 28 + 29 + 30 + static uint64_t pnv_pec_nest_xscom_read(void *opaque, hwaddr addr, 31 + unsigned size) 32 + { 33 + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); 34 + uint32_t reg = addr >> 3; 35 + 36 + /* TODO: add list of allowed registers and error out if not */ 37 + return pec->nest_regs[reg]; 38 + } 39 + 40 + static void pnv_pec_nest_xscom_write(void *opaque, hwaddr addr, 41 + uint64_t val, unsigned size) 42 + { 43 + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); 44 + uint32_t reg = addr >> 3; 45 + 46 + switch (reg) { 47 + case PEC_NEST_PBCQ_HW_CONFIG: 48 + case PEC_NEST_DROP_PRIO_CTRL: 49 + case PEC_NEST_PBCQ_ERR_INJECT: 50 + case PEC_NEST_PCI_NEST_CLK_TRACE_CTL: 51 + case PEC_NEST_PBCQ_PMON_CTRL: 52 + case PEC_NEST_PBCQ_PBUS_ADDR_EXT: 53 + case PEC_NEST_PBCQ_PRED_VEC_TIMEOUT: 54 + case PEC_NEST_CAPP_CTRL: 55 + case PEC_NEST_PBCQ_READ_STK_OVR: 56 + case PEC_NEST_PBCQ_WRITE_STK_OVR: 57 + case PEC_NEST_PBCQ_STORE_STK_OVR: 58 + case PEC_NEST_PBCQ_RETRY_BKOFF_CTRL: 59 + pec->nest_regs[reg] = val; 60 + break; 61 + default: 62 + phb_pec_error(pec, "%s @0x%"HWADDR_PRIx"=%"PRIx64"\n", __func__, 63 + addr, val); 64 + } 
65 + } 66 + 67 + static const MemoryRegionOps pnv_pec_nest_xscom_ops = { 68 + .read = pnv_pec_nest_xscom_read, 69 + .write = pnv_pec_nest_xscom_write, 70 + .valid.min_access_size = 8, 71 + .valid.max_access_size = 8, 72 + .impl.min_access_size = 8, 73 + .impl.max_access_size = 8, 74 + .endianness = DEVICE_BIG_ENDIAN, 75 + }; 76 + 77 + static uint64_t pnv_pec_pci_xscom_read(void *opaque, hwaddr addr, 78 + unsigned size) 79 + { 80 + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); 81 + uint32_t reg = addr >> 3; 82 + 83 + /* TODO: add list of allowed registers and error out if not */ 84 + return pec->pci_regs[reg]; 85 + } 86 + 87 + static void pnv_pec_pci_xscom_write(void *opaque, hwaddr addr, 88 + uint64_t val, unsigned size) 89 + { 90 + PnvPhb4PecState *pec = PNV_PHB4_PEC(opaque); 91 + uint32_t reg = addr >> 3; 92 + 93 + switch (reg) { 94 + case PEC_PCI_PBAIB_HW_CONFIG: 95 + case PEC_PCI_PBAIB_READ_STK_OVR: 96 + pec->pci_regs[reg] = val; 97 + break; 98 + default: 99 + phb_pec_error(pec, "%s @0x%"HWADDR_PRIx"=%"PRIx64"\n", __func__, 100 + addr, val); 101 + } 102 + } 103 + 104 + static const MemoryRegionOps pnv_pec_pci_xscom_ops = { 105 + .read = pnv_pec_pci_xscom_read, 106 + .write = pnv_pec_pci_xscom_write, 107 + .valid.min_access_size = 8, 108 + .valid.max_access_size = 8, 109 + .impl.min_access_size = 8, 110 + .impl.max_access_size = 8, 111 + .endianness = DEVICE_BIG_ENDIAN, 112 + }; 113 + 114 + static uint64_t pnv_pec_stk_nest_xscom_read(void *opaque, hwaddr addr, 115 + unsigned size) 116 + { 117 + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); 118 + uint32_t reg = addr >> 3; 119 + 120 + /* TODO: add list of allowed registers and error out if not */ 121 + return stack->nest_regs[reg]; 122 + } 123 + 124 + static void pnv_pec_stk_update_map(PnvPhb4PecStack *stack) 125 + { 126 + PnvPhb4PecState *pec = stack->pec; 127 + MemoryRegion *sysmem = pec->system_memory; 128 + uint64_t bar_en = stack->nest_regs[PEC_NEST_STK_BAR_EN]; 129 + uint64_t bar, mask, size; 130 + 
char name[64]; 131 + 132 + /* 133 + * NOTE: This will really not work well if those are remapped 134 + * after the PHB has created its sub regions. We could do better 135 + * if we had a way to resize regions but we don't really care 136 + * that much in practice as the stuff below really only happens 137 + * once early during boot 138 + */ 139 + 140 + /* Handle unmaps */ 141 + if (memory_region_is_mapped(&stack->mmbar0) && 142 + !(bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { 143 + memory_region_del_subregion(sysmem, &stack->mmbar0); 144 + } 145 + if (memory_region_is_mapped(&stack->mmbar1) && 146 + !(bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { 147 + memory_region_del_subregion(sysmem, &stack->mmbar1); 148 + } 149 + if (memory_region_is_mapped(&stack->phbbar) && 150 + !(bar_en & PEC_NEST_STK_BAR_EN_PHB)) { 151 + memory_region_del_subregion(sysmem, &stack->phbbar); 152 + } 153 + if (memory_region_is_mapped(&stack->intbar) && 154 + !(bar_en & PEC_NEST_STK_BAR_EN_INT)) { 155 + memory_region_del_subregion(sysmem, &stack->intbar); 156 + } 157 + 158 + /* Update PHB */ 159 + pnv_phb4_update_regions(stack); 160 + 161 + /* Handle maps */ 162 + if (!memory_region_is_mapped(&stack->mmbar0) && 163 + (bar_en & PEC_NEST_STK_BAR_EN_MMIO0)) { 164 + bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0] >> 8; 165 + mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR0_MASK]; 166 + size = ((~mask) >> 8) + 1; 167 + snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-mmio0", 168 + pec->chip_id, pec->index, stack->stack_no); 169 + memory_region_init(&stack->mmbar0, OBJECT(stack), name, size); 170 + memory_region_add_subregion(sysmem, bar, &stack->mmbar0); 171 + stack->mmio0_base = bar; 172 + stack->mmio0_size = size; 173 + } 174 + if (!memory_region_is_mapped(&stack->mmbar1) && 175 + (bar_en & PEC_NEST_STK_BAR_EN_MMIO1)) { 176 + bar = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1] >> 8; 177 + mask = stack->nest_regs[PEC_NEST_STK_MMIO_BAR1_MASK]; 178 + size = ((~mask) >> 8) + 1; 179 + snprintf(name, sizeof(name), 
"pec-%d.%d-stack-%d-mmio1", 180 + pec->chip_id, pec->index, stack->stack_no); 181 + memory_region_init(&stack->mmbar1, OBJECT(stack), name, size); 182 + memory_region_add_subregion(sysmem, bar, &stack->mmbar1); 183 + stack->mmio1_base = bar; 184 + stack->mmio1_size = size; 185 + } 186 + if (!memory_region_is_mapped(&stack->phbbar) && 187 + (bar_en & PEC_NEST_STK_BAR_EN_PHB)) { 188 + bar = stack->nest_regs[PEC_NEST_STK_PHB_REGS_BAR] >> 8; 189 + size = PNV_PHB4_NUM_REGS << 3; 190 + snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-phb", 191 + pec->chip_id, pec->index, stack->stack_no); 192 + memory_region_init(&stack->phbbar, OBJECT(stack), name, size); 193 + memory_region_add_subregion(sysmem, bar, &stack->phbbar); 194 + } 195 + if (!memory_region_is_mapped(&stack->intbar) && 196 + (bar_en & PEC_NEST_STK_BAR_EN_INT)) { 197 + bar = stack->nest_regs[PEC_NEST_STK_INT_BAR] >> 8; 198 + size = PNV_PHB4_MAX_INTs << 16; 199 + snprintf(name, sizeof(name), "pec-%d.%d-stack-%d-int", 200 + stack->pec->chip_id, stack->pec->index, stack->stack_no); 201 + memory_region_init(&stack->intbar, OBJECT(stack), name, size); 202 + memory_region_add_subregion(sysmem, bar, &stack->intbar); 203 + } 204 + 205 + /* Update PHB */ 206 + pnv_phb4_update_regions(stack); 207 + } 208 + 209 + static void pnv_pec_stk_nest_xscom_write(void *opaque, hwaddr addr, 210 + uint64_t val, unsigned size) 211 + { 212 + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); 213 + PnvPhb4PecState *pec = stack->pec; 214 + uint32_t reg = addr >> 3; 215 + 216 + switch (reg) { 217 + case PEC_NEST_STK_PCI_NEST_FIR: 218 + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] = val; 219 + break; 220 + case PEC_NEST_STK_PCI_NEST_FIR_CLR: 221 + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] &= val; 222 + break; 223 + case PEC_NEST_STK_PCI_NEST_FIR_SET: 224 + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR] |= val; 225 + break; 226 + case PEC_NEST_STK_PCI_NEST_FIR_MSK: 227 + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] = val; 228 + break; 
229 + case PEC_NEST_STK_PCI_NEST_FIR_MSKC: 230 + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] &= val; 231 + break; 232 + case PEC_NEST_STK_PCI_NEST_FIR_MSKS: 233 + stack->nest_regs[PEC_NEST_STK_PCI_NEST_FIR_MSK] |= val; 234 + break; 235 + case PEC_NEST_STK_PCI_NEST_FIR_ACT0: 236 + case PEC_NEST_STK_PCI_NEST_FIR_ACT1: 237 + stack->nest_regs[reg] = val; 238 + break; 239 + case PEC_NEST_STK_PCI_NEST_FIR_WOF: 240 + stack->nest_regs[reg] = 0; 241 + break; 242 + case PEC_NEST_STK_ERR_REPORT_0: 243 + case PEC_NEST_STK_ERR_REPORT_1: 244 + case PEC_NEST_STK_PBCQ_GNRL_STATUS: 245 + /* Flag error ? */ 246 + break; 247 + case PEC_NEST_STK_PBCQ_MODE: 248 + stack->nest_regs[reg] = val & 0xff00000000000000ull; 249 + break; 250 + case PEC_NEST_STK_MMIO_BAR0: 251 + case PEC_NEST_STK_MMIO_BAR0_MASK: 252 + case PEC_NEST_STK_MMIO_BAR1: 253 + case PEC_NEST_STK_MMIO_BAR1_MASK: 254 + if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & 255 + (PEC_NEST_STK_BAR_EN_MMIO0 | 256 + PEC_NEST_STK_BAR_EN_MMIO1)) { 257 + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); 258 + } 259 + stack->nest_regs[reg] = val & 0xffffffffff000000ull; 260 + break; 261 + case PEC_NEST_STK_PHB_REGS_BAR: 262 + if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_PHB) { 263 + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); 264 + } 265 + stack->nest_regs[reg] = val & 0xffffffffffc00000ull; 266 + break; 267 + case PEC_NEST_STK_INT_BAR: 268 + if (stack->nest_regs[PEC_NEST_STK_BAR_EN] & PEC_NEST_STK_BAR_EN_INT) { 269 + phb_pec_error(pec, "Changing enabled BAR unsupported\n"); 270 + } 271 + stack->nest_regs[reg] = val & 0xfffffff000000000ull; 272 + break; 273 + case PEC_NEST_STK_BAR_EN: 274 + stack->nest_regs[reg] = val & 0xf000000000000000ull; 275 + pnv_pec_stk_update_map(stack); 276 + break; 277 + case PEC_NEST_STK_DATA_FRZ_TYPE: 278 + case PEC_NEST_STK_PBCQ_TUN_BAR: 279 + /* Not used for now */ 280 + stack->nest_regs[reg] = val; 281 + break; 282 + default: 283 + qemu_log_mask(LOG_UNIMP, 
"phb4_pec: nest_xscom_write 0x%"HWADDR_PRIx 284 + "=%"PRIx64"\n", addr, val); 285 + } 286 + } 287 + 288 + static const MemoryRegionOps pnv_pec_stk_nest_xscom_ops = { 289 + .read = pnv_pec_stk_nest_xscom_read, 290 + .write = pnv_pec_stk_nest_xscom_write, 291 + .valid.min_access_size = 8, 292 + .valid.max_access_size = 8, 293 + .impl.min_access_size = 8, 294 + .impl.max_access_size = 8, 295 + .endianness = DEVICE_BIG_ENDIAN, 296 + }; 297 + 298 + static uint64_t pnv_pec_stk_pci_xscom_read(void *opaque, hwaddr addr, 299 + unsigned size) 300 + { 301 + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); 302 + uint32_t reg = addr >> 3; 303 + 304 + /* TODO: add list of allowed registers and error out if not */ 305 + return stack->pci_regs[reg]; 306 + } 307 + 308 + static void pnv_pec_stk_pci_xscom_write(void *opaque, hwaddr addr, 309 + uint64_t val, unsigned size) 310 + { 311 + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(opaque); 312 + uint32_t reg = addr >> 3; 313 + 314 + switch (reg) { 315 + case PEC_PCI_STK_PCI_FIR: 316 + stack->nest_regs[reg] = val; 317 + break; 318 + case PEC_PCI_STK_PCI_FIR_CLR: 319 + stack->nest_regs[PEC_PCI_STK_PCI_FIR] &= val; 320 + break; 321 + case PEC_PCI_STK_PCI_FIR_SET: 322 + stack->nest_regs[PEC_PCI_STK_PCI_FIR] |= val; 323 + break; 324 + case PEC_PCI_STK_PCI_FIR_MSK: 325 + stack->nest_regs[reg] = val; 326 + break; 327 + case PEC_PCI_STK_PCI_FIR_MSKC: 328 + stack->nest_regs[PEC_PCI_STK_PCI_FIR_MSK] &= val; 329 + break; 330 + case PEC_PCI_STK_PCI_FIR_MSKS: 331 + stack->nest_regs[PEC_PCI_STK_PCI_FIR_MSK] |= val; 332 + break; 333 + case PEC_PCI_STK_PCI_FIR_ACT0: 334 + case PEC_PCI_STK_PCI_FIR_ACT1: 335 + stack->nest_regs[reg] = val; 336 + break; 337 + case PEC_PCI_STK_PCI_FIR_WOF: 338 + stack->nest_regs[reg] = 0; 339 + break; 340 + case PEC_PCI_STK_ETU_RESET: 341 + stack->nest_regs[reg] = val & 0x8000000000000000ull; 342 + /* TODO: Implement reset */ 343 + break; 344 + case PEC_PCI_STK_PBAIB_ERR_REPORT: 345 + break; 346 + case 
PEC_PCI_STK_PBAIB_TX_CMD_CRED: 347 + case PEC_PCI_STK_PBAIB_TX_DAT_CRED: 348 + stack->nest_regs[reg] = val; 349 + break; 350 + default: 351 + qemu_log_mask(LOG_UNIMP, "phb4_pec_stk: pci_xscom_write 0x%"HWADDR_PRIx 352 + "=%"PRIx64"\n", addr, val); 353 + } 354 + } 355 + 356 + static const MemoryRegionOps pnv_pec_stk_pci_xscom_ops = { 357 + .read = pnv_pec_stk_pci_xscom_read, 358 + .write = pnv_pec_stk_pci_xscom_write, 359 + .valid.min_access_size = 8, 360 + .valid.max_access_size = 8, 361 + .impl.min_access_size = 8, 362 + .impl.max_access_size = 8, 363 + .endianness = DEVICE_BIG_ENDIAN, 364 + }; 365 + 366 + static void pnv_pec_instance_init(Object *obj) 367 + { 368 + PnvPhb4PecState *pec = PNV_PHB4_PEC(obj); 369 + int i; 370 + 371 + for (i = 0; i < PHB4_PEC_MAX_STACKS; i++) { 372 + object_initialize_child(obj, "stack[*]", &pec->stacks[i], 373 + sizeof(pec->stacks[i]), TYPE_PNV_PHB4_PEC_STACK, 374 + &error_abort, NULL); 375 + } 376 + } 377 + 378 + static void pnv_pec_realize(DeviceState *dev, Error **errp) 379 + { 380 + PnvPhb4PecState *pec = PNV_PHB4_PEC(dev); 381 + Error *local_err = NULL; 382 + char name[64]; 383 + int i; 384 + 385 + assert(pec->system_memory); 386 + 387 + /* Create stacks */ 388 + for (i = 0; i < pec->num_stacks; i++) { 389 + PnvPhb4PecStack *stack = &pec->stacks[i]; 390 + Object *stk_obj = OBJECT(stack); 391 + 392 + object_property_set_int(stk_obj, i, "stack-no", &error_abort); 393 + object_property_set_link(stk_obj, OBJECT(pec), "pec", &error_abort); 394 + object_property_set_bool(stk_obj, true, "realized", errp); 395 + if (local_err) { 396 + error_propagate(errp, local_err); 397 + return; 398 + } 399 + } 400 + 401 + /* Initialize the XSCOM regions for the PEC registers */ 402 + snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest", pec->chip_id, 403 + pec->index); 404 + pnv_xscom_region_init(&pec->nest_regs_mr, OBJECT(dev), 405 + &pnv_pec_nest_xscom_ops, pec, name, 406 + PHB4_PEC_NEST_REGS_COUNT); 407 + 408 + snprintf(name, sizeof(name), 
"xscom-pec-%d.%d-pci", pec->chip_id, 409 + pec->index); 410 + pnv_xscom_region_init(&pec->pci_regs_mr, OBJECT(dev), 411 + &pnv_pec_pci_xscom_ops, pec, name, 412 + PHB4_PEC_PCI_REGS_COUNT); 413 + } 414 + 415 + static int pnv_pec_dt_xscom(PnvXScomInterface *dev, void *fdt, 416 + int xscom_offset) 417 + { 418 + PnvPhb4PecState *pec = PNV_PHB4_PEC(dev); 419 + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(dev); 420 + uint32_t nbase = pecc->xscom_nest_base(pec); 421 + uint32_t pbase = pecc->xscom_pci_base(pec); 422 + int offset, i; 423 + char *name; 424 + uint32_t reg[] = { 425 + cpu_to_be32(nbase), 426 + cpu_to_be32(pecc->xscom_nest_size), 427 + cpu_to_be32(pbase), 428 + cpu_to_be32(pecc->xscom_pci_size), 429 + }; 430 + 431 + name = g_strdup_printf("pbcq@%x", nbase); 432 + offset = fdt_add_subnode(fdt, xscom_offset, name); 433 + _FDT(offset); 434 + g_free(name); 435 + 436 + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 437 + 438 + _FDT((fdt_setprop_cell(fdt, offset, "ibm,pec-index", pec->index))); 439 + _FDT((fdt_setprop_cell(fdt, offset, "#address-cells", 1))); 440 + _FDT((fdt_setprop_cell(fdt, offset, "#size-cells", 0))); 441 + _FDT((fdt_setprop(fdt, offset, "compatible", pecc->compat, 442 + pecc->compat_size))); 443 + 444 + for (i = 0; i < pec->num_stacks; i++) { 445 + PnvPhb4PecStack *stack = &pec->stacks[i]; 446 + PnvPHB4 *phb = &stack->phb; 447 + int stk_offset; 448 + 449 + name = g_strdup_printf("stack@%x", i); 450 + stk_offset = fdt_add_subnode(fdt, offset, name); 451 + _FDT(stk_offset); 452 + g_free(name); 453 + _FDT((fdt_setprop(fdt, stk_offset, "compatible", pecc->stk_compat, 454 + pecc->stk_compat_size))); 455 + _FDT((fdt_setprop_cell(fdt, stk_offset, "reg", i))); 456 + _FDT((fdt_setprop_cell(fdt, stk_offset, "ibm,phb-index", phb->phb_id))); 457 + } 458 + 459 + return 0; 460 + } 461 + 462 + static Property pnv_pec_properties[] = { 463 + DEFINE_PROP_UINT32("index", PnvPhb4PecState, index, 0), 464 + DEFINE_PROP_UINT32("num-stacks", 
PnvPhb4PecState, num_stacks, 0), 465 + DEFINE_PROP_UINT32("chip-id", PnvPhb4PecState, chip_id, 0), 466 + DEFINE_PROP_LINK("system-memory", PnvPhb4PecState, system_memory, 467 + TYPE_MEMORY_REGION, MemoryRegion *), 468 + DEFINE_PROP_END_OF_LIST(), 469 + }; 470 + 471 + static uint32_t pnv_pec_xscom_pci_base(PnvPhb4PecState *pec) 472 + { 473 + return PNV9_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index; 474 + } 475 + 476 + static uint32_t pnv_pec_xscom_nest_base(PnvPhb4PecState *pec) 477 + { 478 + return PNV9_XSCOM_PEC_NEST_BASE + 0x400 * pec->index; 479 + } 480 + 481 + static void pnv_pec_class_init(ObjectClass *klass, void *data) 482 + { 483 + DeviceClass *dc = DEVICE_CLASS(klass); 484 + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 485 + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_CLASS(klass); 486 + static const char compat[] = "ibm,power9-pbcq"; 487 + static const char stk_compat[] = "ibm,power9-phb-stack"; 488 + 489 + xdc->dt_xscom = pnv_pec_dt_xscom; 490 + 491 + dc->realize = pnv_pec_realize; 492 + device_class_set_props(dc, pnv_pec_properties); 493 + 494 + pecc->xscom_nest_base = pnv_pec_xscom_nest_base; 495 + pecc->xscom_pci_base = pnv_pec_xscom_pci_base; 496 + pecc->xscom_nest_size = PNV9_XSCOM_PEC_NEST_SIZE; 497 + pecc->xscom_pci_size = PNV9_XSCOM_PEC_PCI_SIZE; 498 + pecc->compat = compat; 499 + pecc->compat_size = sizeof(compat); 500 + pecc->stk_compat = stk_compat; 501 + pecc->stk_compat_size = sizeof(stk_compat); 502 + } 503 + 504 + static const TypeInfo pnv_pec_type_info = { 505 + .name = TYPE_PNV_PHB4_PEC, 506 + .parent = TYPE_DEVICE, 507 + .instance_size = sizeof(PnvPhb4PecState), 508 + .instance_init = pnv_pec_instance_init, 509 + .class_init = pnv_pec_class_init, 510 + .class_size = sizeof(PnvPhb4PecClass), 511 + .interfaces = (InterfaceInfo[]) { 512 + { TYPE_PNV_XSCOM_INTERFACE }, 513 + { } 514 + } 515 + }; 516 + 517 + static void pnv_pec_stk_instance_init(Object *obj) 518 + { 519 + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(obj); 520 + 521 
+ object_initialize_child(obj, "phb", &stack->phb, sizeof(stack->phb), 522 + TYPE_PNV_PHB4, &error_abort, NULL); 523 + } 524 + 525 + static void pnv_pec_stk_realize(DeviceState *dev, Error **errp) 526 + { 527 + PnvPhb4PecStack *stack = PNV_PHB4_PEC_STACK(dev); 528 + PnvPhb4PecState *pec = stack->pec; 529 + char name[64]; 530 + 531 + assert(pec); 532 + 533 + /* Initialize the XSCOM regions for the stack registers */ 534 + snprintf(name, sizeof(name), "xscom-pec-%d.%d-nest-stack-%d", 535 + pec->chip_id, pec->index, stack->stack_no); 536 + pnv_xscom_region_init(&stack->nest_regs_mr, OBJECT(stack), 537 + &pnv_pec_stk_nest_xscom_ops, stack, name, 538 + PHB4_PEC_NEST_STK_REGS_COUNT); 539 + 540 + snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d", 541 + pec->chip_id, pec->index, stack->stack_no); 542 + pnv_xscom_region_init(&stack->pci_regs_mr, OBJECT(stack), 543 + &pnv_pec_stk_pci_xscom_ops, stack, name, 544 + PHB4_PEC_PCI_STK_REGS_COUNT); 545 + 546 + /* PHB pass-through */ 547 + snprintf(name, sizeof(name), "xscom-pec-%d.%d-pci-stack-%d-phb", 548 + pec->chip_id, pec->index, stack->stack_no); 549 + pnv_xscom_region_init(&stack->phb_regs_mr, OBJECT(&stack->phb), 550 + &pnv_phb4_xscom_ops, &stack->phb, name, 0x40); 551 + 552 + /* 553 + * Let the machine/chip realize the PHB object to customize more 554 + * easily some fields 555 + */ 556 + } 557 + 558 + static Property pnv_pec_stk_properties[] = { 559 + DEFINE_PROP_UINT32("stack-no", PnvPhb4PecStack, stack_no, 0), 560 + DEFINE_PROP_LINK("pec", PnvPhb4PecStack, pec, TYPE_PNV_PHB4_PEC, 561 + PnvPhb4PecState *), 562 + DEFINE_PROP_END_OF_LIST(), 563 + }; 564 + 565 + static void pnv_pec_stk_class_init(ObjectClass *klass, void *data) 566 + { 567 + DeviceClass *dc = DEVICE_CLASS(klass); 568 + 569 + device_class_set_props(dc, pnv_pec_stk_properties); 570 + dc->realize = pnv_pec_stk_realize; 571 + 572 + /* TODO: reset regs ? 
*/ 573 + } 574 + 575 + static const TypeInfo pnv_pec_stk_type_info = { 576 + .name = TYPE_PNV_PHB4_PEC_STACK, 577 + .parent = TYPE_DEVICE, 578 + .instance_size = sizeof(PnvPhb4PecStack), 579 + .instance_init = pnv_pec_stk_instance_init, 580 + .class_init = pnv_pec_stk_class_init, 581 + .interfaces = (InterfaceInfo[]) { 582 + { TYPE_PNV_XSCOM_INTERFACE }, 583 + { } 584 + } 585 + }; 586 + 587 + static void pnv_pec_register_types(void) 588 + { 589 + type_register_static(&pnv_pec_type_info); 590 + type_register_static(&pnv_pec_stk_type_info); 591 + } 592 + 593 + type_init(pnv_pec_register_types);
+2
hw/ppc/Kconfig
··· 135 135 default y 136 136 depends on PSERIES 137 137 select XIVE 138 + select PCI 139 + select PCIE_PORT 138 140 139 141 config XIVE_KVM 140 142 bool
+107
hw/ppc/pnv.c
··· 40 40 #include "hw/intc/intc.h" 41 41 #include "hw/ipmi/ipmi.h" 42 42 #include "target/ppc/mmu-hash64.h" 43 + #include "hw/pci/msi.h" 43 44 44 45 #include "hw/ppc/xics.h" 45 46 #include "hw/qdev-properties.h" ··· 622 623 static void pnv_chip_power9_pic_print_info(PnvChip *chip, Monitor *mon) 623 624 { 624 625 Pnv9Chip *chip9 = PNV9_CHIP(chip); 626 + int i, j; 625 627 626 628 pnv_xive_pic_print_info(&chip9->xive, mon); 627 629 pnv_psi_pic_print_info(&chip9->psi, mon); 630 + 631 + for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { 632 + PnvPhb4PecState *pec = &chip9->pecs[i]; 633 + for (j = 0; j < pec->num_stacks; j++) { 634 + pnv_phb4_pic_print_info(&pec->stacks[j].phb, mon); 635 + } 636 + } 628 637 } 629 638 630 639 static uint64_t pnv_chip_power8_xscom_core_base(PnvChip *chip, ··· 747 756 exit(1); 748 757 } 749 758 } 759 + 760 + /* MSIs are supported on this platform */ 761 + msi_nonbroken = true; 750 762 751 763 /* 752 764 * Check compatibility of the specified CPU with the machine ··· 1230 1242 1231 1243 static void pnv_chip_power9_instance_init(Object *obj) 1232 1244 { 1245 + PnvChip *chip = PNV_CHIP(obj); 1233 1246 Pnv9Chip *chip9 = PNV9_CHIP(obj); 1247 + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); 1248 + int i; 1234 1249 1235 1250 object_initialize_child(obj, "xive", &chip9->xive, sizeof(chip9->xive), 1236 1251 TYPE_PNV_XIVE, &error_abort, NULL); ··· 1248 1263 1249 1264 object_initialize_child(obj, "homer", &chip9->homer, sizeof(chip9->homer), 1250 1265 TYPE_PNV9_HOMER, &error_abort, NULL); 1266 + 1267 + for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { 1268 + object_initialize_child(obj, "pec[*]", &chip9->pecs[i], 1269 + sizeof(chip9->pecs[i]), TYPE_PNV_PHB4_PEC, 1270 + &error_abort, NULL); 1271 + } 1272 + 1273 + /* 1274 + * Number of PHBs is the chip default 1275 + */ 1276 + chip->num_phbs = pcc->num_phbs; 1251 1277 } 1252 1278 1253 1279 static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp) ··· 1276 1302 } 1277 1303 } 1278 1304 1305 + static void 
pnv_chip_power9_phb_realize(PnvChip *chip, Error **errp) 1306 + { 1307 + Pnv9Chip *chip9 = PNV9_CHIP(chip); 1308 + Error *local_err = NULL; 1309 + int i, j; 1310 + int phb_id = 0; 1311 + 1312 + for (i = 0; i < PNV9_CHIP_MAX_PEC; i++) { 1313 + PnvPhb4PecState *pec = &chip9->pecs[i]; 1314 + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); 1315 + uint32_t pec_nest_base; 1316 + uint32_t pec_pci_base; 1317 + 1318 + object_property_set_int(OBJECT(pec), i, "index", &error_fatal); 1319 + /* 1320 + * PEC0 -> 1 stack 1321 + * PEC1 -> 2 stacks 1322 + * PEC2 -> 3 stacks 1323 + */ 1324 + object_property_set_int(OBJECT(pec), i + 1, "num-stacks", 1325 + &error_fatal); 1326 + object_property_set_int(OBJECT(pec), chip->chip_id, "chip-id", 1327 + &error_fatal); 1328 + object_property_set_link(OBJECT(pec), OBJECT(get_system_memory()), 1329 + "system-memory", &error_abort); 1330 + object_property_set_bool(OBJECT(pec), true, "realized", &local_err); 1331 + if (local_err) { 1332 + error_propagate(errp, local_err); 1333 + return; 1334 + } 1335 + 1336 + pec_nest_base = pecc->xscom_nest_base(pec); 1337 + pec_pci_base = pecc->xscom_pci_base(pec); 1338 + 1339 + pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr); 1340 + pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr); 1341 + 1342 + for (j = 0; j < pec->num_stacks && phb_id < chip->num_phbs; 1343 + j++, phb_id++) { 1344 + PnvPhb4PecStack *stack = &pec->stacks[j]; 1345 + Object *obj = OBJECT(&stack->phb); 1346 + 1347 + object_property_set_int(obj, phb_id, "index", &error_fatal); 1348 + object_property_set_int(obj, chip->chip_id, "chip-id", 1349 + &error_fatal); 1350 + object_property_set_int(obj, PNV_PHB4_VERSION, "version", 1351 + &error_fatal); 1352 + object_property_set_int(obj, PNV_PHB4_DEVICE_ID, "device-id", 1353 + &error_fatal); 1354 + object_property_set_link(obj, OBJECT(stack), "stack", &error_abort); 1355 + object_property_set_bool(obj, true, "realized", &local_err); 1356 + if (local_err) { 1357 + 
error_propagate(errp, local_err); 1358 + return; 1359 + } 1360 + qdev_set_parent_bus(DEVICE(obj), sysbus_get_default()); 1361 + 1362 + /* Populate the XSCOM address space. */ 1363 + pnv_xscom_add_subregion(chip, 1364 + pec_nest_base + 0x40 * (stack->stack_no + 1), 1365 + &stack->nest_regs_mr); 1366 + pnv_xscom_add_subregion(chip, 1367 + pec_pci_base + 0x40 * (stack->stack_no + 1), 1368 + &stack->pci_regs_mr); 1369 + pnv_xscom_add_subregion(chip, 1370 + pec_pci_base + PNV9_XSCOM_PEC_PCI_STK0 + 1371 + 0x40 * stack->stack_no, 1372 + &stack->phb_regs_mr); 1373 + } 1374 + } 1375 + } 1376 + 1279 1377 static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) 1280 1378 { 1281 1379 PnvChipClass *pcc = PNV_CHIP_GET_CLASS(dev); ··· 1378 1476 /* Homer mmio region */ 1379 1477 memory_region_add_subregion(get_system_memory(), PNV9_HOMER_BASE(chip), 1380 1478 &chip9->homer.regs); 1479 + 1480 + /* PHBs */ 1481 + pnv_chip_power9_phb_realize(chip, &local_err); 1482 + if (local_err) { 1483 + error_propagate(errp, local_err); 1484 + return; 1485 + } 1381 1486 } 1382 1487 1383 1488 static uint32_t pnv_chip_power9_xscom_pcba(PnvChip *chip, uint64_t addr) ··· 1404 1509 k->xscom_core_base = pnv_chip_power9_xscom_core_base; 1405 1510 k->xscom_pcba = pnv_chip_power9_xscom_pcba; 1406 1511 dc->desc = "PowerNV Chip POWER9"; 1512 + k->num_phbs = 6; 1407 1513 1408 1514 device_class_set_parent_realize(dc, pnv_chip_power9_realize, 1409 1515 &k->parent_realize); ··· 1608 1714 DEFINE_PROP_UINT32("nr-cores", PnvChip, nr_cores, 1), 1609 1715 DEFINE_PROP_UINT64("cores-mask", PnvChip, cores_mask, 0x0), 1610 1716 DEFINE_PROP_UINT32("nr-threads", PnvChip, nr_threads, 1), 1717 + DEFINE_PROP_UINT32("num-phbs", PnvChip, num_phbs, 0), 1611 1718 DEFINE_PROP_END_OF_LIST(), 1612 1719 }; 1613 1720
+230
include/hw/pci-host/pnv_phb4.h
··· 1 + /* 2 + * QEMU PowerPC PowerNV (POWER9) PHB4 model 3 + * 4 + * Copyright (c) 2018-2020, IBM Corporation. 5 + * 6 + * This code is licensed under the GPL version 2 or later. See the 7 + * COPYING file in the top-level directory. 8 + */ 9 + 10 + #ifndef PCI_HOST_PNV_PHB4_H 11 + #define PCI_HOST_PNV_PHB4_H 12 + 13 + #include "hw/pci/pcie_host.h" 14 + #include "hw/pci/pcie_port.h" 15 + #include "hw/ppc/xive.h" 16 + 17 + typedef struct PnvPhb4PecState PnvPhb4PecState; 18 + typedef struct PnvPhb4PecStack PnvPhb4PecStack; 19 + typedef struct PnvPHB4 PnvPHB4; 20 + typedef struct PnvChip PnvChip; 21 + 22 + /* 23 + * We have one such address space wrapper per possible device under 24 + * the PHB since they need to be assigned statically at qemu device 25 + * creation time. The relationship to a PE is done later 26 + * dynamically. This means we can potentially create a lot of these 27 + * guys. Q35 stores them as some kind of radix tree but we never 28 + * really need to do fast lookups so instead we simply keep a QLIST of 29 + * them for now, we can add the radix if needed later on. 30 + * 31 + * We do cache the PE number to speed things up a bit though. 
32 + */ 33 + typedef struct PnvPhb4DMASpace { 34 + PCIBus *bus; 35 + uint8_t devfn; 36 + int pe_num; /* Cached PE number */ 37 + #define PHB_INVALID_PE (-1) 38 + PnvPHB4 *phb; 39 + AddressSpace dma_as; 40 + IOMMUMemoryRegion dma_mr; 41 + MemoryRegion msi32_mr; 42 + MemoryRegion msi64_mr; 43 + QLIST_ENTRY(PnvPhb4DMASpace) list; 44 + } PnvPhb4DMASpace; 45 + 46 + /* 47 + * PHB4 PCIe Root port 48 + */ 49 + #define TYPE_PNV_PHB4_ROOT_BUS "pnv-phb4-root-bus" 50 + #define TYPE_PNV_PHB4_ROOT_PORT "pnv-phb4-root-port" 51 + 52 + typedef struct PnvPHB4RootPort { 53 + PCIESlot parent_obj; 54 + } PnvPHB4RootPort; 55 + 56 + /* 57 + * PHB4 PCIe Host Bridge for PowerNV machines (POWER9) 58 + */ 59 + #define TYPE_PNV_PHB4 "pnv-phb4" 60 + #define PNV_PHB4(obj) OBJECT_CHECK(PnvPHB4, (obj), TYPE_PNV_PHB4) 61 + 62 + #define PNV_PHB4_MAX_LSIs 8 63 + #define PNV_PHB4_MAX_INTs 4096 64 + #define PNV_PHB4_MAX_MIST (PNV_PHB4_MAX_INTs >> 2) 65 + #define PNV_PHB4_MAX_MMIO_WINDOWS 32 66 + #define PNV_PHB4_MIN_MMIO_WINDOWS 16 67 + #define PNV_PHB4_NUM_REGS (0x3000 >> 3) 68 + #define PNV_PHB4_MAX_PEs 512 69 + #define PNV_PHB4_MAX_TVEs (PNV_PHB4_MAX_PEs * 2) 70 + #define PNV_PHB4_MAX_PEEVs (PNV_PHB4_MAX_PEs / 64) 71 + #define PNV_PHB4_MAX_MBEs (PNV_PHB4_MAX_MMIO_WINDOWS * 2) 72 + 73 + #define PNV_PHB4_VERSION 0x000000a400000002ull 74 + #define PNV_PHB4_DEVICE_ID 0x04c1 75 + 76 + #define PCI_MMIO_TOTAL_SIZE (0x1ull << 60) 77 + 78 + struct PnvPHB4 { 79 + PCIExpressHost parent_obj; 80 + 81 + PnvPHB4RootPort root; 82 + 83 + uint32_t chip_id; 84 + uint32_t phb_id; 85 + 86 + uint64_t version; 87 + uint16_t device_id; 88 + 89 + char bus_path[8]; 90 + 91 + /* Main register images */ 92 + uint64_t regs[PNV_PHB4_NUM_REGS]; 93 + MemoryRegion mr_regs; 94 + 95 + /* Extra SCOM-only register */ 96 + uint64_t scom_hv_ind_addr_reg; 97 + 98 + /* 99 + * Geometry of the PHB. There are two types, small and big PHBs, a 100 + * number of resources (number of PEs, windows etc...) 
are doubled 101 + * for a big PHB 102 + */ 103 + bool big_phb; 104 + 105 + /* Memory regions for MMIO space */ 106 + MemoryRegion mr_mmio[PNV_PHB4_MAX_MMIO_WINDOWS]; 107 + 108 + /* PCI side space */ 109 + MemoryRegion pci_mmio; 110 + MemoryRegion pci_io; 111 + 112 + /* On-chip IODA tables */ 113 + uint64_t ioda_LIST[PNV_PHB4_MAX_LSIs]; 114 + uint64_t ioda_MIST[PNV_PHB4_MAX_MIST]; 115 + uint64_t ioda_TVT[PNV_PHB4_MAX_TVEs]; 116 + uint64_t ioda_MBT[PNV_PHB4_MAX_MBEs]; 117 + uint64_t ioda_MDT[PNV_PHB4_MAX_PEs]; 118 + uint64_t ioda_PEEV[PNV_PHB4_MAX_PEEVs]; 119 + 120 + /* 121 + * The internal PESTA/B is 2 bits per PE split into two tables, we 122 + * store them in a single array here to avoid wasting space. 123 + */ 124 + uint8_t ioda_PEST_AB[PNV_PHB4_MAX_PEs]; 125 + 126 + /* P9 Interrupt generation */ 127 + XiveSource xsrc; 128 + qemu_irq *qirqs; 129 + 130 + PnvPhb4PecStack *stack; 131 + 132 + QLIST_HEAD(, PnvPhb4DMASpace) dma_spaces; 133 + }; 134 + 135 + void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon); 136 + void pnv_phb4_update_regions(PnvPhb4PecStack *stack); 137 + extern const MemoryRegionOps pnv_phb4_xscom_ops; 138 + 139 + /* 140 + * PHB4 PEC (PCI Express Controller) 141 + */ 142 + #define TYPE_PNV_PHB4_PEC "pnv-phb4-pec" 143 + #define PNV_PHB4_PEC(obj) \ 144 + OBJECT_CHECK(PnvPhb4PecState, (obj), TYPE_PNV_PHB4_PEC) 145 + 146 + #define TYPE_PNV_PHB4_PEC_STACK "pnv-phb4-pec-stack" 147 + #define PNV_PHB4_PEC_STACK(obj) \ 148 + OBJECT_CHECK(PnvPhb4PecStack, (obj), TYPE_PNV_PHB4_PEC_STACK) 149 + 150 + /* Per-stack data */ 151 + struct PnvPhb4PecStack { 152 + DeviceState parent; 153 + 154 + /* My own stack number */ 155 + uint32_t stack_no; 156 + 157 + /* Nest registers */ 158 + #define PHB4_PEC_NEST_STK_REGS_COUNT 0x17 159 + uint64_t nest_regs[PHB4_PEC_NEST_STK_REGS_COUNT]; 160 + MemoryRegion nest_regs_mr; 161 + 162 + /* PCI registers (excluding pass-through) */ 163 + #define PHB4_PEC_PCI_STK_REGS_COUNT 0xf 164 + uint64_t 
pci_regs[PHB4_PEC_PCI_STK_REGS_COUNT]; 165 + MemoryRegion pci_regs_mr; 166 + 167 + /* PHB pass-through XSCOM */ 168 + MemoryRegion phb_regs_mr; 169 + 170 + /* Memory windows from PowerBus to PHB */ 171 + MemoryRegion mmbar0; 172 + MemoryRegion mmbar1; 173 + MemoryRegion phbbar; 174 + MemoryRegion intbar; 175 + uint64_t mmio0_base; 176 + uint64_t mmio0_size; 177 + uint64_t mmio1_base; 178 + uint64_t mmio1_size; 179 + 180 + /* The owner PEC */ 181 + PnvPhb4PecState *pec; 182 + 183 + /* The actual PHB */ 184 + PnvPHB4 phb; 185 + }; 186 + 187 + struct PnvPhb4PecState { 188 + DeviceState parent; 189 + 190 + /* PEC number in chip */ 191 + uint32_t index; 192 + uint32_t chip_id; 193 + 194 + MemoryRegion *system_memory; 195 + 196 + /* Nest registers, excuding per-stack */ 197 + #define PHB4_PEC_NEST_REGS_COUNT 0xf 198 + uint64_t nest_regs[PHB4_PEC_NEST_REGS_COUNT]; 199 + MemoryRegion nest_regs_mr; 200 + 201 + /* PCI registers, excluding per-stack */ 202 + #define PHB4_PEC_PCI_REGS_COUNT 0x2 203 + uint64_t pci_regs[PHB4_PEC_PCI_REGS_COUNT]; 204 + MemoryRegion pci_regs_mr; 205 + 206 + /* Stacks */ 207 + #define PHB4_PEC_MAX_STACKS 3 208 + uint32_t num_stacks; 209 + PnvPhb4PecStack stacks[PHB4_PEC_MAX_STACKS]; 210 + }; 211 + 212 + #define PNV_PHB4_PEC_CLASS(klass) \ 213 + OBJECT_CLASS_CHECK(PnvPhb4PecClass, (klass), TYPE_PNV_PHB4_PEC) 214 + #define PNV_PHB4_PEC_GET_CLASS(obj) \ 215 + OBJECT_GET_CLASS(PnvPhb4PecClass, (obj), TYPE_PNV_PHB4_PEC) 216 + 217 + typedef struct PnvPhb4PecClass { 218 + DeviceClass parent_class; 219 + 220 + uint32_t (*xscom_nest_base)(PnvPhb4PecState *pec); 221 + uint32_t xscom_nest_size; 222 + uint32_t (*xscom_pci_base)(PnvPhb4PecState *pec); 223 + uint32_t xscom_pci_size; 224 + const char *compat; 225 + int compat_size; 226 + const char *stk_compat; 227 + int stk_compat_size; 228 + } PnvPhb4PecClass; 229 + 230 + #endif /* PCI_HOST_PNV_PHB4_H */
+553
include/hw/pci-host/pnv_phb4_regs.h
··· 1 + /* 2 + * QEMU PowerPC PowerNV (POWER9) PHB4 model 3 + * 4 + * Copyright (c) 2013-2020, IBM Corporation. 5 + * 6 + * This code is licensed under the GPL version 2 or later. See the 7 + * COPYING file in the top-level directory. 8 + */ 9 + 10 + #ifndef PCI_HOST_PNV_PHB4_REGS_H 11 + #define PCI_HOST_PNV_PHB4_REGS_H 12 + 13 + /* 14 + * PEC XSCOM registers 15 + * 16 + * There a 3 PECs in P9. Each PEC can have several PHBs. Each PEC has some 17 + * "global" registers and some "per-stack" (per-PHB) registers. Those are 18 + * organized in two XSCOM ranges, the "Nest" range and the "PCI" range, each 19 + * range contains both some "PEC" registers and some "per-stack" registers. 20 + * 21 + * Finally the PCI range also contains an additional range per stack that 22 + * passes through to some of the PHB own registers. 23 + * 24 + * PEC0 can contain 1 PHB (PHB0) 25 + * PEC1 can contain 2 PHBs (PHB1 and PHB2) 26 + * PEC2 can contain 3 PHBs (PHB3, PHB4 and PHB5) 27 + */ 28 + 29 + /* 30 + * This is the "stack" offset, it's the offset from a given range base 31 + * to the first "per-stack" registers and also the stride between 32 + * stacks, thus for PEC2, the global registers are at offset 0, the 33 + * PHB3 registers at offset 0x40, the PHB4 at offset 0x80 etc.... 34 + * 35 + * It is *also* the offset to the pass-through SCOM region but in this case 36 + * it is 0 based, ie PHB3 is at 0x100 PHB4 is a 0x140 etc.. 
37 + */ 38 + #define PEC_STACK_OFFSET 0x40 39 + 40 + /* XSCOM Nest global registers */ 41 + #define PEC_NEST_PBCQ_HW_CONFIG 0x00 42 + #define PEC_NEST_DROP_PRIO_CTRL 0x01 43 + #define PEC_NEST_PBCQ_ERR_INJECT 0x02 44 + #define PEC_NEST_PCI_NEST_CLK_TRACE_CTL 0x03 45 + #define PEC_NEST_PBCQ_PMON_CTRL 0x04 46 + #define PEC_NEST_PBCQ_PBUS_ADDR_EXT 0x05 47 + #define PEC_NEST_PBCQ_PRED_VEC_TIMEOUT 0x06 48 + #define PEC_NEST_CAPP_CTRL 0x07 49 + #define PEC_NEST_PBCQ_READ_STK_OVR 0x08 50 + #define PEC_NEST_PBCQ_WRITE_STK_OVR 0x09 51 + #define PEC_NEST_PBCQ_STORE_STK_OVR 0x0a 52 + #define PEC_NEST_PBCQ_RETRY_BKOFF_CTRL 0x0b 53 + 54 + /* XSCOM Nest per-stack registers */ 55 + #define PEC_NEST_STK_PCI_NEST_FIR 0x00 56 + #define PEC_NEST_STK_PCI_NEST_FIR_CLR 0x01 57 + #define PEC_NEST_STK_PCI_NEST_FIR_SET 0x02 58 + #define PEC_NEST_STK_PCI_NEST_FIR_MSK 0x03 59 + #define PEC_NEST_STK_PCI_NEST_FIR_MSKC 0x04 60 + #define PEC_NEST_STK_PCI_NEST_FIR_MSKS 0x05 61 + #define PEC_NEST_STK_PCI_NEST_FIR_ACT0 0x06 62 + #define PEC_NEST_STK_PCI_NEST_FIR_ACT1 0x07 63 + #define PEC_NEST_STK_PCI_NEST_FIR_WOF 0x08 64 + #define PEC_NEST_STK_ERR_REPORT_0 0x0a 65 + #define PEC_NEST_STK_ERR_REPORT_1 0x0b 66 + #define PEC_NEST_STK_PBCQ_GNRL_STATUS 0x0c 67 + #define PEC_NEST_STK_PBCQ_MODE 0x0d 68 + #define PEC_NEST_STK_MMIO_BAR0 0x0e 69 + #define PEC_NEST_STK_MMIO_BAR0_MASK 0x0f 70 + #define PEC_NEST_STK_MMIO_BAR1 0x10 71 + #define PEC_NEST_STK_MMIO_BAR1_MASK 0x11 72 + #define PEC_NEST_STK_PHB_REGS_BAR 0x12 73 + #define PEC_NEST_STK_INT_BAR 0x13 74 + #define PEC_NEST_STK_BAR_EN 0x14 75 + #define PEC_NEST_STK_BAR_EN_MMIO0 PPC_BIT(0) 76 + #define PEC_NEST_STK_BAR_EN_MMIO1 PPC_BIT(1) 77 + #define PEC_NEST_STK_BAR_EN_PHB PPC_BIT(2) 78 + #define PEC_NEST_STK_BAR_EN_INT PPC_BIT(3) 79 + #define PEC_NEST_STK_DATA_FRZ_TYPE 0x15 80 + #define PEC_NEST_STK_PBCQ_TUN_BAR 0x16 81 + 82 + /* XSCOM PCI global registers */ 83 + #define PEC_PCI_PBAIB_HW_CONFIG 0x00 84 + #define PEC_PCI_PBAIB_READ_STK_OVR 0x02 85 + 86 + 
/* XSCOM PCI per-stack registers */
#define PEC_PCI_STK_PCI_FIR               0x00
#define PEC_PCI_STK_PCI_FIR_CLR           0x01
#define PEC_PCI_STK_PCI_FIR_SET           0x02
#define PEC_PCI_STK_PCI_FIR_MSK           0x03
#define PEC_PCI_STK_PCI_FIR_MSKC          0x04
#define PEC_PCI_STK_PCI_FIR_MSKS          0x05
#define PEC_PCI_STK_PCI_FIR_ACT0          0x06
#define PEC_PCI_STK_PCI_FIR_ACT1          0x07
#define PEC_PCI_STK_PCI_FIR_WOF           0x08
#define PEC_PCI_STK_ETU_RESET             0x0a
#define PEC_PCI_STK_PBAIB_ERR_REPORT      0x0b
#define PEC_PCI_STK_PBAIB_TX_CMD_CRED     0x0d
#define PEC_PCI_STK_PBAIB_TX_DAT_CRED     0x0e

/*
 * PHB "SCOM" registers. This is accessed via the above window
 * and provides a backdoor to the PHB when the AIB bus is not
 * functional. Some of these directly map some of the PHB MMIO
 * registers, some are specific and allow indirect access to a
 * wider range of PHB registers.
 */
#define PHB_SCOM_HV_IND_ADDR              0x00
#define   PHB_SCOM_HV_IND_ADDR_VALID      PPC_BIT(0)
#define   PHB_SCOM_HV_IND_ADDR_4B         PPC_BIT(1)
#define   PHB_SCOM_HV_IND_ADDR_AUTOINC    PPC_BIT(2)
#define   PHB_SCOM_HV_IND_ADDR_ADDR       PPC_BITMASK(51, 63)
#define PHB_SCOM_HV_IND_DATA              0x01
#define PHB_SCOM_ETU_LEM_FIR              0x08
#define PHB_SCOM_ETU_LEM_FIR_AND          0x09
#define PHB_SCOM_ETU_LEM_FIR_OR           0x0a
#define PHB_SCOM_ETU_LEM_FIR_MSK          0x0b
#define PHB_SCOM_ETU_LEM_ERR_MSK_AND      0x0c
#define PHB_SCOM_ETU_LEM_ERR_MSK_OR       0x0d
#define PHB_SCOM_ETU_LEM_ACT0             0x0e
#define PHB_SCOM_ETU_LEM_ACT1             0x0f
#define PHB_SCOM_ETU_LEM_WOF              0x10
#define PHB_SCOM_ETU_PMON_CONFIG          0x17
#define PHB_SCOM_ETU_PMON_CTR0            0x18
#define PHB_SCOM_ETU_PMON_CTR1            0x19
#define PHB_SCOM_ETU_PMON_CTR2            0x1a
#define PHB_SCOM_ETU_PMON_CTR3            0x1b

/*
 * PHB MMIO registers
 */

/* PHB Fundamental register set A */
#define PHB_LSI_SOURCE_ID                 0x100
#define   PHB_LSI_SRC_ID                  PPC_BITMASK(4, 12)
#define PHB_DMA_CHAN_STATUS               0x110
#define   PHB_DMA_CHAN_ANY_ERR            PPC_BIT(27)
#define   PHB_DMA_CHAN_ANY_ERR1           PPC_BIT(28)
#define   PHB_DMA_CHAN_ANY_FREEZE         PPC_BIT(29)
#define PHB_CPU_LOADSTORE_STATUS          0x120
#define   PHB_CPU_LS_ANY_ERR              PPC_BIT(27)
#define   PHB_CPU_LS_ANY_ERR1             PPC_BIT(28)
#define   PHB_CPU_LS_ANY_FREEZE           PPC_BIT(29)
#define PHB_CONFIG_DATA                   0x130
#define PHB_LOCK0                         0x138
#define PHB_CONFIG_ADDRESS                0x140
#define   PHB_CA_ENABLE                   PPC_BIT(0)
#define   PHB_CA_STATUS                   PPC_BITMASK(1, 3)
#define     PHB_CA_STATUS_GOOD            0
#define     PHB_CA_STATUS_UR              1
#define     PHB_CA_STATUS_CRS             2
#define     PHB_CA_STATUS_CA              4
#define   PHB_CA_BUS                      PPC_BITMASK(4, 11)
#define   PHB_CA_DEV                      PPC_BITMASK(12, 16)
#define   PHB_CA_FUNC                     PPC_BITMASK(17, 19)
#define   PHB_CA_BDFN                     PPC_BITMASK(4, 19)  /* bus,dev,func */
#define   PHB_CA_REG                      PPC_BITMASK(20, 31)
#define   PHB_CA_PE                       PPC_BITMASK(39, 47)
#define PHB_LOCK1                         0x148
#define PHB_PHB4_CONFIG                   0x160
#define   PHB_PHB4C_32BIT_MSI_EN          PPC_BIT(8)
#define   PHB_PHB4C_64BIT_MSI_EN          PPC_BIT(14)
#define PHB_RTT_BAR                       0x168
#define   PHB_RTT_BAR_ENABLE              PPC_BIT(0)
#define   PHB_RTT_BASE_ADDRESS_MASK       PPC_BITMASK(8, 46)
#define PHB_PELTV_BAR                     0x188
#define   PHB_PELTV_BAR_ENABLE            PPC_BIT(0)
#define   PHB_PELTV_BASE_ADDRESS          PPC_BITMASK(8, 50)
#define PHB_M32_START_ADDR                0x1a0
#define PHB_PEST_BAR                      0x1a8
#define   PHB_PEST_BAR_ENABLE             PPC_BIT(0)
#define   PHB_PEST_BASE_ADDRESS           PPC_BITMASK(8, 51)
#define PHB_ASN_CMPM                      0x1C0
#define   PHB_ASN_CMPM_ENABLE             PPC_BIT(63)
#define PHB_CAPI_CMPM                     0x1C8
#define   PHB_CAPI_CMPM_ENABLE            PPC_BIT(63)
#define PHB_M64_AOMASK                    0x1d0
#define PHB_M64_UPPER_BITS                0x1f0
#define PHB_NXLATE_PREFIX                 0x1f8
#define PHB_DMARD_SYNC                    0x200
#define   PHB_DMARD_SYNC_START            PPC_BIT(0)
#define   PHB_DMARD_SYNC_COMPLETE         PPC_BIT(1)
#define PHB_RTC_INVALIDATE                0x208
#define   PHB_RTC_INVALIDATE_ALL          PPC_BIT(0)
#define   PHB_RTC_INVALIDATE_RID          PPC_BITMASK(16, 31)
#define PHB_TCE_KILL                      0x210
#define   PHB_TCE_KILL_ALL                PPC_BIT(0)
#define   PHB_TCE_KILL_PE                 PPC_BIT(1)
#define   PHB_TCE_KILL_ONE                PPC_BIT(2)
#define   PHB_TCE_KILL_PSEL               PPC_BIT(3)
#define   PHB_TCE_KILL_64K                0x1000 /* Address override */
#define   PHB_TCE_KILL_2M                 0x2000 /* Address override */
#define   PHB_TCE_KILL_1G                 0x3000 /* Address override */
#define   PHB_TCE_KILL_PENUM              PPC_BITMASK(55, 63)
#define PHB_TCE_SPEC_CTL                  0x218
#define PHB_IODA_ADDR                     0x220
#define   PHB_IODA_AD_AUTOINC             PPC_BIT(0)
#define   PHB_IODA_AD_TSEL                PPC_BITMASK(11, 15)
#define   PHB_IODA_AD_MIST_PWV            PPC_BITMASK(28, 31)
#define   PHB_IODA_AD_TADR                PPC_BITMASK(54, 63)
#define PHB_IODA_DATA0                    0x228
#define PHB_PHB4_GEN_CAP                  0x250
#define PHB_PHB4_TCE_CAP                  0x258
#define PHB_PHB4_IRQ_CAP                  0x260
#define PHB_PHB4_EEH_CAP                  0x268
#define PHB_PAPR_ERR_INJ_CTL              0x2b0
#define   PHB_PAPR_ERR_INJ_CTL_INB        PPC_BIT(0)
#define   PHB_PAPR_ERR_INJ_CTL_OUTB       PPC_BIT(1)
#define   PHB_PAPR_ERR_INJ_CTL_STICKY     PPC_BIT(2)
#define   PHB_PAPR_ERR_INJ_CTL_CFG        PPC_BIT(3)
#define   PHB_PAPR_ERR_INJ_CTL_RD         PPC_BIT(4)
#define   PHB_PAPR_ERR_INJ_CTL_WR         PPC_BIT(5)
#define   PHB_PAPR_ERR_INJ_CTL_FREEZE     PPC_BIT(6)
#define PHB_PAPR_ERR_INJ_ADDR             0x2b8
#define   PHB_PAPR_ERR_INJ_ADDR_MMIO      PPC_BITMASK(16, 63)
#define PHB_PAPR_ERR_INJ_MASK             0x2c0
#define   PHB_PAPR_ERR_INJ_MASK_CFG       PPC_BITMASK(4, 11)
#define   PHB_PAPR_ERR_INJ_MASK_CFG_ALL   PPC_BITMASK(4, 19)
#define   PHB_PAPR_ERR_INJ_MASK_MMIO      PPC_BITMASK(16, 63)
#define PHB_ETU_ERR_SUMMARY               0x2c8
#define PHB_INT_NOTIFY_ADDR               0x300
#define PHB_INT_NOTIFY_INDEX              0x308

/* Fundamental register set B */
#define PHB_VERSION                       0x800
#define PHB_CTRLR                         0x810
228 + #define PHB_CTRLR_IRQ_PGSZ_64K PPC_BIT(11) 229 + #define PHB_CTRLR_IRQ_STORE_EOI PPC_BIT(12) 230 + #define PHB_CTRLR_MMIO_RD_STRICT PPC_BIT(13) 231 + #define PHB_CTRLR_MMIO_EEH_DISABLE PPC_BIT(14) 232 + #define PHB_CTRLR_CFG_EEH_BLOCK PPC_BIT(15) 233 + #define PHB_CTRLR_FENCE_LNKILL_DIS PPC_BIT(16) 234 + #define PHB_CTRLR_TVT_ADDR_SEL PPC_BITMASK(17, 19) 235 + #define TVT_DD1_1_PER_PE 0 236 + #define TVT_DD1_2_PER_PE 1 237 + #define TVT_DD1_4_PER_PE 2 238 + #define TVT_DD1_8_PER_PE 3 239 + #define TVT_DD1_16_PER_PE 4 240 + #define TVT_2_PER_PE 0 241 + #define TVT_4_PER_PE 1 242 + #define TVT_8_PER_PE 2 243 + #define TVT_16_PER_PE 3 244 + #define PHB_CTRLR_DMA_RD_SPACING PPC_BITMASK(28, 31) 245 + #define PHB_AIB_FENCE_CTRL 0x860 246 + #define PHB_TCE_TAG_ENABLE 0x868 247 + #define PHB_TCE_WATERMARK 0x870 248 + #define PHB_TIMEOUT_CTRL1 0x878 249 + #define PHB_TIMEOUT_CTRL2 0x880 250 + #define PHB_Q_DMA_R 0x888 251 + #define PHB_Q_DMA_R_QUIESCE_DMA PPC_BIT(0) 252 + #define PHB_Q_DMA_R_AUTORESET PPC_BIT(1) 253 + #define PHB_Q_DMA_R_DMA_RESP_STATUS PPC_BIT(4) 254 + #define PHB_Q_DMA_R_MMIO_RESP_STATUS PPC_BIT(5) 255 + #define PHB_Q_DMA_R_TCE_RESP_STATUS PPC_BIT(6) 256 + #define PHB_Q_DMA_R_TCE_KILL_STATUS PPC_BIT(7) 257 + #define PHB_TCE_TAG_STATUS 0x908 258 + 259 + /* FIR & Error registers */ 260 + #define PHB_LEM_FIR_ACCUM 0xc00 261 + #define PHB_LEM_FIR_AND_MASK 0xc08 262 + #define PHB_LEM_FIR_OR_MASK 0xc10 263 + #define PHB_LEM_ERROR_MASK 0xc18 264 + #define PHB_LEM_ERROR_AND_MASK 0xc20 265 + #define PHB_LEM_ERROR_OR_MASK 0xc28 266 + #define PHB_LEM_ACTION0 0xc30 267 + #define PHB_LEM_ACTION1 0xc38 268 + #define PHB_LEM_WOF 0xc40 269 + #define PHB_ERR_STATUS 0xc80 270 + #define PHB_ERR1_STATUS 0xc88 271 + #define PHB_ERR_INJECT 0xc90 272 + #define PHB_ERR_LEM_ENABLE 0xc98 273 + #define PHB_ERR_IRQ_ENABLE 0xca0 274 + #define PHB_ERR_FREEZE_ENABLE 0xca8 275 + #define PHB_ERR_AIB_FENCE_ENABLE 0xcb0 276 + #define PHB_ERR_LOG_0 0xcc0 277 + #define PHB_ERR_LOG_1 
0xcc8 278 + #define PHB_ERR_STATUS_MASK 0xcd0 279 + #define PHB_ERR1_STATUS_MASK 0xcd8 280 + 281 + #define PHB_TXE_ERR_STATUS 0xd00 282 + #define PHB_TXE_ERR1_STATUS 0xd08 283 + #define PHB_TXE_ERR_INJECT 0xd10 284 + #define PHB_TXE_ERR_LEM_ENABLE 0xd18 285 + #define PHB_TXE_ERR_IRQ_ENABLE 0xd20 286 + #define PHB_TXE_ERR_FREEZE_ENABLE 0xd28 287 + #define PHB_TXE_ERR_AIB_FENCE_ENABLE 0xd30 288 + #define PHB_TXE_ERR_LOG_0 0xd40 289 + #define PHB_TXE_ERR_LOG_1 0xd48 290 + #define PHB_TXE_ERR_STATUS_MASK 0xd50 291 + #define PHB_TXE_ERR1_STATUS_MASK 0xd58 292 + 293 + #define PHB_RXE_ARB_ERR_STATUS 0xd80 294 + #define PHB_RXE_ARB_ERR1_STATUS 0xd88 295 + #define PHB_RXE_ARB_ERR_INJECT 0xd90 296 + #define PHB_RXE_ARB_ERR_LEM_ENABLE 0xd98 297 + #define PHB_RXE_ARB_ERR_IRQ_ENABLE 0xda0 298 + #define PHB_RXE_ARB_ERR_FREEZE_ENABLE 0xda8 299 + #define PHB_RXE_ARB_ERR_AIB_FENCE_ENABLE 0xdb0 300 + #define PHB_RXE_ARB_ERR_LOG_0 0xdc0 301 + #define PHB_RXE_ARB_ERR_LOG_1 0xdc8 302 + #define PHB_RXE_ARB_ERR_STATUS_MASK 0xdd0 303 + #define PHB_RXE_ARB_ERR1_STATUS_MASK 0xdd8 304 + 305 + #define PHB_RXE_MRG_ERR_STATUS 0xe00 306 + #define PHB_RXE_MRG_ERR1_STATUS 0xe08 307 + #define PHB_RXE_MRG_ERR_INJECT 0xe10 308 + #define PHB_RXE_MRG_ERR_LEM_ENABLE 0xe18 309 + #define PHB_RXE_MRG_ERR_IRQ_ENABLE 0xe20 310 + #define PHB_RXE_MRG_ERR_FREEZE_ENABLE 0xe28 311 + #define PHB_RXE_MRG_ERR_AIB_FENCE_ENABLE 0xe30 312 + #define PHB_RXE_MRG_ERR_LOG_0 0xe40 313 + #define PHB_RXE_MRG_ERR_LOG_1 0xe48 314 + #define PHB_RXE_MRG_ERR_STATUS_MASK 0xe50 315 + #define PHB_RXE_MRG_ERR1_STATUS_MASK 0xe58 316 + 317 + #define PHB_RXE_TCE_ERR_STATUS 0xe80 318 + #define PHB_RXE_TCE_ERR1_STATUS 0xe88 319 + #define PHB_RXE_TCE_ERR_INJECT 0xe90 320 + #define PHB_RXE_TCE_ERR_LEM_ENABLE 0xe98 321 + #define PHB_RXE_TCE_ERR_IRQ_ENABLE 0xea0 322 + #define PHB_RXE_TCE_ERR_FREEZE_ENABLE 0xea8 323 + #define PHB_RXE_TCE_ERR_AIB_FENCE_ENABLE 0xeb0 324 + #define PHB_RXE_TCE_ERR_LOG_0 0xec0 325 + #define PHB_RXE_TCE_ERR_LOG_1 
0xec8 326 + #define PHB_RXE_TCE_ERR_STATUS_MASK 0xed0 327 + #define PHB_RXE_TCE_ERR1_STATUS_MASK 0xed8 328 + 329 + /* Performance monitor & Debug registers */ 330 + #define PHB_TRACE_CONTROL 0xf80 331 + #define PHB_PERFMON_CONFIG 0xf88 332 + #define PHB_PERFMON_CTR0 0xf90 333 + #define PHB_PERFMON_CTR1 0xf98 334 + #define PHB_PERFMON_CTR2 0xfa0 335 + #define PHB_PERFMON_CTR3 0xfa8 336 + 337 + /* Root complex config space memory mapped */ 338 + #define PHB_RC_CONFIG_BASE 0x1000 339 + #define PHB_RC_CONFIG_SIZE 0x800 340 + 341 + /* PHB4 REGB registers */ 342 + 343 + /* PBL core */ 344 + #define PHB_PBL_CONTROL 0x1800 345 + #define PHB_PBL_TIMEOUT_CTRL 0x1810 346 + #define PHB_PBL_NPTAG_ENABLE 0x1820 347 + #define PHB_PBL_NBW_CMP_MASK 0x1830 348 + #define PHB_PBL_NBW_MASK_ENABLE PPC_BIT(63) 349 + #define PHB_PBL_SYS_LINK_INIT 0x1838 350 + #define PHB_PBL_BUF_STATUS 0x1840 351 + #define PHB_PBL_ERR_STATUS 0x1900 352 + #define PHB_PBL_ERR1_STATUS 0x1908 353 + #define PHB_PBL_ERR_INJECT 0x1910 354 + #define PHB_PBL_ERR_INF_ENABLE 0x1920 355 + #define PHB_PBL_ERR_ERC_ENABLE 0x1928 356 + #define PHB_PBL_ERR_FAT_ENABLE 0x1930 357 + #define PHB_PBL_ERR_LOG_0 0x1940 358 + #define PHB_PBL_ERR_LOG_1 0x1948 359 + #define PHB_PBL_ERR_STATUS_MASK 0x1950 360 + #define PHB_PBL_ERR1_STATUS_MASK 0x1958 361 + 362 + /* PCI-E stack */ 363 + #define PHB_PCIE_SCR 0x1A00 364 + #define PHB_PCIE_SCR_SLOT_CAP PPC_BIT(15) 365 + #define PHB_PCIE_SCR_MAXLINKSPEED PPC_BITMASK(32, 35) 366 + 367 + 368 + #define PHB_PCIE_CRESET 0x1A10 369 + #define PHB_PCIE_CRESET_CFG_CORE PPC_BIT(0) 370 + #define PHB_PCIE_CRESET_TLDLP PPC_BIT(1) 371 + #define PHB_PCIE_CRESET_PBL PPC_BIT(2) 372 + #define PHB_PCIE_CRESET_PERST_N PPC_BIT(3) 373 + #define PHB_PCIE_CRESET_PIPE_N PPC_BIT(4) 374 + 375 + 376 + #define PHB_PCIE_HOTPLUG_STATUS 0x1A20 377 + #define PHB_PCIE_HPSTAT_PRESENCE PPC_BIT(10) 378 + 379 + #define PHB_PCIE_DLP_TRAIN_CTL 0x1A40 380 + #define PHB_PCIE_DLP_LINK_WIDTH PPC_BITMASK(30, 35) 381 + #define 
PHB_PCIE_DLP_LINK_SPEED PPC_BITMASK(36, 39) 382 + #define PHB_PCIE_DLP_LTSSM_TRC PPC_BITMASK(24, 27) 383 + #define PHB_PCIE_DLP_LTSSM_RESET 0 384 + #define PHB_PCIE_DLP_LTSSM_DETECT 1 385 + #define PHB_PCIE_DLP_LTSSM_POLLING 2 386 + #define PHB_PCIE_DLP_LTSSM_CONFIG 3 387 + #define PHB_PCIE_DLP_LTSSM_L0 4 388 + #define PHB_PCIE_DLP_LTSSM_REC 5 389 + #define PHB_PCIE_DLP_LTSSM_L1 6 390 + #define PHB_PCIE_DLP_LTSSM_L2 7 391 + #define PHB_PCIE_DLP_LTSSM_HOTRESET 8 392 + #define PHB_PCIE_DLP_LTSSM_DISABLED 9 393 + #define PHB_PCIE_DLP_LTSSM_LOOPBACK 10 394 + #define PHB_PCIE_DLP_TL_LINKACT PPC_BIT(23) 395 + #define PHB_PCIE_DLP_DL_PGRESET PPC_BIT(22) 396 + #define PHB_PCIE_DLP_TRAINING PPC_BIT(20) 397 + #define PHB_PCIE_DLP_INBAND_PRESENCE PPC_BIT(19) 398 + 399 + #define PHB_PCIE_DLP_CTL 0x1A78 400 + #define PHB_PCIE_DLP_CTL_BYPASS_PH2 PPC_BIT(4) 401 + #define PHB_PCIE_DLP_CTL_BYPASS_PH3 PPC_BIT(5) 402 + 403 + #define PHB_PCIE_DLP_TRWCTL 0x1A80 404 + #define PHB_PCIE_DLP_TRWCTL_EN PPC_BIT(0) 405 + 406 + #define PHB_PCIE_DLP_ERRLOG1 0x1AA0 407 + #define PHB_PCIE_DLP_ERRLOG2 0x1AA8 408 + #define PHB_PCIE_DLP_ERR_STATUS 0x1AB0 409 + #define PHB_PCIE_DLP_ERR_COUNTERS 0x1AB8 410 + 411 + #define PHB_PCIE_LANE_EQ_CNTL0 0x1AD0 412 + #define PHB_PCIE_LANE_EQ_CNTL1 0x1AD8 413 + #define PHB_PCIE_LANE_EQ_CNTL2 0x1AE0 414 + #define PHB_PCIE_LANE_EQ_CNTL3 0x1AE8 415 + #define PHB_PCIE_LANE_EQ_CNTL20 0x1AF0 416 + #define PHB_PCIE_LANE_EQ_CNTL21 0x1AF8 417 + #define PHB_PCIE_LANE_EQ_CNTL22 0x1B00 /* DD1 only */ 418 + #define PHB_PCIE_LANE_EQ_CNTL23 0x1B08 /* DD1 only */ 419 + #define PHB_PCIE_TRACE_CTRL 0x1B20 420 + #define PHB_PCIE_MISC_STRAP 0x1B30 421 + 422 + /* Error */ 423 + #define PHB_REGB_ERR_STATUS 0x1C00 424 + #define PHB_REGB_ERR1_STATUS 0x1C08 425 + #define PHB_REGB_ERR_INJECT 0x1C10 426 + #define PHB_REGB_ERR_INF_ENABLE 0x1C20 427 + #define PHB_REGB_ERR_ERC_ENABLE 0x1C28 428 + #define PHB_REGB_ERR_FAT_ENABLE 0x1C30 429 + #define PHB_REGB_ERR_LOG_0 0x1C40 430 + #define 
PHB_REGB_ERR_LOG_1 0x1C48 431 + #define PHB_REGB_ERR_STATUS_MASK 0x1C50 432 + #define PHB_REGB_ERR1_STATUS_MASK 0x1C58 433 + 434 + /* 435 + * IODA3 on-chip tables 436 + */ 437 + 438 + #define IODA3_TBL_LIST 1 439 + #define IODA3_TBL_MIST 2 440 + #define IODA3_TBL_RCAM 5 441 + #define IODA3_TBL_MRT 6 442 + #define IODA3_TBL_PESTA 7 443 + #define IODA3_TBL_PESTB 8 444 + #define IODA3_TBL_TVT 9 445 + #define IODA3_TBL_TCR 10 446 + #define IODA3_TBL_TDR 11 447 + #define IODA3_TBL_MBT 16 448 + #define IODA3_TBL_MDT 17 449 + #define IODA3_TBL_PEEV 20 450 + 451 + /* LIST */ 452 + #define IODA3_LIST_P PPC_BIT(6) 453 + #define IODA3_LIST_Q PPC_BIT(7) 454 + #define IODA3_LIST_STATE PPC_BIT(14) 455 + 456 + /* MIST */ 457 + #define IODA3_MIST_P3 PPC_BIT(48 + 0) 458 + #define IODA3_MIST_Q3 PPC_BIT(48 + 1) 459 + #define IODA3_MIST_PE3 PPC_BITMASK(48 + 4, 48 + 15) 460 + 461 + /* TVT */ 462 + #define IODA3_TVT_TABLE_ADDR PPC_BITMASK(0, 47) 463 + #define IODA3_TVT_NUM_LEVELS PPC_BITMASK(48, 50) 464 + #define IODA3_TVE_1_LEVEL 0 465 + #define IODA3_TVE_2_LEVELS 1 466 + #define IODA3_TVE_3_LEVELS 2 467 + #define IODA3_TVE_4_LEVELS 3 468 + #define IODA3_TVE_5_LEVELS 4 469 + #define IODA3_TVT_TCE_TABLE_SIZE PPC_BITMASK(51, 55) 470 + #define IODA3_TVT_NON_TRANSLATE_50 PPC_BIT(56) 471 + #define IODA3_TVT_IO_PSIZE PPC_BITMASK(59, 63) 472 + 473 + /* PESTA */ 474 + #define IODA3_PESTA_MMIO_FROZEN PPC_BIT(0) 475 + #define IODA3_PESTA_TRANS_TYPE PPC_BITMASK(5, 7) 476 + #define IODA3_PESTA_TRANS_TYPE_MMIOLOAD 0x4 477 + #define IODA3_PESTA_CA_CMPLT_TMT PPC_BIT(8) 478 + #define IODA3_PESTA_UR PPC_BIT(9) 479 + 480 + /* PESTB */ 481 + #define IODA3_PESTB_DMA_STOPPED PPC_BIT(0) 482 + 483 + /* MDT */ 484 + /* FIXME: check this field with Eric and add a B, C and D */ 485 + #define IODA3_MDT_PE_A PPC_BITMASK(0, 15) 486 + #define IODA3_MDT_PE_B PPC_BITMASK(16, 31) 487 + #define IODA3_MDT_PE_C PPC_BITMASK(32, 47) 488 + #define IODA3_MDT_PE_D PPC_BITMASK(48, 63) 489 + 490 + /* MBT */ 491 + #define 
IODA3_MBT0_ENABLE PPC_BIT(0) 492 + #define IODA3_MBT0_TYPE PPC_BIT(1) 493 + #define IODA3_MBT0_TYPE_M32 IODA3_MBT0_TYPE 494 + #define IODA3_MBT0_TYPE_M64 0 495 + #define IODA3_MBT0_MODE PPC_BITMASK(2, 3) 496 + #define IODA3_MBT0_MODE_PE_SEG 0 497 + #define IODA3_MBT0_MODE_MDT 1 498 + #define IODA3_MBT0_MODE_SINGLE_PE 2 499 + #define IODA3_MBT0_SEG_DIV PPC_BITMASK(4, 5) 500 + #define IODA3_MBT0_SEG_DIV_MAX 0 501 + #define IODA3_MBT0_SEG_DIV_128 1 502 + #define IODA3_MBT0_SEG_DIV_64 2 503 + #define IODA3_MBT0_SEG_DIV_8 3 504 + #define IODA3_MBT0_MDT_COLUMN PPC_BITMASK(4, 5) 505 + #define IODA3_MBT0_BASE_ADDR PPC_BITMASK(8, 51) 506 + 507 + #define IODA3_MBT1_ENABLE PPC_BIT(0) 508 + #define IODA3_MBT1_MASK PPC_BITMASK(8, 51) 509 + #define IODA3_MBT1_SEG_BASE PPC_BITMASK(55, 63) 510 + #define IODA3_MBT1_SINGLE_PE_NUM PPC_BITMASK(55, 63) 511 + 512 + /* 513 + * IODA3 in-memory tables 514 + */ 515 + 516 + /* 517 + * PEST 518 + * 519 + * 2x8 bytes entries, PEST0 and PEST1 520 + */ 521 + 522 + #define IODA3_PEST0_MMIO_CAUSE PPC_BIT(2) 523 + #define IODA3_PEST0_CFG_READ PPC_BIT(3) 524 + #define IODA3_PEST0_CFG_WRITE PPC_BIT(4) 525 + #define IODA3_PEST0_TTYPE PPC_BITMASK(5, 7) 526 + #define PEST_TTYPE_DMA_WRITE 0 527 + #define PEST_TTYPE_MSI 1 528 + #define PEST_TTYPE_DMA_READ 2 529 + #define PEST_TTYPE_DMA_READ_RESP 3 530 + #define PEST_TTYPE_MMIO_LOAD 4 531 + #define PEST_TTYPE_MMIO_STORE 5 532 + #define PEST_TTYPE_OTHER 7 533 + #define IODA3_PEST0_CA_RETURN PPC_BIT(8) 534 + #define IODA3_PEST0_UR_RETURN PPC_BIT(9) 535 + #define IODA3_PEST0_PCIE_NONFATAL PPC_BIT(10) 536 + #define IODA3_PEST0_PCIE_FATAL PPC_BIT(11) 537 + #define IODA3_PEST0_PARITY_UE PPC_BIT(13) 538 + #define IODA3_PEST0_PCIE_CORRECTABLE PPC_BIT(14) 539 + #define IODA3_PEST0_PCIE_INTERRUPT PPC_BIT(15) 540 + #define IODA3_PEST0_MMIO_XLATE PPC_BIT(16) 541 + #define IODA3_PEST0_IODA3_ERROR PPC_BIT(16) /* Same bit as MMIO xlate */ 542 + #define IODA3_PEST0_TCE_PAGE_FAULT PPC_BIT(18) 543 + #define 
IODA3_PEST0_TCE_ACCESS_FAULT PPC_BIT(19) 544 + #define IODA3_PEST0_DMA_RESP_TIMEOUT PPC_BIT(20) 545 + #define IODA3_PEST0_AIB_SIZE_INVALID PPC_BIT(21) 546 + #define IODA3_PEST0_LEM_BIT PPC_BITMASK(26, 31) 547 + #define IODA3_PEST0_RID PPC_BITMASK(32, 47) 548 + #define IODA3_PEST0_MSI_DATA PPC_BITMASK(48, 63) 549 + 550 + #define IODA3_PEST1_FAIL_ADDR PPC_BITMASK(3, 63) 551 + 552 + 553 + #endif /* PCI_HOST_PNV_PHB4_REGS_H */
+1
include/hw/pci/pcie_port.h
··· 72 72 typedef struct PCIERootPortClass { 73 73 PCIDeviceClass parent_class; 74 74 DeviceRealize parent_realize; 75 + DeviceReset parent_reset; 75 76 76 77 uint8_t (*aer_vector)(const PCIDevice *dev); 77 78 int (*interrupts_init)(PCIDevice *dev, Error **errp);
+7
include/hw/ppc/pnv.h
··· 30 30 #include "hw/ppc/pnv_homer.h" 31 31 #include "hw/ppc/pnv_xive.h" 32 32 #include "hw/ppc/pnv_core.h" 33 + #include "hw/pci-host/pnv_phb4.h" 33 34 34 35 #define TYPE_PNV_CHIP "pnv-chip" 35 36 #define PNV_CHIP(obj) OBJECT_CHECK(PnvChip, (obj), TYPE_PNV_CHIP) ··· 51 52 uint32_t nr_threads; 52 53 uint64_t cores_mask; 53 54 PnvCore **cores; 55 + 56 + uint32_t num_phbs; 54 57 55 58 MemoryRegion xscom_mmio; 56 59 MemoryRegion xscom; ··· 93 96 94 97 uint32_t nr_quads; 95 98 PnvQuad *quads; 99 + 100 + #define PNV9_CHIP_MAX_PEC 3 101 + PnvPhb4PecState pecs[PNV9_CHIP_MAX_PEC]; 96 102 } Pnv9Chip; 97 103 98 104 /* ··· 120 126 /*< public >*/ 121 127 uint64_t chip_cfam_id; 122 128 uint64_t cores_mask; 129 + uint32_t num_phbs; 123 130 124 131 DeviceRealize parent_realize; 125 132
+11
include/hw/ppc/pnv_xscom.h
··· 94 94 #define PNV9_XSCOM_XIVE_BASE 0x5013000 95 95 #define PNV9_XSCOM_XIVE_SIZE 0x300 96 96 97 + #define PNV9_XSCOM_PEC_NEST_BASE 0x4010c00 98 + #define PNV9_XSCOM_PEC_NEST_SIZE 0x100 99 + 100 + #define PNV9_XSCOM_PEC_PCI_BASE 0xd010800 101 + #define PNV9_XSCOM_PEC_PCI_SIZE 0x200 102 + 103 + /* XSCOM PCI "pass-through" window to PHB SCOM */ 104 + #define PNV9_XSCOM_PEC_PCI_STK0 0x100 105 + #define PNV9_XSCOM_PEC_PCI_STK1 0x140 106 + #define PNV9_XSCOM_PEC_PCI_STK2 0x180 107 + 97 108 /* 98 109 * Layout of the XSCOM PCB addresses (POWER 10) 99 110 */