qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

ppc/pnv: Add models for POWER8 PHB3 PCIe Host bridge

This is a model of the PCIe Host Bridge (PHB3) found on a POWER8
processor. It includes the PowerBus logic interface (PBCQ), IOMMU
support, a single PCIe Gen.3 Root Complex, and support for MSI and LSI
interrupt sources as found on a POWER8 system using the XICS interrupt
controller.

The POWER8 processor comes in different flavors: Venice, Murano,
Naples, each having a different number of PHBs. To make things simpler,
the model provides 3 PHB3s per chip. Some platforms, like the
Firestone, can also couple PHBs on the first chip to provide more
bandwidth but this is too specific to model in QEMU.

XICS requires some adjustment to support the PHB3 MSI. The changes are
provided here but they could be decoupled in prereq patches.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20200127144506.11132-3-clg@kaod.org>
[dwg: Use device_class_set_props()]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>

authored by

Cédric Le Goater and committed by
David Gibson
9ae1329e 4f9924c4

+2614 -3
+13 -1
hw/intc/xics.c
··· 217 217 } 218 218 } 219 219 220 - static void icp_irq(ICSState *ics, int server, int nr, uint8_t priority) 220 + void icp_irq(ICSState *ics, int server, int nr, uint8_t priority) 221 221 { 222 222 ICPState *icp = xics_icp_get(ics->xics, server); 223 223 ··· 512 512 513 513 static void ics_reject(ICSState *ics, uint32_t nr) 514 514 { 515 + ICSStateClass *isc = ICS_GET_CLASS(ics); 515 516 ICSIRQState *irq = ics->irqs + nr - ics->offset; 517 + 518 + if (isc->reject) { 519 + isc->reject(ics, nr); 520 + return; 521 + } 516 522 517 523 trace_xics_ics_reject(nr, nr - ics->offset); 518 524 if (irq->flags & XICS_FLAGS_IRQ_MSI) { ··· 524 530 525 531 void ics_resend(ICSState *ics) 526 532 { 533 + ICSStateClass *isc = ICS_GET_CLASS(ics); 527 534 int i; 535 + 536 + if (isc->resend) { 537 + isc->resend(ics); 538 + return; 539 + } 528 540 529 541 for (i = 0; i < ics->nr_irqs; i++) { 530 542 /* FIXME: filter by server#? */
+1
hw/pci-host/Makefile.objs
··· 21 21 22 22 common-obj-$(CONFIG_PCI_EXPRESS_DESIGNWARE) += designware.o 23 23 obj-$(CONFIG_POWERNV) += pnv_phb4.o pnv_phb4_pec.o 24 + obj-$(CONFIG_POWERNV) += pnv_phb3.o pnv_phb3_msi.o pnv_phb3_pbcq.o
+1195
hw/pci-host/pnv_phb3.c
··· 1 + /* 2 + * QEMU PowerPC PowerNV (POWER8) PHB3 model 3 + * 4 + * Copyright (c) 2014-2020, IBM Corporation. 5 + * 6 + * This code is licensed under the GPL version 2 or later. See the 7 + * COPYING file in the top-level directory. 8 + */ 9 + #include "qemu/osdep.h" 10 + #include "qemu/log.h" 11 + #include "qapi/visitor.h" 12 + #include "qapi/error.h" 13 + #include "qemu-common.h" 14 + #include "hw/pci-host/pnv_phb3_regs.h" 15 + #include "hw/pci-host/pnv_phb3.h" 16 + #include "hw/pci/pcie_host.h" 17 + #include "hw/pci/pcie_port.h" 18 + #include "hw/ppc/pnv.h" 19 + #include "hw/irq.h" 20 + #include "hw/qdev-properties.h" 21 + 22 + #define phb3_error(phb, fmt, ...) \ 23 + qemu_log_mask(LOG_GUEST_ERROR, "phb3[%d:%d]: " fmt "\n", \ 24 + (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__) 25 + 26 + static PCIDevice *pnv_phb3_find_cfg_dev(PnvPHB3 *phb) 27 + { 28 + PCIHostState *pci = PCI_HOST_BRIDGE(phb); 29 + uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3]; 30 + uint8_t bus, devfn; 31 + 32 + if (!(addr >> 63)) { 33 + return NULL; 34 + } 35 + bus = (addr >> 52) & 0xff; 36 + devfn = (addr >> 44) & 0xff; 37 + 38 + return pci_find_device(pci->bus, bus, devfn); 39 + } 40 + 41 + /* 42 + * The CONFIG_DATA register expects little endian accesses, but as the 43 + * region is big endian, we have to swap the value. 44 + */ 45 + static void pnv_phb3_config_write(PnvPHB3 *phb, unsigned off, 46 + unsigned size, uint64_t val) 47 + { 48 + uint32_t cfg_addr, limit; 49 + PCIDevice *pdev; 50 + 51 + pdev = pnv_phb3_find_cfg_dev(phb); 52 + if (!pdev) { 53 + return; 54 + } 55 + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; 56 + cfg_addr |= off; 57 + limit = pci_config_size(pdev); 58 + if (limit <= cfg_addr) { 59 + /* 60 + * conventional pci device can be behind pcie-to-pci bridge. 61 + * 256 <= addr < 4K has no effects. 
62 + */ 63 + return; 64 + } 65 + switch (size) { 66 + case 1: 67 + break; 68 + case 2: 69 + val = bswap16(val); 70 + break; 71 + case 4: 72 + val = bswap32(val); 73 + break; 74 + default: 75 + g_assert_not_reached(); 76 + } 77 + pci_host_config_write_common(pdev, cfg_addr, limit, val, size); 78 + } 79 + 80 + static uint64_t pnv_phb3_config_read(PnvPHB3 *phb, unsigned off, 81 + unsigned size) 82 + { 83 + uint32_t cfg_addr, limit; 84 + PCIDevice *pdev; 85 + uint64_t val; 86 + 87 + pdev = pnv_phb3_find_cfg_dev(phb); 88 + if (!pdev) { 89 + return ~0ull; 90 + } 91 + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; 92 + cfg_addr |= off; 93 + limit = pci_config_size(pdev); 94 + if (limit <= cfg_addr) { 95 + /* 96 + * conventional pci device can be behind pcie-to-pci bridge. 97 + * 256 <= addr < 4K has no effects. 98 + */ 99 + return ~0ull; 100 + } 101 + val = pci_host_config_read_common(pdev, cfg_addr, limit, size); 102 + switch (size) { 103 + case 1: 104 + return val; 105 + case 2: 106 + return bswap16(val); 107 + case 4: 108 + return bswap32(val); 109 + default: 110 + g_assert_not_reached(); 111 + } 112 + } 113 + 114 + static void pnv_phb3_check_m32(PnvPHB3 *phb) 115 + { 116 + uint64_t base, start, size; 117 + MemoryRegion *parent; 118 + PnvPBCQState *pbcq = &phb->pbcq; 119 + 120 + if (memory_region_is_mapped(&phb->mr_m32)) { 121 + memory_region_del_subregion(phb->mr_m32.container, &phb->mr_m32); 122 + } 123 + 124 + if (!(phb->regs[PHB_PHB3_CONFIG >> 3] & PHB_PHB3C_M32_EN)) { 125 + return; 126 + } 127 + 128 + /* Grab geometry from registers */ 129 + base = phb->regs[PHB_M32_BASE_ADDR >> 3]; 130 + start = phb->regs[PHB_M32_START_ADDR >> 3]; 131 + size = ~(phb->regs[PHB_M32_BASE_MASK >> 3] | 0xfffc000000000000ull) + 1; 132 + 133 + /* Check if it matches an enabled MMIO region in the PBCQ */ 134 + if (memory_region_is_mapped(&pbcq->mmbar0) && 135 + base >= pbcq->mmio0_base && 136 + (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) { 137 + parent = 
&pbcq->mmbar0; 138 + base -= pbcq->mmio0_base; 139 + } else if (memory_region_is_mapped(&pbcq->mmbar1) && 140 + base >= pbcq->mmio1_base && 141 + (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) { 142 + parent = &pbcq->mmbar1; 143 + base -= pbcq->mmio1_base; 144 + } else { 145 + return; 146 + } 147 + 148 + /* Create alias */ 149 + memory_region_init_alias(&phb->mr_m32, OBJECT(phb), "phb3-m32", 150 + &phb->pci_mmio, start, size); 151 + memory_region_add_subregion(parent, base, &phb->mr_m32); 152 + } 153 + 154 + static void pnv_phb3_check_m64(PnvPHB3 *phb, uint32_t index) 155 + { 156 + uint64_t base, start, size, m64; 157 + MemoryRegion *parent; 158 + PnvPBCQState *pbcq = &phb->pbcq; 159 + 160 + if (memory_region_is_mapped(&phb->mr_m64[index])) { 161 + /* Should we destroy it in RCU friendly way... ? */ 162 + memory_region_del_subregion(phb->mr_m64[index].container, 163 + &phb->mr_m64[index]); 164 + } 165 + 166 + /* Get table entry */ 167 + m64 = phb->ioda_M64BT[index]; 168 + 169 + if (!(m64 & IODA2_M64BT_ENABLE)) { 170 + return; 171 + } 172 + 173 + /* Grab geometry from registers */ 174 + base = GETFIELD(IODA2_M64BT_BASE, m64) << 20; 175 + if (m64 & IODA2_M64BT_SINGLE_PE) { 176 + base &= ~0x1ffffffull; 177 + } 178 + size = GETFIELD(IODA2_M64BT_MASK, m64) << 20; 179 + size |= 0xfffc000000000000ull; 180 + size = ~size + 1; 181 + start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]); 182 + 183 + /* Check if it matches an enabled MMIO region in the PBCQ */ 184 + if (memory_region_is_mapped(&pbcq->mmbar0) && 185 + base >= pbcq->mmio0_base && 186 + (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) { 187 + parent = &pbcq->mmbar0; 188 + base -= pbcq->mmio0_base; 189 + } else if (memory_region_is_mapped(&pbcq->mmbar1) && 190 + base >= pbcq->mmio1_base && 191 + (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) { 192 + parent = &pbcq->mmbar1; 193 + base -= pbcq->mmio1_base; 194 + } else { 195 + return; 196 + } 197 + 198 + /* Create alias */ 199 + 
memory_region_init_alias(&phb->mr_m64[index], OBJECT(phb), "phb3-m64", 200 + &phb->pci_mmio, start, size); 201 + memory_region_add_subregion(parent, base, &phb->mr_m64[index]); 202 + } 203 + 204 + static void pnv_phb3_check_all_m64s(PnvPHB3 *phb) 205 + { 206 + uint64_t i; 207 + 208 + for (i = 0; i < PNV_PHB3_NUM_M64; i++) { 209 + pnv_phb3_check_m64(phb, i); 210 + } 211 + } 212 + 213 + static void pnv_phb3_lxivt_write(PnvPHB3 *phb, unsigned idx, uint64_t val) 214 + { 215 + uint8_t server, prio; 216 + 217 + phb->ioda_LXIVT[idx] = val & (IODA2_LXIVT_SERVER | 218 + IODA2_LXIVT_PRIORITY | 219 + IODA2_LXIVT_NODE_ID); 220 + server = GETFIELD(IODA2_LXIVT_SERVER, val); 221 + prio = GETFIELD(IODA2_LXIVT_PRIORITY, val); 222 + 223 + /* 224 + * The low order 2 bits are the link pointer (Type II interrupts). 225 + * Shift back to get a valid IRQ server. 226 + */ 227 + server >>= 2; 228 + 229 + ics_write_xive(&phb->lsis, idx, server, prio, prio); 230 + } 231 + 232 + static uint64_t *pnv_phb3_ioda_access(PnvPHB3 *phb, 233 + unsigned *out_table, unsigned *out_idx) 234 + { 235 + uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3]; 236 + unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg); 237 + unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg); 238 + unsigned int mask; 239 + uint64_t *tptr = NULL; 240 + 241 + switch (table) { 242 + case IODA2_TBL_LIST: 243 + tptr = phb->ioda_LIST; 244 + mask = 7; 245 + break; 246 + case IODA2_TBL_LXIVT: 247 + tptr = phb->ioda_LXIVT; 248 + mask = 7; 249 + break; 250 + case IODA2_TBL_IVC_CAM: 251 + case IODA2_TBL_RBA: 252 + mask = 31; 253 + break; 254 + case IODA2_TBL_RCAM: 255 + mask = 63; 256 + break; 257 + case IODA2_TBL_MRT: 258 + mask = 7; 259 + break; 260 + case IODA2_TBL_PESTA: 261 + case IODA2_TBL_PESTB: 262 + mask = 255; 263 + break; 264 + case IODA2_TBL_TVT: 265 + tptr = phb->ioda_TVT; 266 + mask = 511; 267 + break; 268 + case IODA2_TBL_TCAM: 269 + case IODA2_TBL_TDR: 270 + mask = 63; 271 + break; 272 + case IODA2_TBL_M64BT: 273 + tptr = 
phb->ioda_M64BT; 274 + mask = 15; 275 + break; 276 + case IODA2_TBL_M32DT: 277 + tptr = phb->ioda_MDT; 278 + mask = 255; 279 + break; 280 + case IODA2_TBL_PEEV: 281 + tptr = phb->ioda_PEEV; 282 + mask = 3; 283 + break; 284 + default: 285 + phb3_error(phb, "invalid IODA table %d", table); 286 + return NULL; 287 + } 288 + index &= mask; 289 + if (out_idx) { 290 + *out_idx = index; 291 + } 292 + if (out_table) { 293 + *out_table = table; 294 + } 295 + if (tptr) { 296 + tptr += index; 297 + } 298 + if (adreg & PHB_IODA_AD_AUTOINC) { 299 + index = (index + 1) & mask; 300 + adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index); 301 + } 302 + phb->regs[PHB_IODA_ADDR >> 3] = adreg; 303 + return tptr; 304 + } 305 + 306 + static uint64_t pnv_phb3_ioda_read(PnvPHB3 *phb) 307 + { 308 + unsigned table; 309 + uint64_t *tptr; 310 + 311 + tptr = pnv_phb3_ioda_access(phb, &table, NULL); 312 + if (!tptr) { 313 + /* Return 0 on unsupported tables, not ff's */ 314 + return 0; 315 + } 316 + return *tptr; 317 + } 318 + 319 + static void pnv_phb3_ioda_write(PnvPHB3 *phb, uint64_t val) 320 + { 321 + unsigned table, idx; 322 + uint64_t *tptr; 323 + 324 + tptr = pnv_phb3_ioda_access(phb, &table, &idx); 325 + if (!tptr) { 326 + return; 327 + } 328 + 329 + /* Handle side effects */ 330 + switch (table) { 331 + case IODA2_TBL_LXIVT: 332 + pnv_phb3_lxivt_write(phb, idx, val); 333 + break; 334 + case IODA2_TBL_M64BT: 335 + *tptr = val; 336 + pnv_phb3_check_m64(phb, idx); 337 + break; 338 + default: 339 + *tptr = val; 340 + } 341 + } 342 + 343 + /* 344 + * This is called whenever the PHB LSI, MSI source ID register or 345 + * the PBCQ irq filters are written. 346 + */ 347 + void pnv_phb3_remap_irqs(PnvPHB3 *phb) 348 + { 349 + ICSState *ics = &phb->lsis; 350 + uint32_t local, global, count, mask, comp; 351 + uint64_t baren; 352 + PnvPBCQState *pbcq = &phb->pbcq; 353 + 354 + /* 355 + * First check if we are enabled. 
Unlike real HW we don't separate 356 + * TX and RX so we enable if both are set 357 + */ 358 + baren = pbcq->nest_regs[PBCQ_NEST_BAR_EN]; 359 + if (!(baren & PBCQ_NEST_BAR_EN_IRSN_RX) || 360 + !(baren & PBCQ_NEST_BAR_EN_IRSN_TX)) { 361 + ics->offset = 0; 362 + return; 363 + } 364 + 365 + /* Grab local LSI source ID */ 366 + local = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]) << 3; 367 + 368 + /* Grab global one and compare */ 369 + global = GETFIELD(PBCQ_NEST_LSI_SRC, 370 + pbcq->nest_regs[PBCQ_NEST_LSI_SRC_ID]) << 3; 371 + if (global != local) { 372 + /* 373 + * This happens during initialization, let's come back when we 374 + * are properly configured 375 + */ 376 + ics->offset = 0; 377 + return; 378 + } 379 + 380 + /* Get the base on the powerbus */ 381 + comp = GETFIELD(PBCQ_NEST_IRSN_COMP, 382 + pbcq->nest_regs[PBCQ_NEST_IRSN_COMPARE]); 383 + mask = GETFIELD(PBCQ_NEST_IRSN_COMP, 384 + pbcq->nest_regs[PBCQ_NEST_IRSN_MASK]); 385 + count = ((~mask) + 1) & 0x7ffff; 386 + phb->total_irq = count; 387 + 388 + /* Sanity checks */ 389 + if ((global + PNV_PHB3_NUM_LSI) > count) { 390 + phb3_error(phb, "LSIs out of reach: LSI base=%d total irq=%d", global, 391 + count); 392 + } 393 + 394 + if (count > 2048) { 395 + phb3_error(phb, "More interrupts than supported: %d", count); 396 + } 397 + 398 + if ((comp & mask) != comp) { 399 + phb3_error(phb, "IRQ compare bits not in mask: comp=0x%x mask=0x%x", 400 + comp, mask); 401 + comp &= mask; 402 + } 403 + /* Setup LSI offset */ 404 + ics->offset = comp + global; 405 + 406 + /* Setup MSI offset */ 407 + pnv_phb3_msi_update_config(&phb->msis, comp, count - PNV_PHB3_NUM_LSI); 408 + } 409 + 410 + static void pnv_phb3_lsi_src_id_write(PnvPHB3 *phb, uint64_t val) 411 + { 412 + /* Sanitize content */ 413 + val &= PHB_LSI_SRC_ID; 414 + phb->regs[PHB_LSI_SOURCE_ID >> 3] = val; 415 + pnv_phb3_remap_irqs(phb); 416 + } 417 + 418 + static void pnv_phb3_rtc_invalidate(PnvPHB3 *phb, uint64_t val) 419 + { 420 + PnvPhb3DMASpace 
*ds; 421 + 422 + /* Always invalidate all for now ... */ 423 + QLIST_FOREACH(ds, &phb->dma_spaces, list) { 424 + ds->pe_num = PHB_INVALID_PE; 425 + } 426 + } 427 + 428 + 429 + static void pnv_phb3_update_msi_regions(PnvPhb3DMASpace *ds) 430 + { 431 + uint64_t cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3]; 432 + 433 + if (cfg & PHB_PHB3C_32BIT_MSI_EN) { 434 + if (!memory_region_is_mapped(&ds->msi32_mr)) { 435 + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), 436 + 0xffff0000, &ds->msi32_mr); 437 + } 438 + } else { 439 + if (memory_region_is_mapped(&ds->msi32_mr)) { 440 + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), 441 + &ds->msi32_mr); 442 + } 443 + } 444 + 445 + if (cfg & PHB_PHB3C_64BIT_MSI_EN) { 446 + if (!memory_region_is_mapped(&ds->msi64_mr)) { 447 + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), 448 + (1ull << 60), &ds->msi64_mr); 449 + } 450 + } else { 451 + if (memory_region_is_mapped(&ds->msi64_mr)) { 452 + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), 453 + &ds->msi64_mr); 454 + } 455 + } 456 + } 457 + 458 + static void pnv_phb3_update_all_msi_regions(PnvPHB3 *phb) 459 + { 460 + PnvPhb3DMASpace *ds; 461 + 462 + QLIST_FOREACH(ds, &phb->dma_spaces, list) { 463 + pnv_phb3_update_msi_regions(ds); 464 + } 465 + } 466 + 467 + void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size) 468 + { 469 + PnvPHB3 *phb = opaque; 470 + bool changed; 471 + 472 + /* Special case configuration data */ 473 + if ((off & 0xfffc) == PHB_CONFIG_DATA) { 474 + pnv_phb3_config_write(phb, off & 0x3, size, val); 475 + return; 476 + } 477 + 478 + /* Other registers are 64-bit only */ 479 + if (size != 8 || off & 0x7) { 480 + phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", 481 + off, size); 482 + return; 483 + } 484 + 485 + /* Handle masking & filtering */ 486 + switch (off) { 487 + case PHB_M64_UPPER_BITS: 488 + val &= 0xfffc000000000000ull; 489 + break; 490 + case PHB_Q_DMA_R: 491 + /* 492 + * This is 
enough logic to make SW happy but we aren't actually 493 + * quiescing the DMAs 494 + */ 495 + if (val & PHB_Q_DMA_R_AUTORESET) { 496 + val = 0; 497 + } else { 498 + val &= PHB_Q_DMA_R_QUIESCE_DMA; 499 + } 500 + break; 501 + /* LEM stuff */ 502 + case PHB_LEM_FIR_AND_MASK: 503 + phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val; 504 + return; 505 + case PHB_LEM_FIR_OR_MASK: 506 + phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val; 507 + return; 508 + case PHB_LEM_ERROR_AND_MASK: 509 + phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val; 510 + return; 511 + case PHB_LEM_ERROR_OR_MASK: 512 + phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val; 513 + return; 514 + case PHB_LEM_WOF: 515 + val = 0; 516 + break; 517 + } 518 + 519 + /* Record whether it changed */ 520 + changed = phb->regs[off >> 3] != val; 521 + 522 + /* Store in register cache first */ 523 + phb->regs[off >> 3] = val; 524 + 525 + /* Handle side effects */ 526 + switch (off) { 527 + case PHB_PHB3_CONFIG: 528 + if (changed) { 529 + pnv_phb3_update_all_msi_regions(phb); 530 + } 531 + /* fall through */ 532 + case PHB_M32_BASE_ADDR: 533 + case PHB_M32_BASE_MASK: 534 + case PHB_M32_START_ADDR: 535 + if (changed) { 536 + pnv_phb3_check_m32(phb); 537 + } 538 + break; 539 + case PHB_M64_UPPER_BITS: 540 + if (changed) { 541 + pnv_phb3_check_all_m64s(phb); 542 + } 543 + break; 544 + case PHB_LSI_SOURCE_ID: 545 + if (changed) { 546 + pnv_phb3_lsi_src_id_write(phb, val); 547 + } 548 + break; 549 + 550 + /* IODA table accesses */ 551 + case PHB_IODA_DATA0: 552 + pnv_phb3_ioda_write(phb, val); 553 + break; 554 + 555 + /* RTC invalidation */ 556 + case PHB_RTC_INVALIDATE: 557 + pnv_phb3_rtc_invalidate(phb, val); 558 + break; 559 + 560 + /* FFI request */ 561 + case PHB_FFI_REQUEST: 562 + pnv_phb3_msi_ffi(&phb->msis, val); 563 + break; 564 + 565 + /* Silent simple writes */ 566 + case PHB_CONFIG_ADDRESS: 567 + case PHB_IODA_ADDR: 568 + case PHB_TCE_KILL: 569 + case PHB_TCE_SPEC_CTL: 570 + case PHB_PEST_BAR: 571 + case PHB_PELTV_BAR: 572 + case PHB_RTT_BAR: 
573 + case PHB_RBA_BAR: 574 + case PHB_IVT_BAR: 575 + case PHB_FFI_LOCK: 576 + case PHB_LEM_FIR_ACCUM: 577 + case PHB_LEM_ERROR_MASK: 578 + case PHB_LEM_ACTION0: 579 + case PHB_LEM_ACTION1: 580 + break; 581 + 582 + /* Noise on anything else */ 583 + default: 584 + qemu_log_mask(LOG_UNIMP, "phb3: reg_write 0x%"PRIx64"=%"PRIx64"\n", 585 + off, val); 586 + } 587 + } 588 + 589 + uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size) 590 + { 591 + PnvPHB3 *phb = opaque; 592 + PCIHostState *pci = PCI_HOST_BRIDGE(phb); 593 + uint64_t val; 594 + 595 + if ((off & 0xfffc) == PHB_CONFIG_DATA) { 596 + return pnv_phb3_config_read(phb, off & 0x3, size); 597 + } 598 + 599 + /* Other registers are 64-bit only */ 600 + if (size != 8 || off & 0x7) { 601 + phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", 602 + off, size); 603 + return ~0ull; 604 + } 605 + 606 + /* Default read from cache */ 607 + val = phb->regs[off >> 3]; 608 + 609 + switch (off) { 610 + /* Simulate venice DD2.0 */ 611 + case PHB_VERSION: 612 + return 0x000000a300000005ull; 613 + case PHB_PCIE_SYSTEM_CONFIG: 614 + return 0x441100fc30000000; 615 + 616 + /* IODA table accesses */ 617 + case PHB_IODA_DATA0: 618 + return pnv_phb3_ioda_read(phb); 619 + 620 + /* Link training always appears trained */ 621 + case PHB_PCIE_DLP_TRAIN_CTL: 622 + if (!pci_find_device(pci->bus, 1, 0)) { 623 + return 0; 624 + } 625 + return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TC_DL_LINKACT; 626 + 627 + /* FFI Lock */ 628 + case PHB_FFI_LOCK: 629 + /* Set lock and return previous value */ 630 + phb->regs[off >> 3] |= PHB_FFI_LOCK_STATE; 631 + return val; 632 + 633 + /* DMA read sync: make it look like it's complete */ 634 + case PHB_DMARD_SYNC: 635 + return PHB_DMARD_SYNC_COMPLETE; 636 + 637 + /* Silent simple reads */ 638 + case PHB_PHB3_CONFIG: 639 + case PHB_M32_BASE_ADDR: 640 + case PHB_M32_BASE_MASK: 641 + case PHB_M32_START_ADDR: 642 + case PHB_CONFIG_ADDRESS: 643 + case PHB_IODA_ADDR: 644 + case 
PHB_RTC_INVALIDATE: 645 + case PHB_TCE_KILL: 646 + case PHB_TCE_SPEC_CTL: 647 + case PHB_PEST_BAR: 648 + case PHB_PELTV_BAR: 649 + case PHB_RTT_BAR: 650 + case PHB_RBA_BAR: 651 + case PHB_IVT_BAR: 652 + case PHB_M64_UPPER_BITS: 653 + case PHB_LEM_FIR_ACCUM: 654 + case PHB_LEM_ERROR_MASK: 655 + case PHB_LEM_ACTION0: 656 + case PHB_LEM_ACTION1: 657 + break; 658 + 659 + /* Noise on anything else */ 660 + default: 661 + qemu_log_mask(LOG_UNIMP, "phb3: reg_read 0x%"PRIx64"=%"PRIx64"\n", 662 + off, val); 663 + } 664 + return val; 665 + } 666 + 667 + static const MemoryRegionOps pnv_phb3_reg_ops = { 668 + .read = pnv_phb3_reg_read, 669 + .write = pnv_phb3_reg_write, 670 + .valid.min_access_size = 1, 671 + .valid.max_access_size = 8, 672 + .impl.min_access_size = 1, 673 + .impl.max_access_size = 8, 674 + .endianness = DEVICE_BIG_ENDIAN, 675 + }; 676 + 677 + static int pnv_phb3_map_irq(PCIDevice *pci_dev, int irq_num) 678 + { 679 + /* Check that out properly ... */ 680 + return irq_num & 3; 681 + } 682 + 683 + static void pnv_phb3_set_irq(void *opaque, int irq_num, int level) 684 + { 685 + PnvPHB3 *phb = opaque; 686 + 687 + /* LSI only ... */ 688 + if (irq_num > 3) { 689 + phb3_error(phb, "Unknown IRQ to set %d", irq_num); 690 + } 691 + qemu_set_irq(phb->qirqs[irq_num], level); 692 + } 693 + 694 + static bool pnv_phb3_resolve_pe(PnvPhb3DMASpace *ds) 695 + { 696 + uint64_t rtt, addr; 697 + uint16_t rte; 698 + int bus_num; 699 + 700 + /* Already resolved ? */ 701 + if (ds->pe_num != PHB_INVALID_PE) { 702 + return true; 703 + } 704 + 705 + /* We need to lookup the RTT */ 706 + rtt = ds->phb->regs[PHB_RTT_BAR >> 3]; 707 + if (!(rtt & PHB_RTT_BAR_ENABLE)) { 708 + phb3_error(ds->phb, "DMA with RTT BAR disabled !"); 709 + /* Set error bits ? fence ? ... 
*/ 710 + return false; 711 + } 712 + 713 + /* Read RTE */ 714 + bus_num = pci_bus_num(ds->bus); 715 + addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; 716 + addr += 2 * ((bus_num << 8) | ds->devfn); 717 + if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { 718 + phb3_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); 719 + /* Set error bits ? fence ? ... */ 720 + return false; 721 + } 722 + rte = be16_to_cpu(rte); 723 + 724 + /* Fail upon reading of invalid PE# */ 725 + if (rte >= PNV_PHB3_NUM_PE) { 726 + phb3_error(ds->phb, "RTE for RID 0x%x invalid (%04x", ds->devfn, rte); 727 + /* Set error bits ? fence ? ... */ 728 + return false; 729 + } 730 + ds->pe_num = rte; 731 + return true; 732 + } 733 + 734 + static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr, 735 + bool is_write, uint64_t tve, 736 + IOMMUTLBEntry *tlb) 737 + { 738 + uint64_t tta = GETFIELD(IODA2_TVT_TABLE_ADDR, tve); 739 + int32_t lev = GETFIELD(IODA2_TVT_NUM_LEVELS, tve); 740 + uint32_t tts = GETFIELD(IODA2_TVT_TCE_TABLE_SIZE, tve); 741 + uint32_t tps = GETFIELD(IODA2_TVT_IO_PSIZE, tve); 742 + PnvPHB3 *phb = ds->phb; 743 + 744 + /* Invalid levels */ 745 + if (lev > 4) { 746 + phb3_error(phb, "Invalid #levels in TVE %d", lev); 747 + return; 748 + } 749 + 750 + /* IO Page Size of 0 means untranslated, else use TCEs */ 751 + if (tps == 0) { 752 + /* 753 + * We only support non-translate in top window. 754 + * 755 + * TODO: Venice/Murano support it on bottom window above 4G and 756 + * Naples suports it on everything 757 + */ 758 + if (!(tve & PPC_BIT(51))) { 759 + phb3_error(phb, "xlate for invalid non-translate TVE"); 760 + return; 761 + } 762 + /* TODO: Handle boundaries */ 763 + 764 + /* Use 4k pages like q35 ... 
for now */ 765 + tlb->iova = addr & 0xfffffffffffff000ull; 766 + tlb->translated_addr = addr & 0x0003fffffffff000ull; 767 + tlb->addr_mask = 0xfffull; 768 + tlb->perm = IOMMU_RW; 769 + } else { 770 + uint32_t tce_shift, tbl_shift, sh; 771 + uint64_t base, taddr, tce, tce_mask; 772 + 773 + /* TVE disabled ? */ 774 + if (tts == 0) { 775 + phb3_error(phb, "xlate for invalid translated TVE"); 776 + return; 777 + } 778 + 779 + /* Address bits per bottom level TCE entry */ 780 + tce_shift = tps + 11; 781 + 782 + /* Address bits per table level */ 783 + tbl_shift = tts + 8; 784 + 785 + /* Top level table base address */ 786 + base = tta << 12; 787 + 788 + /* Total shift to first level */ 789 + sh = tbl_shift * lev + tce_shift; 790 + 791 + /* TODO: Multi-level untested */ 792 + while ((lev--) >= 0) { 793 + /* Grab the TCE address */ 794 + taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3); 795 + if (dma_memory_read(&address_space_memory, taddr, &tce, 796 + sizeof(tce))) { 797 + phb3_error(phb, "Failed to read TCE at 0x%"PRIx64, taddr); 798 + return; 799 + } 800 + tce = be64_to_cpu(tce); 801 + 802 + /* Check permission for indirect TCE */ 803 + if ((lev >= 0) && !(tce & 3)) { 804 + phb3_error(phb, "Invalid indirect TCE at 0x%"PRIx64, taddr); 805 + phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, 806 + is_write ? 'W' : 'R', tve); 807 + phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", 808 + tta, lev, tts, tps); 809 + return; 810 + } 811 + sh -= tbl_shift; 812 + base = tce & ~0xfffull; 813 + } 814 + 815 + /* We exit the loop with TCE being the final TCE */ 816 + tce_mask = ~((1ull << tce_shift) - 1); 817 + tlb->iova = addr & tce_mask; 818 + tlb->translated_addr = tce & tce_mask; 819 + tlb->addr_mask = ~tce_mask; 820 + tlb->perm = tce & 3; 821 + if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) { 822 + phb3_error(phb, "TCE access fault at 0x%"PRIx64, taddr); 823 + phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, 824 + is_write ? 
'W' : 'R', tve); 825 + phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", 826 + tta, lev, tts, tps); 827 + } 828 + } 829 + } 830 + 831 + static IOMMUTLBEntry pnv_phb3_translate_iommu(IOMMUMemoryRegion *iommu, 832 + hwaddr addr, 833 + IOMMUAccessFlags flag, 834 + int iommu_idx) 835 + { 836 + PnvPhb3DMASpace *ds = container_of(iommu, PnvPhb3DMASpace, dma_mr); 837 + int tve_sel; 838 + uint64_t tve, cfg; 839 + IOMMUTLBEntry ret = { 840 + .target_as = &address_space_memory, 841 + .iova = addr, 842 + .translated_addr = 0, 843 + .addr_mask = ~(hwaddr)0, 844 + .perm = IOMMU_NONE, 845 + }; 846 + PnvPHB3 *phb = ds->phb; 847 + 848 + /* Resolve PE# */ 849 + if (!pnv_phb3_resolve_pe(ds)) { 850 + phb3_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", 851 + ds->bus, pci_bus_num(ds->bus), ds->devfn); 852 + return ret; 853 + } 854 + 855 + /* Check top bits */ 856 + switch (addr >> 60) { 857 + case 00: 858 + /* DMA or 32-bit MSI ? */ 859 + cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3]; 860 + if ((cfg & PHB_PHB3C_32BIT_MSI_EN) && 861 + ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) { 862 + phb3_error(phb, "xlate on 32-bit MSI region"); 863 + return ret; 864 + } 865 + /* Choose TVE XXX Use PHB3 Control Register */ 866 + tve_sel = (addr >> 59) & 1; 867 + tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel]; 868 + pnv_phb3_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret); 869 + break; 870 + case 01: 871 + phb3_error(phb, "xlate on 64-bit MSI region"); 872 + break; 873 + default: 874 + phb3_error(phb, "xlate on unsupported address 0x%"PRIx64, addr); 875 + } 876 + return ret; 877 + } 878 + 879 + #define TYPE_PNV_PHB3_IOMMU_MEMORY_REGION "pnv-phb3-iommu-memory-region" 880 + #define PNV_PHB3_IOMMU_MEMORY_REGION(obj) \ 881 + OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_PNV_PHB3_IOMMU_MEMORY_REGION) 882 + 883 + static void pnv_phb3_iommu_memory_region_class_init(ObjectClass *klass, 884 + void *data) 885 + { 886 + IOMMUMemoryRegionClass *imrc = 
IOMMU_MEMORY_REGION_CLASS(klass); 887 + 888 + imrc->translate = pnv_phb3_translate_iommu; 889 + } 890 + 891 + static const TypeInfo pnv_phb3_iommu_memory_region_info = { 892 + .parent = TYPE_IOMMU_MEMORY_REGION, 893 + .name = TYPE_PNV_PHB3_IOMMU_MEMORY_REGION, 894 + .class_init = pnv_phb3_iommu_memory_region_class_init, 895 + }; 896 + 897 + /* 898 + * MSI/MSIX memory region implementation. 899 + * The handler handles both MSI and MSIX. 900 + */ 901 + static void pnv_phb3_msi_write(void *opaque, hwaddr addr, 902 + uint64_t data, unsigned size) 903 + { 904 + PnvPhb3DMASpace *ds = opaque; 905 + 906 + /* Resolve PE# */ 907 + if (!pnv_phb3_resolve_pe(ds)) { 908 + phb3_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", 909 + ds->bus, pci_bus_num(ds->bus), ds->devfn); 910 + return; 911 + } 912 + 913 + pnv_phb3_msi_send(&ds->phb->msis, addr, data, ds->pe_num); 914 + } 915 + 916 + /* There is no .read as the read result is undefined by PCI spec */ 917 + static uint64_t pnv_phb3_msi_read(void *opaque, hwaddr addr, unsigned size) 918 + { 919 + PnvPhb3DMASpace *ds = opaque; 920 + 921 + phb3_error(ds->phb, "invalid read @ 0x%" HWADDR_PRIx, addr); 922 + return -1; 923 + } 924 + 925 + static const MemoryRegionOps pnv_phb3_msi_ops = { 926 + .read = pnv_phb3_msi_read, 927 + .write = pnv_phb3_msi_write, 928 + .endianness = DEVICE_LITTLE_ENDIAN 929 + }; 930 + 931 + static AddressSpace *pnv_phb3_dma_iommu(PCIBus *bus, void *opaque, int devfn) 932 + { 933 + PnvPHB3 *phb = opaque; 934 + PnvPhb3DMASpace *ds; 935 + 936 + QLIST_FOREACH(ds, &phb->dma_spaces, list) { 937 + if (ds->bus == bus && ds->devfn == devfn) { 938 + break; 939 + } 940 + } 941 + 942 + if (ds == NULL) { 943 + ds = g_malloc0(sizeof(PnvPhb3DMASpace)); 944 + ds->bus = bus; 945 + ds->devfn = devfn; 946 + ds->pe_num = PHB_INVALID_PE; 947 + ds->phb = phb; 948 + memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr), 949 + TYPE_PNV_PHB3_IOMMU_MEMORY_REGION, 950 + OBJECT(phb), "phb3_iommu", UINT64_MAX); 951 + 
        /* Wire up the per-device DMA address space and the two MSI
         * windows (32-bit and 64-bit) backing this DMA space */
        address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr),
                           "phb3_iommu");
        memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb3_msi_ops,
                              ds, "msi32", 0x10000);
        memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb3_msi_ops,
                              ds, "msi64", 0x100000);
        pnv_phb3_update_msi_regions(ds);

        QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list);
    }
    return &ds->dma_as;
}

/*
 * Instance init: create the child objects (LSI source, MSI source,
 * PBCQ and root port) owned by the PHB3. Realization and wiring is
 * done in pnv_phb3_realize().
 */
static void pnv_phb3_instance_init(Object *obj)
{
    PnvPHB3 *phb = PNV_PHB3(obj);

    QLIST_INIT(&phb->dma_spaces);

    /* LSI sources */
    object_initialize_child(obj, "lsi", &phb->lsis, sizeof(phb->lsis),
                            TYPE_ICS, &error_abort, NULL);

    /* Default init ... will be fixed by HW inits */
    phb->lsis.offset = 0;

    /* MSI sources */
    object_initialize_child(obj, "msi", &phb->msis, sizeof(phb->msis),
                            TYPE_PHB3_MSI, &error_abort, NULL);

    /* Power Bus Common Queue */
    object_initialize_child(obj, "pbcq", &phb->pbcq, sizeof(phb->pbcq),
                            TYPE_PNV_PBCQ, &error_abort, NULL);

    /* Root Port */
    object_initialize_child(obj, "root", &phb->root, sizeof(phb->root),
                            TYPE_PNV_PHB3_ROOT_PORT, &error_abort, NULL);
    qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0));
    qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false);
}

/*
 * Realize: validate the PHB index, realize the child interrupt
 * sources and PBCQ, create the MMIO regions and register the PCIe
 * root bus with its single root port.
 */
static void pnv_phb3_realize(DeviceState *dev, Error **errp)
{
    PnvPHB3 *phb = PNV_PHB3(dev);
    PCIHostState *pci = PCI_HOST_BRIDGE(dev);
    PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
    Error *local_err = NULL;
    int i;

    if (phb->phb_id >= PNV8_CHIP_PHB3_MAX) {
        error_setg(errp, "invalid PHB index: %d", phb->phb_id);
        return;
    }

    /* LSI sources */
    object_property_set_link(OBJECT(&phb->lsis), OBJECT(pnv), "xics",
                             &error_abort);
    object_property_set_int(OBJECT(&phb->lsis), PNV_PHB3_NUM_LSI, "nr-irqs",
                            &error_abort);
    object_property_set_bool(OBJECT(&phb->lsis), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* LSIs are level-sensitive by definition */
    for (i = 0; i < phb->lsis.nr_irqs; i++) {
        ics_set_irq_type(&phb->lsis, i, true);
    }

    phb->qirqs = qemu_allocate_irqs(ics_set_irq, &phb->lsis, phb->lsis.nr_irqs);

    /* MSI sources */
    object_property_set_link(OBJECT(&phb->msis), OBJECT(phb), "phb",
                             &error_abort);
    object_property_set_link(OBJECT(&phb->msis), OBJECT(pnv), "xics",
                             &error_abort);
    object_property_set_int(OBJECT(&phb->msis), PHB3_MAX_MSI, "nr-irqs",
                            &error_abort);
    object_property_set_bool(OBJECT(&phb->msis), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Power Bus Common Queue */
    object_property_set_link(OBJECT(&phb->pbcq), OBJECT(phb), "phb",
                             &error_abort);
    object_property_set_bool(OBJECT(&phb->pbcq), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Controller Registers */
    memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb3_reg_ops, phb,
                          "phb3-regs", 0x1000);

    /*
     * PHB3 doesn't support IO space. However, qemu gets very upset if
     * we don't have an IO region to anchor IO BARs onto so we just
     * initialize one which we never hook up to anything
     */
    memory_region_init(&phb->pci_io, OBJECT(phb), "pci-io", 0x10000);
    memory_region_init(&phb->pci_mmio, OBJECT(phb), "pci-mmio",
                       PCI_MMIO_TOTAL_SIZE);

    pci->bus = pci_register_root_bus(dev, "root-bus",
                                     pnv_phb3_set_irq, pnv_phb3_map_irq, phb,
                                     &phb->pci_mmio, &phb->pci_io,
                                     0, 4, TYPE_PNV_PHB3_ROOT_BUS);

    pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb);

    /* Add a single Root port */
    qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id);
    qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id);
    qdev_set_parent_bus(DEVICE(&phb->root), BUS(pci->bus));
    qdev_init_nofail(DEVICE(&phb->root));
}

/*
 * Called by the PBCQ when a BAR is remapped: re-anchor the PHB
 * register space inside the PBCQ PHB BAR and revalidate the M32/M64
 * windows.
 */
void pnv_phb3_update_regions(PnvPHB3 *phb)
{
    PnvPBCQState *pbcq = &phb->pbcq;

    /* Unmap first always */
    if (memory_region_is_mapped(&phb->mr_regs)) {
        memory_region_del_subregion(&pbcq->phbbar, &phb->mr_regs);
    }

    /* Map registers if enabled */
    if (memory_region_is_mapped(&pbcq->phbbar)) {
        /* TODO: We should use the PHB BAR 2 register but we don't ... */
        memory_region_add_subregion(&pbcq->phbbar, 0, &phb->mr_regs);
    }

    /* Check/update m32 */
    if (memory_region_is_mapped(&phb->mr_m32)) {
        pnv_phb3_check_m32(phb);
    }
    pnv_phb3_check_all_m64s(phb);
}

/* Root bus path of the form "00<chip>:<phb>", cached in the PHB state */
static const char *pnv_phb3_root_bus_path(PCIHostState *host_bridge,
                                          PCIBus *rootbus)
{
    PnvPHB3 *phb = PNV_PHB3(host_bridge);

    snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x",
             phb->chip_id, phb->phb_id);
    return phb->bus_path;
}

static Property pnv_phb3_properties[] = {
    DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0),
    DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_phb3_class_init(ObjectClass *klass, void *data)
{
    PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    hc->root_bus_path = pnv_phb3_root_bus_path;
    dc->realize = pnv_phb3_realize;
    device_class_set_props(dc, pnv_phb3_properties);
    set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
}

static const TypeInfo pnv_phb3_type_info = {
    .name          = TYPE_PNV_PHB3,
    .parent        = TYPE_PCIE_HOST_BRIDGE,
    .instance_size = sizeof(PnvPHB3),
    .class_init    = pnv_phb3_class_init,
    .instance_init = pnv_phb3_instance_init,
};

static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);

    /*
     * PHB3 has only a single root complex. Enforce the limit on the
     * parent bus
     */
    k->max_dev = 1;
}

static const TypeInfo pnv_phb3_root_bus_info = {
    .name = TYPE_PNV_PHB3_ROOT_BUS,
    .parent = TYPE_PCIE_BUS,
    .class_init = pnv_phb3_root_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_PCIE_DEVICE },
        { }
    },
};

/* Root port realize only chains to the PCIE root port parent realize */
static void pnv_phb3_root_port_realize(DeviceState *dev, Error **errp)
{
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev);
    Error *local_err = NULL;

    rpc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}

static void pnv_phb3_root_port_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass);

    dc->desc     = "IBM PHB3 PCIE Root Port";

    device_class_set_parent_realize(dc, pnv_phb3_root_port_realize,
                                    &rpc->parent_realize);

    k->vendor_id = PCI_VENDOR_ID_IBM;
    k->device_id = 0x03dc;
    k->revision  = 0;

    /* Capability offsets in the root port config space */
    rpc->exp_offset = 0x48;
    rpc->aer_offset = 0x100;
}

static const TypeInfo pnv_phb3_root_port_info = {
    .name          = TYPE_PNV_PHB3_ROOT_PORT,
    .parent        = TYPE_PCIE_ROOT_PORT,
    .instance_size = sizeof(PnvPHB3RootPort),
    .class_init    = pnv_phb3_root_port_class_init,
};

static void pnv_phb3_register_types(void)
{
    type_register_static(&pnv_phb3_root_bus_info);
    type_register_static(&pnv_phb3_root_port_info);
    type_register_static(&pnv_phb3_type_info);
    type_register_static(&pnv_phb3_iommu_memory_region_info);
}

type_init(pnv_phb3_register_types)
+349
hw/pci-host/pnv_phb3_msi.c
··· 1 + /* 2 + * QEMU PowerPC PowerNV (POWER8) PHB3 model 3 + * 4 + * Copyright (c) 2014-2020, IBM Corporation. 5 + * 6 + * This code is licensed under the GPL version 2 or later. See the 7 + * COPYING file in the top-level directory. 8 + */ 9 + #include "qemu/osdep.h" 10 + #include "qemu/log.h" 11 + #include "qapi/error.h" 12 + #include "qemu-common.h" 13 + #include "hw/pci-host/pnv_phb3_regs.h" 14 + #include "hw/pci-host/pnv_phb3.h" 15 + #include "hw/ppc/pnv.h" 16 + #include "hw/pci/msi.h" 17 + #include "monitor/monitor.h" 18 + #include "hw/irq.h" 19 + #include "hw/qdev-properties.h" 20 + #include "sysemu/reset.h" 21 + 22 + static uint64_t phb3_msi_ive_addr(PnvPHB3 *phb, int srcno) 23 + { 24 + uint64_t ivtbar = phb->regs[PHB_IVT_BAR >> 3]; 25 + uint64_t phbctl = phb->regs[PHB_CONTROL >> 3]; 26 + 27 + if (!(ivtbar & PHB_IVT_BAR_ENABLE)) { 28 + qemu_log_mask(LOG_GUEST_ERROR, "Failed access to disable IVT BAR !"); 29 + return 0; 30 + } 31 + 32 + if (srcno >= (ivtbar & PHB_IVT_LENGTH_MASK)) { 33 + qemu_log_mask(LOG_GUEST_ERROR, "MSI out of bounds (%d vs 0x%"PRIx64")", 34 + srcno, (uint64_t) (ivtbar & PHB_IVT_LENGTH_MASK)); 35 + return 0; 36 + } 37 + 38 + ivtbar &= PHB_IVT_BASE_ADDRESS_MASK; 39 + 40 + if (phbctl & PHB_CTRL_IVE_128_BYTES) { 41 + return ivtbar + 128 * srcno; 42 + } else { 43 + return ivtbar + 16 * srcno; 44 + } 45 + } 46 + 47 + static bool phb3_msi_read_ive(PnvPHB3 *phb, int srcno, uint64_t *out_ive) 48 + { 49 + uint64_t ive_addr, ive; 50 + 51 + ive_addr = phb3_msi_ive_addr(phb, srcno); 52 + if (!ive_addr) { 53 + return false; 54 + } 55 + 56 + if (dma_memory_read(&address_space_memory, ive_addr, &ive, sizeof(ive))) { 57 + qemu_log_mask(LOG_GUEST_ERROR, "Failed to read IVE at 0x%" PRIx64, 58 + ive_addr); 59 + return false; 60 + } 61 + *out_ive = be64_to_cpu(ive); 62 + 63 + return true; 64 + } 65 + 66 + static void phb3_msi_set_p(Phb3MsiState *msi, int srcno, uint8_t gen) 67 + { 68 + uint64_t ive_addr; 69 + uint8_t p = 0x01 | (gen << 1); 70 + 71 + 
ive_addr = phb3_msi_ive_addr(msi->phb, srcno); 72 + if (!ive_addr) { 73 + return; 74 + } 75 + 76 + if (dma_memory_write(&address_space_memory, ive_addr + 4, &p, 1)) { 77 + qemu_log_mask(LOG_GUEST_ERROR, 78 + "Failed to write IVE (set P) at 0x%" PRIx64, ive_addr); 79 + } 80 + } 81 + 82 + static void phb3_msi_set_q(Phb3MsiState *msi, int srcno) 83 + { 84 + uint64_t ive_addr; 85 + uint8_t q = 0x01; 86 + 87 + ive_addr = phb3_msi_ive_addr(msi->phb, srcno); 88 + if (!ive_addr) { 89 + return; 90 + } 91 + 92 + if (dma_memory_write(&address_space_memory, ive_addr + 5, &q, 1)) { 93 + qemu_log_mask(LOG_GUEST_ERROR, 94 + "Failed to write IVE (set Q) at 0x%" PRIx64, ive_addr); 95 + } 96 + } 97 + 98 + static void phb3_msi_try_send(Phb3MsiState *msi, int srcno, bool force) 99 + { 100 + ICSState *ics = ICS(msi); 101 + uint64_t ive; 102 + uint64_t server, prio, pq, gen; 103 + 104 + if (!phb3_msi_read_ive(msi->phb, srcno, &ive)) { 105 + return; 106 + } 107 + 108 + server = GETFIELD(IODA2_IVT_SERVER, ive); 109 + prio = GETFIELD(IODA2_IVT_PRIORITY, ive); 110 + if (!force) { 111 + pq = GETFIELD(IODA2_IVT_Q, ive) | (GETFIELD(IODA2_IVT_P, ive) << 1); 112 + } else { 113 + pq = 0; 114 + } 115 + gen = GETFIELD(IODA2_IVT_GEN, ive); 116 + 117 + /* 118 + * The low order 2 bits are the link pointer (Type II interrupts). 119 + * Shift back to get a valid IRQ server. 
120 + */ 121 + server >>= 2; 122 + 123 + switch (pq) { 124 + case 0: /* 00 */ 125 + if (prio == 0xff) { 126 + /* Masked, set Q */ 127 + phb3_msi_set_q(msi, srcno); 128 + } else { 129 + /* Enabled, set P and send */ 130 + phb3_msi_set_p(msi, srcno, gen); 131 + icp_irq(ics, server, srcno + ics->offset, prio); 132 + } 133 + break; 134 + case 2: /* 10 */ 135 + /* Already pending, set Q */ 136 + phb3_msi_set_q(msi, srcno); 137 + break; 138 + case 1: /* 01 */ 139 + case 3: /* 11 */ 140 + default: 141 + /* Just drop stuff if Q already set */ 142 + break; 143 + } 144 + } 145 + 146 + static void phb3_msi_set_irq(void *opaque, int srcno, int val) 147 + { 148 + Phb3MsiState *msi = PHB3_MSI(opaque); 149 + 150 + if (val) { 151 + phb3_msi_try_send(msi, srcno, false); 152 + } 153 + } 154 + 155 + 156 + void pnv_phb3_msi_send(Phb3MsiState *msi, uint64_t addr, uint16_t data, 157 + int32_t dev_pe) 158 + { 159 + ICSState *ics = ICS(msi); 160 + uint64_t ive; 161 + uint16_t pe; 162 + uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f); 163 + 164 + if (src >= ics->nr_irqs) { 165 + qemu_log_mask(LOG_GUEST_ERROR, "MSI %d out of bounds", src); 166 + return; 167 + } 168 + if (dev_pe >= 0) { 169 + if (!phb3_msi_read_ive(msi->phb, src, &ive)) { 170 + return; 171 + } 172 + pe = GETFIELD(IODA2_IVT_PE, ive); 173 + if (pe != dev_pe) { 174 + qemu_log_mask(LOG_GUEST_ERROR, 175 + "MSI %d send by PE#%d but assigned to PE#%d", 176 + src, dev_pe, pe); 177 + return; 178 + } 179 + } 180 + qemu_irq_pulse(msi->qirqs[src]); 181 + } 182 + 183 + void pnv_phb3_msi_ffi(Phb3MsiState *msi, uint64_t val) 184 + { 185 + /* Emit interrupt */ 186 + pnv_phb3_msi_send(msi, val, 0, -1); 187 + 188 + /* Clear FFI lock */ 189 + msi->phb->regs[PHB_FFI_LOCK >> 3] = 0; 190 + } 191 + 192 + static void phb3_msi_reject(ICSState *ics, uint32_t nr) 193 + { 194 + Phb3MsiState *msi = PHB3_MSI(ics); 195 + unsigned int srcno = nr - ics->offset; 196 + unsigned int idx = srcno >> 6; 197 + unsigned int bit = 1ull << (srcno & 0x3f); 198 + 
199 + assert(srcno < PHB3_MAX_MSI); 200 + 201 + msi->rba[idx] |= bit; 202 + msi->rba_sum |= (1u << idx); 203 + } 204 + 205 + static void phb3_msi_resend(ICSState *ics) 206 + { 207 + Phb3MsiState *msi = PHB3_MSI(ics); 208 + unsigned int i, j; 209 + 210 + if (msi->rba_sum == 0) { 211 + return; 212 + } 213 + 214 + for (i = 0; i < 32; i++) { 215 + if ((msi->rba_sum & (1u << i)) == 0) { 216 + continue; 217 + } 218 + msi->rba_sum &= ~(1u << i); 219 + for (j = 0; j < 64; j++) { 220 + if ((msi->rba[i] & (1ull << j)) == 0) { 221 + continue; 222 + } 223 + msi->rba[i] &= ~(1u << j); 224 + phb3_msi_try_send(msi, i * 64 + j, true); 225 + } 226 + } 227 + } 228 + 229 + static void phb3_msi_reset(DeviceState *dev) 230 + { 231 + Phb3MsiState *msi = PHB3_MSI(dev); 232 + ICSStateClass *icsc = ICS_GET_CLASS(dev); 233 + 234 + icsc->parent_reset(dev); 235 + 236 + memset(msi->rba, 0, sizeof(msi->rba)); 237 + msi->rba_sum = 0; 238 + } 239 + 240 + static void phb3_msi_reset_handler(void *dev) 241 + { 242 + phb3_msi_reset(dev); 243 + } 244 + 245 + void pnv_phb3_msi_update_config(Phb3MsiState *msi, uint32_t base, 246 + uint32_t count) 247 + { 248 + ICSState *ics = ICS(msi); 249 + 250 + if (count > PHB3_MAX_MSI) { 251 + count = PHB3_MAX_MSI; 252 + } 253 + ics->nr_irqs = count; 254 + ics->offset = base; 255 + } 256 + 257 + static void phb3_msi_realize(DeviceState *dev, Error **errp) 258 + { 259 + Phb3MsiState *msi = PHB3_MSI(dev); 260 + ICSState *ics = ICS(msi); 261 + ICSStateClass *icsc = ICS_GET_CLASS(ics); 262 + Error *local_err = NULL; 263 + 264 + assert(msi->phb); 265 + 266 + icsc->parent_realize(dev, &local_err); 267 + if (local_err) { 268 + error_propagate(errp, local_err); 269 + return; 270 + } 271 + 272 + msi->qirqs = qemu_allocate_irqs(phb3_msi_set_irq, msi, ics->nr_irqs); 273 + 274 + qemu_register_reset(phb3_msi_reset_handler, dev); 275 + } 276 + 277 + static void phb3_msi_instance_init(Object *obj) 278 + { 279 + Phb3MsiState *msi = PHB3_MSI(obj); 280 + ICSState *ics = ICS(obj); 281 
+ 282 + object_property_add_link(obj, "phb", TYPE_PNV_PHB3, 283 + (Object **)&msi->phb, 284 + object_property_allow_set_link, 285 + OBJ_PROP_LINK_STRONG, 286 + &error_abort); 287 + 288 + /* Will be overriden later */ 289 + ics->offset = 0; 290 + } 291 + 292 + static void phb3_msi_class_init(ObjectClass *klass, void *data) 293 + { 294 + DeviceClass *dc = DEVICE_CLASS(klass); 295 + ICSStateClass *isc = ICS_CLASS(klass); 296 + 297 + device_class_set_parent_realize(dc, phb3_msi_realize, 298 + &isc->parent_realize); 299 + device_class_set_parent_reset(dc, phb3_msi_reset, 300 + &isc->parent_reset); 301 + 302 + isc->reject = phb3_msi_reject; 303 + isc->resend = phb3_msi_resend; 304 + } 305 + 306 + static const TypeInfo phb3_msi_info = { 307 + .name = TYPE_PHB3_MSI, 308 + .parent = TYPE_ICS, 309 + .instance_size = sizeof(Phb3MsiState), 310 + .class_init = phb3_msi_class_init, 311 + .class_size = sizeof(ICSStateClass), 312 + .instance_init = phb3_msi_instance_init, 313 + }; 314 + 315 + static void pnv_phb3_msi_register_types(void) 316 + { 317 + type_register_static(&phb3_msi_info); 318 + } 319 + 320 + type_init(pnv_phb3_msi_register_types); 321 + 322 + void pnv_phb3_msi_pic_print_info(Phb3MsiState *msi, Monitor *mon) 323 + { 324 + ICSState *ics = ICS(msi); 325 + int i; 326 + 327 + monitor_printf(mon, "ICS %4x..%4x %p\n", 328 + ics->offset, ics->offset + ics->nr_irqs - 1, ics); 329 + 330 + for (i = 0; i < ics->nr_irqs; i++) { 331 + uint64_t ive; 332 + 333 + if (!phb3_msi_read_ive(msi->phb, i, &ive)) { 334 + return; 335 + } 336 + 337 + if (GETFIELD(IODA2_IVT_PRIORITY, ive) == 0xff) { 338 + continue; 339 + } 340 + 341 + monitor_printf(mon, " %4x %c%c server=%04x prio=%02x gen=%d\n", 342 + ics->offset + i, 343 + GETFIELD(IODA2_IVT_P, ive) ? 'P' : '-', 344 + GETFIELD(IODA2_IVT_Q, ive) ? 'Q' : '-', 345 + (uint32_t) GETFIELD(IODA2_IVT_SERVER, ive) >> 2, 346 + (uint32_t) GETFIELD(IODA2_IVT_PRIORITY, ive), 347 + (uint32_t) GETFIELD(IODA2_IVT_GEN, ive)); 348 + } 349 + }
+357
hw/pci-host/pnv_phb3_pbcq.c
··· 1 + /* 2 + * QEMU PowerPC PowerNV (POWER8) PHB3 model 3 + * 4 + * Copyright (c) 2014-2020, IBM Corporation. 5 + * 6 + * This code is licensed under the GPL version 2 or later. See the 7 + * COPYING file in the top-level directory. 8 + */ 9 + #include "qemu/osdep.h" 10 + #include "qapi/error.h" 11 + #include "qemu-common.h" 12 + #include "qemu/log.h" 13 + #include "target/ppc/cpu.h" 14 + #include "hw/ppc/fdt.h" 15 + #include "hw/pci-host/pnv_phb3_regs.h" 16 + #include "hw/pci-host/pnv_phb3.h" 17 + #include "hw/ppc/pnv.h" 18 + #include "hw/ppc/pnv_xscom.h" 19 + #include "hw/pci/pci_bridge.h" 20 + #include "hw/pci/pci_bus.h" 21 + 22 + #include <libfdt.h> 23 + 24 + #define phb3_pbcq_error(pbcq, fmt, ...) \ 25 + qemu_log_mask(LOG_GUEST_ERROR, "phb3_pbcq[%d:%d]: " fmt "\n", \ 26 + (pbcq)->phb->chip_id, (pbcq)->phb->phb_id, ## __VA_ARGS__) 27 + 28 + static uint64_t pnv_pbcq_nest_xscom_read(void *opaque, hwaddr addr, 29 + unsigned size) 30 + { 31 + PnvPBCQState *pbcq = PNV_PBCQ(opaque); 32 + uint32_t offset = addr >> 3; 33 + 34 + return pbcq->nest_regs[offset]; 35 + } 36 + 37 + static uint64_t pnv_pbcq_pci_xscom_read(void *opaque, hwaddr addr, 38 + unsigned size) 39 + { 40 + PnvPBCQState *pbcq = PNV_PBCQ(opaque); 41 + uint32_t offset = addr >> 3; 42 + 43 + return pbcq->pci_regs[offset]; 44 + } 45 + 46 + static uint64_t pnv_pbcq_spci_xscom_read(void *opaque, hwaddr addr, 47 + unsigned size) 48 + { 49 + PnvPBCQState *pbcq = PNV_PBCQ(opaque); 50 + uint32_t offset = addr >> 3; 51 + 52 + if (offset == PBCQ_SPCI_ASB_DATA) { 53 + return pnv_phb3_reg_read(pbcq->phb, 54 + pbcq->spci_regs[PBCQ_SPCI_ASB_ADDR], 8); 55 + } 56 + return pbcq->spci_regs[offset]; 57 + } 58 + 59 + static void pnv_pbcq_update_map(PnvPBCQState *pbcq) 60 + { 61 + uint64_t bar_en = pbcq->nest_regs[PBCQ_NEST_BAR_EN]; 62 + uint64_t bar, mask, size; 63 + 64 + /* 65 + * NOTE: This will really not work well if those are remapped 66 + * after the PHB has created its sub regions. 
We could do better 67 + * if we had a way to resize regions but we don't really care 68 + * that much in practice as the stuff below really only happens 69 + * once early during boot 70 + */ 71 + 72 + /* Handle unmaps */ 73 + if (memory_region_is_mapped(&pbcq->mmbar0) && 74 + !(bar_en & PBCQ_NEST_BAR_EN_MMIO0)) { 75 + memory_region_del_subregion(get_system_memory(), &pbcq->mmbar0); 76 + } 77 + if (memory_region_is_mapped(&pbcq->mmbar1) && 78 + !(bar_en & PBCQ_NEST_BAR_EN_MMIO1)) { 79 + memory_region_del_subregion(get_system_memory(), &pbcq->mmbar1); 80 + } 81 + if (memory_region_is_mapped(&pbcq->phbbar) && 82 + !(bar_en & PBCQ_NEST_BAR_EN_PHB)) { 83 + memory_region_del_subregion(get_system_memory(), &pbcq->phbbar); 84 + } 85 + 86 + /* Update PHB */ 87 + pnv_phb3_update_regions(pbcq->phb); 88 + 89 + /* Handle maps */ 90 + if (!memory_region_is_mapped(&pbcq->mmbar0) && 91 + (bar_en & PBCQ_NEST_BAR_EN_MMIO0)) { 92 + bar = pbcq->nest_regs[PBCQ_NEST_MMIO_BAR0] >> 14; 93 + mask = pbcq->nest_regs[PBCQ_NEST_MMIO_MASK0]; 94 + size = ((~mask) >> 14) + 1; 95 + memory_region_init(&pbcq->mmbar0, OBJECT(pbcq), "pbcq-mmio0", size); 96 + memory_region_add_subregion(get_system_memory(), bar, &pbcq->mmbar0); 97 + pbcq->mmio0_base = bar; 98 + pbcq->mmio0_size = size; 99 + } 100 + if (!memory_region_is_mapped(&pbcq->mmbar1) && 101 + (bar_en & PBCQ_NEST_BAR_EN_MMIO1)) { 102 + bar = pbcq->nest_regs[PBCQ_NEST_MMIO_BAR1] >> 14; 103 + mask = pbcq->nest_regs[PBCQ_NEST_MMIO_MASK1]; 104 + size = ((~mask) >> 14) + 1; 105 + memory_region_init(&pbcq->mmbar1, OBJECT(pbcq), "pbcq-mmio1", size); 106 + memory_region_add_subregion(get_system_memory(), bar, &pbcq->mmbar1); 107 + pbcq->mmio1_base = bar; 108 + pbcq->mmio1_size = size; 109 + } 110 + if (!memory_region_is_mapped(&pbcq->phbbar) 111 + && (bar_en & PBCQ_NEST_BAR_EN_PHB)) { 112 + bar = pbcq->nest_regs[PBCQ_NEST_PHB_BAR] >> 14; 113 + size = 0x1000; 114 + memory_region_init(&pbcq->phbbar, OBJECT(pbcq), "pbcq-phb", size); 115 + 
memory_region_add_subregion(get_system_memory(), bar, &pbcq->phbbar); 116 + } 117 + 118 + /* Update PHB */ 119 + pnv_phb3_update_regions(pbcq->phb); 120 + } 121 + 122 + static void pnv_pbcq_nest_xscom_write(void *opaque, hwaddr addr, 123 + uint64_t val, unsigned size) 124 + { 125 + PnvPBCQState *pbcq = PNV_PBCQ(opaque); 126 + uint32_t reg = addr >> 3; 127 + 128 + switch (reg) { 129 + case PBCQ_NEST_MMIO_BAR0: 130 + case PBCQ_NEST_MMIO_BAR1: 131 + case PBCQ_NEST_MMIO_MASK0: 132 + case PBCQ_NEST_MMIO_MASK1: 133 + if (pbcq->nest_regs[PBCQ_NEST_BAR_EN] & 134 + (PBCQ_NEST_BAR_EN_MMIO0 | 135 + PBCQ_NEST_BAR_EN_MMIO1)) { 136 + phb3_pbcq_error(pbcq, "Changing enabled BAR unsupported"); 137 + } 138 + pbcq->nest_regs[reg] = val & 0xffffffffc0000000ull; 139 + break; 140 + case PBCQ_NEST_PHB_BAR: 141 + if (pbcq->nest_regs[PBCQ_NEST_BAR_EN] & PBCQ_NEST_BAR_EN_PHB) { 142 + phb3_pbcq_error(pbcq, "Changing enabled BAR unsupported"); 143 + } 144 + pbcq->nest_regs[reg] = val & 0xfffffffffc000000ull; 145 + break; 146 + case PBCQ_NEST_BAR_EN: 147 + pbcq->nest_regs[reg] = val & 0xf800000000000000ull; 148 + pnv_pbcq_update_map(pbcq); 149 + pnv_phb3_remap_irqs(pbcq->phb); 150 + break; 151 + case PBCQ_NEST_IRSN_COMPARE: 152 + case PBCQ_NEST_IRSN_MASK: 153 + pbcq->nest_regs[reg] = val & PBCQ_NEST_IRSN_COMP; 154 + pnv_phb3_remap_irqs(pbcq->phb); 155 + break; 156 + case PBCQ_NEST_LSI_SRC_ID: 157 + pbcq->nest_regs[reg] = val & PBCQ_NEST_LSI_SRC; 158 + pnv_phb3_remap_irqs(pbcq->phb); 159 + break; 160 + default: 161 + phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, 162 + addr, val); 163 + } 164 + } 165 + 166 + static void pnv_pbcq_pci_xscom_write(void *opaque, hwaddr addr, 167 + uint64_t val, unsigned size) 168 + { 169 + PnvPBCQState *pbcq = PNV_PBCQ(opaque); 170 + uint32_t reg = addr >> 3; 171 + 172 + switch (reg) { 173 + case PBCQ_PCI_BAR2: 174 + pbcq->pci_regs[reg] = val & 0xfffffffffc000000ull; 175 + pnv_pbcq_update_map(pbcq); 176 + default: 177 + phb3_pbcq_error(pbcq, "%s 
@0x%"HWADDR_PRIx"=%"PRIx64, __func__, 178 + addr, val); 179 + } 180 + } 181 + 182 + static void pnv_pbcq_spci_xscom_write(void *opaque, hwaddr addr, 183 + uint64_t val, unsigned size) 184 + { 185 + PnvPBCQState *pbcq = PNV_PBCQ(opaque); 186 + uint32_t reg = addr >> 3; 187 + 188 + switch (reg) { 189 + case PBCQ_SPCI_ASB_ADDR: 190 + pbcq->spci_regs[reg] = val & 0xfff; 191 + break; 192 + case PBCQ_SPCI_ASB_STATUS: 193 + pbcq->spci_regs[reg] &= ~val; 194 + break; 195 + case PBCQ_SPCI_ASB_DATA: 196 + pnv_phb3_reg_write(pbcq->phb, pbcq->spci_regs[PBCQ_SPCI_ASB_ADDR], 197 + val, 8); 198 + break; 199 + case PBCQ_SPCI_AIB_CAPP_EN: 200 + case PBCQ_SPCI_CAPP_SEC_TMR: 201 + break; 202 + default: 203 + phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, 204 + addr, val); 205 + } 206 + } 207 + 208 + static const MemoryRegionOps pnv_pbcq_nest_xscom_ops = { 209 + .read = pnv_pbcq_nest_xscom_read, 210 + .write = pnv_pbcq_nest_xscom_write, 211 + .valid.min_access_size = 8, 212 + .valid.max_access_size = 8, 213 + .impl.min_access_size = 8, 214 + .impl.max_access_size = 8, 215 + .endianness = DEVICE_BIG_ENDIAN, 216 + }; 217 + 218 + static const MemoryRegionOps pnv_pbcq_pci_xscom_ops = { 219 + .read = pnv_pbcq_pci_xscom_read, 220 + .write = pnv_pbcq_pci_xscom_write, 221 + .valid.min_access_size = 8, 222 + .valid.max_access_size = 8, 223 + .impl.min_access_size = 8, 224 + .impl.max_access_size = 8, 225 + .endianness = DEVICE_BIG_ENDIAN, 226 + }; 227 + 228 + static const MemoryRegionOps pnv_pbcq_spci_xscom_ops = { 229 + .read = pnv_pbcq_spci_xscom_read, 230 + .write = pnv_pbcq_spci_xscom_write, 231 + .valid.min_access_size = 8, 232 + .valid.max_access_size = 8, 233 + .impl.min_access_size = 8, 234 + .impl.max_access_size = 8, 235 + .endianness = DEVICE_BIG_ENDIAN, 236 + }; 237 + 238 + static void pnv_pbcq_default_bars(PnvPBCQState *pbcq) 239 + { 240 + uint64_t mm0, mm1, reg; 241 + PnvPHB3 *phb = pbcq->phb; 242 + 243 + mm0 = 0x3d00000000000ull + 0x4000000000ull * phb->chip_id 
+ 244 + 0x1000000000ull * phb->phb_id; 245 + mm1 = 0x3ff8000000000ull + 0x0200000000ull * phb->chip_id + 246 + 0x0080000000ull * phb->phb_id; 247 + reg = 0x3fffe40000000ull + 0x0000400000ull * phb->chip_id + 248 + 0x0000100000ull * phb->phb_id; 249 + 250 + pbcq->nest_regs[PBCQ_NEST_MMIO_BAR0] = mm0 << 14; 251 + pbcq->nest_regs[PBCQ_NEST_MMIO_BAR1] = mm1 << 14; 252 + pbcq->nest_regs[PBCQ_NEST_PHB_BAR] = reg << 14; 253 + pbcq->nest_regs[PBCQ_NEST_MMIO_MASK0] = 0x3fff000000000ull << 14; 254 + pbcq->nest_regs[PBCQ_NEST_MMIO_MASK1] = 0x3ffff80000000ull << 14; 255 + pbcq->pci_regs[PBCQ_PCI_BAR2] = reg << 14; 256 + } 257 + 258 + static void pnv_pbcq_realize(DeviceState *dev, Error **errp) 259 + { 260 + PnvPBCQState *pbcq = PNV_PBCQ(dev); 261 + PnvPHB3 *phb; 262 + char name[32]; 263 + 264 + assert(pbcq->phb); 265 + phb = pbcq->phb; 266 + 267 + /* TODO: Fix OPAL to do that: establish default BAR values */ 268 + pnv_pbcq_default_bars(pbcq); 269 + 270 + /* Initialize the XSCOM region for the PBCQ registers */ 271 + snprintf(name, sizeof(name), "xscom-pbcq-nest-%d.%d", 272 + phb->chip_id, phb->phb_id); 273 + pnv_xscom_region_init(&pbcq->xscom_nest_regs, OBJECT(dev), 274 + &pnv_pbcq_nest_xscom_ops, pbcq, name, 275 + PNV_XSCOM_PBCQ_NEST_SIZE); 276 + snprintf(name, sizeof(name), "xscom-pbcq-pci-%d.%d", 277 + phb->chip_id, phb->phb_id); 278 + pnv_xscom_region_init(&pbcq->xscom_pci_regs, OBJECT(dev), 279 + &pnv_pbcq_pci_xscom_ops, pbcq, name, 280 + PNV_XSCOM_PBCQ_PCI_SIZE); 281 + snprintf(name, sizeof(name), "xscom-pbcq-spci-%d.%d", 282 + phb->chip_id, phb->phb_id); 283 + pnv_xscom_region_init(&pbcq->xscom_spci_regs, OBJECT(dev), 284 + &pnv_pbcq_spci_xscom_ops, pbcq, name, 285 + PNV_XSCOM_PBCQ_SPCI_SIZE); 286 + } 287 + 288 + static int pnv_pbcq_dt_xscom(PnvXScomInterface *dev, void *fdt, 289 + int xscom_offset) 290 + { 291 + const char compat[] = "ibm,power8-pbcq"; 292 + PnvPHB3 *phb = PNV_PBCQ(dev)->phb; 293 + char *name; 294 + int offset; 295 + uint32_t lpc_pcba = 
PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * phb->phb_id; 296 + uint32_t reg[] = { 297 + cpu_to_be32(lpc_pcba), 298 + cpu_to_be32(PNV_XSCOM_PBCQ_NEST_SIZE), 299 + cpu_to_be32(PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id), 300 + cpu_to_be32(PNV_XSCOM_PBCQ_PCI_SIZE), 301 + cpu_to_be32(PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id), 302 + cpu_to_be32(PNV_XSCOM_PBCQ_SPCI_SIZE) 303 + }; 304 + 305 + name = g_strdup_printf("pbcq@%x", lpc_pcba); 306 + offset = fdt_add_subnode(fdt, xscom_offset, name); 307 + _FDT(offset); 308 + g_free(name); 309 + 310 + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); 311 + 312 + _FDT((fdt_setprop_cell(fdt, offset, "ibm,phb-index", phb->phb_id))); 313 + _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", phb->chip_id))); 314 + _FDT((fdt_setprop(fdt, offset, "compatible", compat, 315 + sizeof(compat)))); 316 + return 0; 317 + } 318 + 319 + static void phb3_pbcq_instance_init(Object *obj) 320 + { 321 + PnvPBCQState *pbcq = PNV_PBCQ(obj); 322 + 323 + object_property_add_link(obj, "phb", TYPE_PNV_PHB3, 324 + (Object **)&pbcq->phb, 325 + object_property_allow_set_link, 326 + OBJ_PROP_LINK_STRONG, 327 + &error_abort); 328 + } 329 + 330 + static void pnv_pbcq_class_init(ObjectClass *klass, void *data) 331 + { 332 + DeviceClass *dc = DEVICE_CLASS(klass); 333 + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); 334 + 335 + xdc->dt_xscom = pnv_pbcq_dt_xscom; 336 + 337 + dc->realize = pnv_pbcq_realize; 338 + } 339 + 340 + static const TypeInfo pnv_pbcq_type_info = { 341 + .name = TYPE_PNV_PBCQ, 342 + .parent = TYPE_DEVICE, 343 + .instance_size = sizeof(PnvPBCQState), 344 + .instance_init = phb3_pbcq_instance_init, 345 + .class_init = pnv_pbcq_class_init, 346 + .interfaces = (InterfaceInfo[]) { 347 + { TYPE_PNV_XSCOM_INTERFACE }, 348 + { } 349 + } 350 + }; 351 + 352 + static void pnv_pbcq_register_types(void) 353 + { 354 + type_register_static(&pnv_pbcq_type_info); 355 + } 356 + 357 + type_init(pnv_pbcq_register_types)
+67 -2
hw/ppc/pnv.c
··· 616 616 static void pnv_chip_power8_pic_print_info(PnvChip *chip, Monitor *mon) 617 617 { 618 618 Pnv8Chip *chip8 = PNV8_CHIP(chip); 619 + int i; 619 620 620 621 ics_pic_print_info(&chip8->psi.ics, mon); 622 + for (i = 0; i < chip->num_phbs; i++) { 623 + pnv_phb3_msi_pic_print_info(&chip8->phbs[i].msis, mon); 624 + ics_pic_print_info(&chip8->phbs[i].lsis, mon); 625 + } 621 626 } 622 627 623 628 static void pnv_chip_power9_pic_print_info(PnvChip *chip, Monitor *mon) ··· 1026 1031 1027 1032 static void pnv_chip_power8_instance_init(Object *obj) 1028 1033 { 1034 + PnvChip *chip = PNV_CHIP(obj); 1029 1035 Pnv8Chip *chip8 = PNV8_CHIP(obj); 1036 + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); 1037 + int i; 1030 1038 1031 1039 object_property_add_link(obj, "xics", TYPE_XICS_FABRIC, 1032 1040 (Object **)&chip8->xics, ··· 1045 1053 1046 1054 object_initialize_child(obj, "homer", &chip8->homer, sizeof(chip8->homer), 1047 1055 TYPE_PNV8_HOMER, &error_abort, NULL); 1056 + 1057 + for (i = 0; i < pcc->num_phbs; i++) { 1058 + object_initialize_child(obj, "phb[*]", &chip8->phbs[i], 1059 + sizeof(chip8->phbs[i]), TYPE_PNV_PHB3, 1060 + &error_abort, NULL); 1061 + } 1062 + 1063 + /* 1064 + * Number of PHBs is the chip default 1065 + */ 1066 + chip->num_phbs = pcc->num_phbs; 1048 1067 } 1049 1068 1050 1069 static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp) ··· 1083 1102 Pnv8Chip *chip8 = PNV8_CHIP(dev); 1084 1103 Pnv8Psi *psi8 = &chip8->psi; 1085 1104 Error *local_err = NULL; 1105 + int i; 1086 1106 1087 1107 assert(chip8->xics); 1088 1108 ··· 1163 1183 /* Homer mmio region */ 1164 1184 memory_region_add_subregion(get_system_memory(), PNV_HOMER_BASE(chip), 1165 1185 &chip8->homer.regs); 1186 + 1187 + /* PHB3 controllers */ 1188 + for (i = 0; i < chip->num_phbs; i++) { 1189 + PnvPHB3 *phb = &chip8->phbs[i]; 1190 + PnvPBCQState *pbcq = &phb->pbcq; 1191 + 1192 + object_property_set_int(OBJECT(phb), i, "index", &error_fatal); 1193 + object_property_set_int(OBJECT(phb), 
chip->chip_id, "chip-id", 1194 + &error_fatal); 1195 + object_property_set_bool(OBJECT(phb), true, "realized", &local_err); 1196 + if (local_err) { 1197 + error_propagate(errp, local_err); 1198 + return; 1199 + } 1200 + qdev_set_parent_bus(DEVICE(phb), sysbus_get_default()); 1201 + 1202 + /* Populate the XSCOM address space. */ 1203 + pnv_xscom_add_subregion(chip, 1204 + PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * phb->phb_id, 1205 + &pbcq->xscom_nest_regs); 1206 + pnv_xscom_add_subregion(chip, 1207 + PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id, 1208 + &pbcq->xscom_pci_regs); 1209 + pnv_xscom_add_subregion(chip, 1210 + PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id, 1211 + &pbcq->xscom_spci_regs); 1212 + } 1166 1213 } 1167 1214 1168 1215 static uint32_t pnv_chip_power8_xscom_pcba(PnvChip *chip, uint64_t addr) ··· 1178 1225 1179 1226 k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */ 1180 1227 k->cores_mask = POWER8E_CORE_MASK; 1228 + k->num_phbs = 3; 1181 1229 k->core_pir = pnv_chip_core_pir_p8; 1182 1230 k->intc_create = pnv_chip_power8_intc_create; 1183 1231 k->intc_reset = pnv_chip_power8_intc_reset; ··· 1201 1249 1202 1250 k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */ 1203 1251 k->cores_mask = POWER8_CORE_MASK; 1252 + k->num_phbs = 3; 1204 1253 k->core_pir = pnv_chip_core_pir_p8; 1205 1254 k->intc_create = pnv_chip_power8_intc_create; 1206 1255 k->intc_reset = pnv_chip_power8_intc_reset; ··· 1224 1273 1225 1274 k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */ 1226 1275 k->cores_mask = POWER8_CORE_MASK; 1276 + k->num_phbs = 3; 1227 1277 k->core_pir = pnv_chip_core_pir_p8; 1228 1278 k->intc_create = pnv_chip_power8_intc_create; 1229 1279 k->intc_reset = pnv_chip_power8_intc_reset; ··· 1748 1798 static ICSState *pnv_ics_get(XICSFabric *xi, int irq) 1749 1799 { 1750 1800 PnvMachineState *pnv = PNV_MACHINE(xi); 1751 - int i; 1801 + int i, j; 1752 1802 1753 1803 for (i = 0; i < pnv->num_chips; i++) { 1804 + PnvChip *chip = 
pnv->chips[i]; 1754 1805 Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); 1755 1806 1756 1807 if (ics_valid_irq(&chip8->psi.ics, irq)) { 1757 1808 return &chip8->psi.ics; 1758 1809 } 1810 + for (j = 0; j < chip->num_phbs; j++) { 1811 + if (ics_valid_irq(&chip8->phbs[j].lsis, irq)) { 1812 + return &chip8->phbs[j].lsis; 1813 + } 1814 + if (ics_valid_irq(ICS(&chip8->phbs[j].msis), irq)) { 1815 + return ICS(&chip8->phbs[j].msis); 1816 + } 1817 + } 1759 1818 } 1760 1819 return NULL; 1761 1820 } ··· 1763 1822 static void pnv_ics_resend(XICSFabric *xi) 1764 1823 { 1765 1824 PnvMachineState *pnv = PNV_MACHINE(xi); 1766 - int i; 1825 + int i, j; 1767 1826 1768 1827 for (i = 0; i < pnv->num_chips; i++) { 1828 + PnvChip *chip = pnv->chips[i]; 1769 1829 Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); 1830 + 1770 1831 ics_resend(&chip8->psi.ics); 1832 + for (j = 0; j < chip->num_phbs; j++) { 1833 + ics_resend(&chip8->phbs[j].lsis); 1834 + ics_resend(ICS(&chip8->phbs[j].msis)); 1835 + } 1771 1836 } 1772 1837 } 1773 1838
+164
include/hw/pci-host/pnv_phb3.h
/*
 * QEMU PowerPC PowerNV (POWER8) PHB3 model
 *
 * Copyright (c) 2014-2020, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#ifndef PCI_HOST_PNV_PHB3_H
#define PCI_HOST_PNV_PHB3_H

#include "hw/pci/pcie_host.h"
#include "hw/pci/pcie_port.h"
#include "hw/ppc/xics.h"

/* Forward declaration: the bridge type is defined at the bottom of this
 * header but is referenced by the sub-component structures below. */
typedef struct PnvPHB3 PnvPHB3;

/*
 * PHB3 XICS Source for MSIs
 *
 * An ICSState subtype holding the MSI "Reject Bit Array" state used by
 * the PHB3 interrupt logic.
 */
#define TYPE_PHB3_MSI "phb3-msi"
#define PHB3_MSI(obj) OBJECT_CHECK(Phb3MsiState, (obj), TYPE_PHB3_MSI)

#define PHB3_MAX_MSI    2048

typedef struct Phb3MsiState {
    ICSState ics;               /* parent interrupt source object */
    qemu_irq *qirqs;

    PnvPHB3 *phb;               /* owning bridge */
    uint64_t rba[PHB3_MAX_MSI / 64];  /* Reject Bit Array, 1 bit per MSI */
    uint32_t rba_sum;           /* summary of pending RBA bits */
} Phb3MsiState;

void pnv_phb3_msi_update_config(Phb3MsiState *msis, uint32_t base,
                                uint32_t count);
void pnv_phb3_msi_send(Phb3MsiState *msis, uint64_t addr, uint16_t data,
                       int32_t dev_pe);
void pnv_phb3_msi_ffi(Phb3MsiState *msis, uint64_t val);
void pnv_phb3_msi_pic_print_info(Phb3MsiState *msis, Monitor *mon);


/*
 * We have one such address space wrapper per possible device under
 * the PHB since they need to be assigned statically at qemu device
 * creation time. The relationship to a PE is done later dynamically.
 * This means we can potentially create a lot of these guys. Q35
 * stores them as some kind of radix tree but we never really need to
 * do fast lookups so instead we simply keep a QLIST of them for now,
 * we can add the radix if needed later on.
 *
 * We do cache the PE number to speed things up a bit though.
 */
typedef struct PnvPhb3DMASpace {
    PCIBus *bus;
    uint8_t devfn;
    int pe_num;                 /* Cached PE number */
#define PHB_INVALID_PE (-1)     /* pe_num sentinel: not resolved yet */
    PnvPHB3 *phb;               /* owning bridge */
    AddressSpace dma_as;
    IOMMUMemoryRegion dma_mr;   /* IOMMU-translated DMA window */
    MemoryRegion msi32_mr;      /* 32-bit MSI window */
    MemoryRegion msi64_mr;      /* 64-bit MSI window */
    QLIST_ENTRY(PnvPhb3DMASpace) list;
} PnvPhb3DMASpace;

/*
 * PHB3 Power Bus Common Queue (PBCQ): the PowerBus-side logic of the
 * bridge, exposed through three XSCOM register ranges (nest, pci, spci).
 */
#define TYPE_PNV_PBCQ "pnv-pbcq"
#define PNV_PBCQ(obj) OBJECT_CHECK(PnvPBCQState, (obj), TYPE_PNV_PBCQ)

typedef struct PnvPBCQState {
    DeviceState parent;

    /* Base addresses of the three XSCOM register ranges */
    uint32_t nest_xbase;
    uint32_t spci_xbase;
    uint32_t pci_xbase;
#define PBCQ_NEST_REGS_COUNT    0x46
#define PBCQ_PCI_REGS_COUNT     0x15
#define PBCQ_SPCI_REGS_COUNT    0x5

    uint64_t nest_regs[PBCQ_NEST_REGS_COUNT];
    uint64_t spci_regs[PBCQ_SPCI_REGS_COUNT];
    uint64_t pci_regs[PBCQ_PCI_REGS_COUNT];
    /* MMIO BARs and the PHB register BAR mapped by the PBCQ */
    MemoryRegion mmbar0;
    MemoryRegion mmbar1;
    MemoryRegion phbbar;
    uint64_t mmio0_base;
    uint64_t mmio0_size;
    uint64_t mmio1_base;
    uint64_t mmio1_size;
    PnvPHB3 *phb;               /* owning bridge */

    MemoryRegion xscom_nest_regs;
    MemoryRegion xscom_pci_regs;
    MemoryRegion xscom_spci_regs;
} PnvPBCQState;

/*
 * PHB3 PCIe Root port
 */
#define TYPE_PNV_PHB3_ROOT_BUS "pnv-phb3-root-bus"

#define TYPE_PNV_PHB3_ROOT_PORT "pnv-phb3-root-port"

typedef struct PnvPHB3RootPort {
    PCIESlot parent_obj;
} PnvPHB3RootPort;

/*
 * PHB3 PCIe Host Bridge for PowerNV machines (POWER8)
 */
#define TYPE_PNV_PHB3 "pnv-phb3"
#define PNV_PHB3(obj) OBJECT_CHECK(PnvPHB3, (obj), TYPE_PNV_PHB3)

#define PNV_PHB3_NUM_M64        16
#define PNV_PHB3_NUM_REGS       (0x1000 >> 3)   /* 4KB of 8-byte registers */
#define PNV_PHB3_NUM_LSI        8
#define PNV_PHB3_NUM_PE         256

#define PCI_MMIO_TOTAL_SIZE     (0x1ull << 60)

struct PnvPHB3 {
    PCIExpressHost parent_obj;

    uint32_t chip_id;           /* owning chip */
    uint32_t phb_id;            /* index of this PHB on the chip */
    char bus_path[8];

    uint64_t regs[PNV_PHB3_NUM_REGS];  /* PHB MMIO register file */
    MemoryRegion mr_regs;

    /* 32-bit and 64-bit MMIO windows into the PCI address space */
    MemoryRegion mr_m32;
    MemoryRegion mr_m64[PNV_PHB3_NUM_M64];
    MemoryRegion pci_mmio;
    MemoryRegion pci_io;

    /* On-chip IODA tables (LIST, LXIVT, TVT, M64BT, MDT, PEEV) */
    uint64_t ioda_LIST[8];
    uint64_t ioda_LXIVT[8];
    uint64_t ioda_TVT[512];
    uint64_t ioda_M64BT[16];
    uint64_t ioda_MDT[256];
    uint64_t ioda_PEEV[4];

    uint32_t total_irq;
    ICSState lsis;              /* LSI interrupt source */
    qemu_irq *qirqs;
    Phb3MsiState msis;          /* MSI interrupt source */

    PnvPBCQState pbcq;          /* PowerBus-side logic */

    PnvPHB3RootPort root;

    QLIST_HEAD(, PnvPhb3DMASpace) dma_spaces;
};

uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size);
void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size);
void pnv_phb3_update_regions(PnvPHB3 *phb);
void pnv_phb3_remap_irqs(PnvPHB3 *phb);

#endif /* PCI_HOST_PNV_PHB3_H */
+450
include/hw/pci-host/pnv_phb3_regs.h
/*
 * QEMU PowerPC PowerNV (POWER8) PHB3 model
 *
 * Copyright (c) 2013-2020, IBM Corporation.
 *
 * This code is licensed under the GPL version 2 or later. See the
 * COPYING file in the top-level directory.
 */

#ifndef PCI_HOST_PNV_PHB3_REGS_H
#define PCI_HOST_PNV_PHB3_REGS_H

#include "qemu/host-utils.h"

/*
 * NOTE(review): the PPC_BIT()/PPC_BITMASK() macros used below are
 * expected to be in scope in every including translation unit —
 * confirm against the PPC CPU headers.
 */

/*
 * QEMU version of the GETFIELD/SETFIELD macros
 *
 * These are common with the PnvXive model.
 */
/* Extract the field selected by a contiguous bit mask, right-justified. */
static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
{
    return (word & mask) >> ctz64(mask);
}

/* Replace the field selected by a contiguous bit mask with 'value'. */
static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
                                uint64_t value)
{
    return (word & ~mask) | ((value << ctz64(mask)) & mask);
}

/*
 * PBCQ XSCOM registers
 */

#define PBCQ_NEST_IRSN_COMPARE  0x1a
#define   PBCQ_NEST_IRSN_COMP           PPC_BITMASK(0, 18)
#define PBCQ_NEST_IRSN_MASK     0x1b
#define PBCQ_NEST_LSI_SRC_ID    0x1f
#define   PBCQ_NEST_LSI_SRC             PPC_BITMASK(0, 7)
#define PBCQ_NEST_REGS_COUNT    0x46
#define PBCQ_NEST_MMIO_BAR0     0x40
#define PBCQ_NEST_MMIO_BAR1     0x41
#define PBCQ_NEST_PHB_BAR       0x42
#define PBCQ_NEST_MMIO_MASK0    0x43
#define PBCQ_NEST_MMIO_MASK1    0x44
#define PBCQ_NEST_BAR_EN        0x45
#define   PBCQ_NEST_BAR_EN_MMIO0        PPC_BIT(0)
#define   PBCQ_NEST_BAR_EN_MMIO1        PPC_BIT(1)
#define   PBCQ_NEST_BAR_EN_PHB          PPC_BIT(2)
#define   PBCQ_NEST_BAR_EN_IRSN_RX      PPC_BIT(3)
#define   PBCQ_NEST_BAR_EN_IRSN_TX      PPC_BIT(4)

#define PBCQ_PCI_REGS_COUNT     0x15
#define PBCQ_PCI_BAR2           0x0b

#define PBCQ_SPCI_REGS_COUNT    0x5
#define PBCQ_SPCI_ASB_ADDR      0x0
#define PBCQ_SPCI_ASB_STATUS    0x1
#define PBCQ_SPCI_ASB_DATA      0x2
#define PBCQ_SPCI_AIB_CAPP_EN   0x3
#define PBCQ_SPCI_CAPP_SEC_TMR  0x4

/*
 * PHB MMIO registers
 */

/* PHB Fundamental register set A */
#define PHB_LSI_SOURCE_ID               0x100
#define   PHB_LSI_SRC_ID                PPC_BITMASK(5, 12)
#define PHB_DMA_CHAN_STATUS             0x110
#define   PHB_DMA_CHAN_ANY_ERR          PPC_BIT(27)
#define   PHB_DMA_CHAN_ANY_ERR1         PPC_BIT(28)
#define   PHB_DMA_CHAN_ANY_FREEZE       PPC_BIT(29)
#define PHB_CPU_LOADSTORE_STATUS        0x120
#define   PHB_CPU_LS_ANY_ERR            PPC_BIT(27)
#define   PHB_CPU_LS_ANY_ERR1           PPC_BIT(28)
#define   PHB_CPU_LS_ANY_FREEZE         PPC_BIT(29)
#define PHB_DMA_MSI_NODE_ID             0x128
#define   PHB_DMAMSI_NID_FIXED          PPC_BIT(0)
#define   PHB_DMAMSI_NID                PPC_BITMASK(24, 31)
#define PHB_CONFIG_DATA                 0x130
#define PHB_LOCK0                       0x138
#define PHB_CONFIG_ADDRESS              0x140
#define   PHB_CA_ENABLE                 PPC_BIT(0)
#define   PHB_CA_BUS                    PPC_BITMASK(4, 11)
#define   PHB_CA_DEV                    PPC_BITMASK(12, 16)
#define   PHB_CA_FUNC                   PPC_BITMASK(17, 19)
#define   PHB_CA_REG                    PPC_BITMASK(20, 31)
#define   PHB_CA_PE                     PPC_BITMASK(40, 47)
#define PHB_LOCK1                       0x148
#define PHB_IVT_BAR                     0x150
#define   PHB_IVT_BAR_ENABLE            PPC_BIT(0)
#define   PHB_IVT_BASE_ADDRESS_MASK     PPC_BITMASK(14, 48)
#define   PHB_IVT_LENGTH_MASK           PPC_BITMASK(52, 63)
#define PHB_RBA_BAR                     0x158
#define   PHB_RBA_BAR_ENABLE            PPC_BIT(0)
#define   PHB_RBA_BASE_ADDRESS          PPC_BITMASK(14, 55)
#define PHB_PHB3_CONFIG                 0x160
#define   PHB_PHB3C_64B_TCE_EN          PPC_BIT(2)
#define   PHB_PHB3C_32BIT_MSI_EN        PPC_BIT(8)
#define   PHB_PHB3C_64BIT_MSI_EN        PPC_BIT(14)
#define   PHB_PHB3C_M32_EN              PPC_BIT(16)
#define PHB_RTT_BAR                     0x168
#define   PHB_RTT_BAR_ENABLE            PPC_BIT(0)
#define   PHB_RTT_BASE_ADDRESS_MASK     PPC_BITMASK(14, 46)
#define PHB_PELTV_BAR                   0x188
#define   PHB_PELTV_BAR_ENABLE          PPC_BIT(0)
#define   PHB_PELTV_BASE_ADDRESS        PPC_BITMASK(14, 50)
#define PHB_M32_BASE_ADDR               0x190
#define PHB_M32_BASE_MASK               0x198
#define PHB_M32_START_ADDR              0x1a0
#define PHB_PEST_BAR                    0x1a8
#define   PHB_PEST_BAR_ENABLE           PPC_BIT(0)
#define   PHB_PEST_BASE_ADDRESS         PPC_BITMASK(14, 51)
#define PHB_M64_UPPER_BITS              0x1f0
#define PHB_INTREP_TIMER                0x1f8
#define PHB_DMARD_SYNC                  0x200
#define   PHB_DMARD_SYNC_START          PPC_BIT(0)
#define   PHB_DMARD_SYNC_COMPLETE       PPC_BIT(1)
#define PHB_RTC_INVALIDATE              0x208
#define   PHB_RTC_INVALIDATE_ALL        PPC_BIT(0)
#define   PHB_RTC_INVALIDATE_RID        PPC_BITMASK(16, 31)
#define PHB_TCE_KILL                    0x210
#define   PHB_TCE_KILL_ALL              PPC_BIT(0)
#define PHB_TCE_SPEC_CTL                0x218
#define PHB_IODA_ADDR                   0x220
#define   PHB_IODA_AD_AUTOINC           PPC_BIT(0)
#define   PHB_IODA_AD_TSEL              PPC_BITMASK(11, 15)
#define   PHB_IODA_AD_TADR              PPC_BITMASK(55, 63)
#define PHB_IODA_DATA0                  0x228
#define PHB_FFI_REQUEST                 0x238
#define   PHB_FFI_LOCK_CLEAR            PPC_BIT(3)
#define   PHB_FFI_REQUEST_ISN           PPC_BITMASK(49, 59)
#define PHB_FFI_LOCK                    0x240
#define   PHB_FFI_LOCK_STATE            PPC_BIT(0)
#define PHB_XIVE_UPDATE                 0x248 /* Broken in DD1 */
#define PHB_PHB3_GEN_CAP                0x250
#define PHB_PHB3_TCE_CAP                0x258
#define PHB_PHB3_IRQ_CAP                0x260
#define PHB_PHB3_EEH_CAP                0x268
#define PHB_IVC_INVALIDATE              0x2a0
#define   PHB_IVC_INVALIDATE_ALL        PPC_BIT(0)
#define   PHB_IVC_INVALIDATE_SID        PPC_BITMASK(16, 31)
#define PHB_IVC_UPDATE                  0x2a8
#define   PHB_IVC_UPDATE_ENABLE_P       PPC_BIT(0)
#define   PHB_IVC_UPDATE_ENABLE_Q       PPC_BIT(1)
#define   PHB_IVC_UPDATE_ENABLE_SERVER  PPC_BIT(2)
#define   PHB_IVC_UPDATE_ENABLE_PRI     PPC_BIT(3)
#define   PHB_IVC_UPDATE_ENABLE_GEN     PPC_BIT(4)
#define   PHB_IVC_UPDATE_ENABLE_CON     PPC_BIT(5)
#define   PHB_IVC_UPDATE_GEN_MATCH      PPC_BITMASK(6, 7)
#define   PHB_IVC_UPDATE_SERVER         PPC_BITMASK(8, 23)
#define   PHB_IVC_UPDATE_PRI            PPC_BITMASK(24, 31)
#define   PHB_IVC_UPDATE_GEN            PPC_BITMASK(32, 33)
#define   PHB_IVC_UPDATE_P              PPC_BITMASK(34, 34)
#define   PHB_IVC_UPDATE_Q              PPC_BITMASK(35, 35)
#define   PHB_IVC_UPDATE_SID            PPC_BITMASK(48, 63)
#define PHB_PAPR_ERR_INJ_CTL            0x2b0
#define   PHB_PAPR_ERR_INJ_CTL_INB      PPC_BIT(0)
#define   PHB_PAPR_ERR_INJ_CTL_OUTB     PPC_BIT(1)
#define   PHB_PAPR_ERR_INJ_CTL_STICKY   PPC_BIT(2)
#define   PHB_PAPR_ERR_INJ_CTL_CFG      PPC_BIT(3)
#define   PHB_PAPR_ERR_INJ_CTL_RD       PPC_BIT(4)
#define   PHB_PAPR_ERR_INJ_CTL_WR       PPC_BIT(5)
#define   PHB_PAPR_ERR_INJ_CTL_FREEZE   PPC_BIT(6)
#define PHB_PAPR_ERR_INJ_ADDR           0x2b8
#define   PHB_PAPR_ERR_INJ_ADDR_MMIO    PPC_BITMASK(16, 63)
#define PHB_PAPR_ERR_INJ_MASK           0x2c0
#define   PHB_PAPR_ERR_INJ_MASK_CFG     PPC_BITMASK(4, 11)
#define   PHB_PAPR_ERR_INJ_MASK_MMIO    PPC_BITMASK(16, 63)
#define PHB_ETU_ERR_SUMMARY             0x2c8

/* UTL registers */
#define UTL_SYS_BUS_CONTROL             0x400
#define UTL_STATUS                      0x408
#define UTL_SYS_BUS_AGENT_STATUS        0x410
#define UTL_SYS_BUS_AGENT_ERR_SEVERITY  0x418
#define UTL_SYS_BUS_AGENT_IRQ_EN        0x420
#define UTL_SYS_BUS_BURST_SZ_CONF       0x440
#define UTL_REVISION_ID                 0x448
#define UTL_BCLK_DOMAIN_DBG1            0x460
#define UTL_BCLK_DOMAIN_DBG2            0x468
#define UTL_BCLK_DOMAIN_DBG3            0x470
#define UTL_BCLK_DOMAIN_DBG4            0x478
#define UTL_BCLK_DOMAIN_DBG5            0x480
#define UTL_BCLK_DOMAIN_DBG6            0x488
#define UTL_OUT_POST_HDR_BUF_ALLOC      0x4c0
#define UTL_OUT_POST_DAT_BUF_ALLOC      0x4d0
#define UTL_IN_POST_HDR_BUF_ALLOC       0x4e0
#define UTL_IN_POST_DAT_BUF_ALLOC       0x4f0
#define UTL_OUT_NP_BUF_ALLOC            0x500
#define UTL_IN_NP_BUF_ALLOC             0x510
#define UTL_PCIE_TAGS_ALLOC             0x520
#define UTL_GBIF_READ_TAGS_ALLOC        0x530
#define UTL_PCIE_PORT_CONTROL           0x540
#define UTL_PCIE_PORT_STATUS            0x548
#define UTL_PCIE_PORT_ERROR_SEV         0x550
#define UTL_PCIE_PORT_IRQ_EN            0x558
#define UTL_RC_STATUS                   0x560
#define UTL_RC_ERR_SEVERITY             0x568
#define UTL_RC_IRQ_EN                   0x570
#define UTL_EP_STATUS                   0x578
#define UTL_EP_ERR_SEVERITY             0x580
#define UTL_EP_ERR_IRQ_EN               0x588
#define UTL_PCI_PM_CTRL1                0x590
#define UTL_PCI_PM_CTRL2                0x598
#define UTL_GP_CTL1                     0x5a0
#define UTL_GP_CTL2                     0x5a8
#define UTL_PCLK_DOMAIN_DBG1            0x5b0
#define UTL_PCLK_DOMAIN_DBG2            0x5b8
#define UTL_PCLK_DOMAIN_DBG3            0x5c0
#define UTL_PCLK_DOMAIN_DBG4            0x5c8

/* PCI-E Stack registers */
#define PHB_PCIE_SYSTEM_CONFIG          0x600
#define PHB_PCIE_BUS_NUMBER             0x608
#define PHB_PCIE_SYSTEM_TEST            0x618
#define PHB_PCIE_LINK_MANAGEMENT        0x630
#define   PHB_PCIE_LM_LINK_ACTIVE       PPC_BIT(8)
#define PHB_PCIE_DLP_TRAIN_CTL          0x640
#define   PHB_PCIE_DLP_TCTX_DISABLE     PPC_BIT(1)
#define   PHB_PCIE_DLP_TCRX_DISABLED    PPC_BIT(16)
#define   PHB_PCIE_DLP_INBAND_PRESENCE  PPC_BIT(19)
#define   PHB_PCIE_DLP_TC_DL_LINKUP     PPC_BIT(21)
#define   PHB_PCIE_DLP_TC_DL_PGRESET    PPC_BIT(22)
#define   PHB_PCIE_DLP_TC_DL_LINKACT    PPC_BIT(23)
#define PHB_PCIE_SLOP_LOOPBACK_STATUS   0x648
#define PHB_PCIE_SYS_LINK_INIT          0x668
#define PHB_PCIE_UTL_CONFIG             0x670
#define PHB_PCIE_DLP_CONTROL            0x678
#define PHB_PCIE_UTL_ERRLOG1            0x680
#define PHB_PCIE_UTL_ERRLOG2            0x688
#define PHB_PCIE_UTL_ERRLOG3            0x690
#define PHB_PCIE_UTL_ERRLOG4            0x698
#define PHB_PCIE_DLP_ERRLOG1            0x6a0
#define PHB_PCIE_DLP_ERRLOG2            0x6a8
#define PHB_PCIE_DLP_ERR_STATUS         0x6b0
#define PHB_PCIE_DLP_ERR_COUNTERS       0x6b8
#define PHB_PCIE_UTL_ERR_INJECT         0x6c0
#define PHB_PCIE_TLDLP_ERR_INJECT       0x6c8
#define PHB_PCIE_LANE_EQ_CNTL0          0x6d0
#define PHB_PCIE_LANE_EQ_CNTL1          0x6d8
#define PHB_PCIE_LANE_EQ_CNTL2          0x6e0
#define PHB_PCIE_LANE_EQ_CNTL3          0x6e8
#define PHB_PCIE_STRAPPING              0x700

/* Fundamental register set B */
#define PHB_VERSION                     0x800
#define PHB_RESET                       0x808
#define PHB_CONTROL                     0x810
#define   PHB_CTRL_IVE_128_BYTES        PPC_BIT(24)
#define PHB_AIB_RX_CRED_INIT_TIMER      0x818
#define PHB_AIB_RX_CMD_CRED             0x820
#define PHB_AIB_RX_DATA_CRED            0x828
#define PHB_AIB_TX_CMD_CRED             0x830
#define PHB_AIB_TX_DATA_CRED            0x838
#define PHB_AIB_TX_CHAN_MAPPING         0x840
#define PHB_AIB_TAG_ENABLE              0x858
#define PHB_AIB_FENCE_CTRL              0x860
#define PHB_TCE_TAG_ENABLE              0x868
#define PHB_TCE_WATERMARK               0x870
#define PHB_TIMEOUT_CTRL1               0x878
#define PHB_TIMEOUT_CTRL2               0x880
#define PHB_Q_DMA_R                     0x888
#define   PHB_Q_DMA_R_QUIESCE_DMA       PPC_BIT(0)
#define   PHB_Q_DMA_R_AUTORESET         PPC_BIT(1)
#define   PHB_Q_DMA_R_DMA_RESP_STATUS   PPC_BIT(4)
#define   PHB_Q_DMA_R_MMIO_RESP_STATUS  PPC_BIT(5)
#define   PHB_Q_DMA_R_TCE_RESP_STATUS   PPC_BIT(6)
#define PHB_AIB_TAG_STATUS              0x900
#define PHB_TCE_TAG_STATUS              0x908

/* FIR & Error registers */
#define PHB_LEM_FIR_ACCUM               0xc00
#define PHB_LEM_FIR_AND_MASK            0xc08
#define PHB_LEM_FIR_OR_MASK             0xc10
#define PHB_LEM_ERROR_MASK              0xc18
#define PHB_LEM_ERROR_AND_MASK          0xc20
#define PHB_LEM_ERROR_OR_MASK           0xc28
#define PHB_LEM_ACTION0                 0xc30
#define PHB_LEM_ACTION1                 0xc38
#define PHB_LEM_WOF                     0xc40
#define PHB_ERR_STATUS                  0xc80
#define PHB_ERR1_STATUS                 0xc88
#define PHB_ERR_INJECT                  0xc90
#define PHB_ERR_LEM_ENABLE              0xc98
#define PHB_ERR_IRQ_ENABLE              0xca0
#define PHB_ERR_FREEZE_ENABLE           0xca8
#define PHB_ERR_AIB_FENCE_ENABLE        0xcb0
#define PHB_ERR_LOG_0                   0xcc0
#define PHB_ERR_LOG_1                   0xcc8
#define PHB_ERR_STATUS_MASK             0xcd0
#define PHB_ERR1_STATUS_MASK            0xcd8

#define PHB_OUT_ERR_STATUS              0xd00
#define PHB_OUT_ERR1_STATUS             0xd08
#define PHB_OUT_ERR_INJECT              0xd10
#define PHB_OUT_ERR_LEM_ENABLE          0xd18
#define PHB_OUT_ERR_IRQ_ENABLE          0xd20
#define PHB_OUT_ERR_FREEZE_ENABLE       0xd28
#define PHB_OUT_ERR_AIB_FENCE_ENABLE    0xd30
#define PHB_OUT_ERR_LOG_0               0xd40
#define PHB_OUT_ERR_LOG_1               0xd48
#define PHB_OUT_ERR_STATUS_MASK         0xd50
#define PHB_OUT_ERR1_STATUS_MASK        0xd58

#define PHB_INA_ERR_STATUS              0xd80
#define PHB_INA_ERR1_STATUS             0xd88
#define PHB_INA_ERR_INJECT              0xd90
#define PHB_INA_ERR_LEM_ENABLE          0xd98
#define PHB_INA_ERR_IRQ_ENABLE          0xda0
#define PHB_INA_ERR_FREEZE_ENABLE       0xda8
#define PHB_INA_ERR_AIB_FENCE_ENABLE    0xdb0
#define PHB_INA_ERR_LOG_0               0xdc0
#define PHB_INA_ERR_LOG_1               0xdc8
#define PHB_INA_ERR_STATUS_MASK         0xdd0
#define PHB_INA_ERR1_STATUS_MASK        0xdd8

#define PHB_INB_ERR_STATUS              0xe00
#define PHB_INB_ERR1_STATUS             0xe08
#define PHB_INB_ERR_INJECT              0xe10
#define PHB_INB_ERR_LEM_ENABLE          0xe18
#define PHB_INB_ERR_IRQ_ENABLE          0xe20
#define PHB_INB_ERR_FREEZE_ENABLE       0xe28
#define PHB_INB_ERR_AIB_FENCE_ENABLE    0xe30
#define PHB_INB_ERR_LOG_0               0xe40
#define PHB_INB_ERR_LOG_1               0xe48
#define PHB_INB_ERR_STATUS_MASK         0xe50
#define PHB_INB_ERR1_STATUS_MASK        0xe58

/* Performance monitor & Debug registers */
#define PHB_TRACE_CONTROL               0xf80
#define PHB_PERFMON_CONFIG              0xf88
#define PHB_PERFMON_CTR0                0xf90
#define PHB_PERFMON_CTR1                0xf98
#define PHB_PERFMON_CTR2                0xfa0
#define PHB_PERFMON_CTR3                0xfa8
#define PHB_HOTPLUG_OVERRIDE            0xfb0
#define   PHB_HPOVR_FORCE_RESAMPLE      PPC_BIT(9)
#define   PHB_HPOVR_PRESENCE_A          PPC_BIT(10)
#define   PHB_HPOVR_PRESENCE_B          PPC_BIT(11)
#define   PHB_HPOVR_LINK_ACTIVE         PPC_BIT(12)
#define   PHB_HPOVR_LINK_BIFURCATED     PPC_BIT(13)
#define   PHB_HPOVR_LINK_LANE_SWAPPED   PPC_BIT(14)

/*
 * IODA2 on-chip tables
 */

#define IODA2_TBL_LIST                  1
#define IODA2_TBL_LXIVT                 2
#define IODA2_TBL_IVC_CAM               3
#define IODA2_TBL_RBA                   4
#define IODA2_TBL_RCAM                  5
#define IODA2_TBL_MRT                   6
#define IODA2_TBL_PESTA                 7
#define IODA2_TBL_PESTB                 8
#define IODA2_TBL_TVT                   9
#define IODA2_TBL_TCAM                  10
#define IODA2_TBL_TDR                   11
#define IODA2_TBL_M64BT                 16
#define IODA2_TBL_M32DT                 17
#define IODA2_TBL_PEEV                  20

/* LXIVT */
#define   IODA2_LXIVT_SERVER            PPC_BITMASK(8, 23)
#define   IODA2_LXIVT_PRIORITY          PPC_BITMASK(24, 31)
#define   IODA2_LXIVT_NODE_ID           PPC_BITMASK(56, 63)

/* IVT */
#define   IODA2_IVT_SERVER              PPC_BITMASK(0, 23)
#define   IODA2_IVT_PRIORITY            PPC_BITMASK(24, 31)
#define   IODA2_IVT_GEN                 PPC_BITMASK(37, 38)
#define   IODA2_IVT_P                   PPC_BITMASK(39, 39)
#define   IODA2_IVT_Q                   PPC_BITMASK(47, 47)
#define   IODA2_IVT_PE                  PPC_BITMASK(48, 63)

/* TVT */
#define   IODA2_TVT_TABLE_ADDR          PPC_BITMASK(0, 47)
#define   IODA2_TVT_NUM_LEVELS          PPC_BITMASK(48, 50)
#define     IODA2_TVE_1_LEVEL           0
#define     IODA2_TVE_2_LEVELS          1
#define     IODA2_TVE_3_LEVELS          2
#define     IODA2_TVE_4_LEVELS          3
#define     IODA2_TVE_5_LEVELS          4
#define   IODA2_TVT_TCE_TABLE_SIZE      PPC_BITMASK(51, 55)
#define   IODA2_TVT_IO_PSIZE            PPC_BITMASK(59, 63)

/* PESTA */
#define   IODA2_PESTA_MMIO_FROZEN       PPC_BIT(0)

/* PESTB */
#define   IODA2_PESTB_DMA_STOPPED       PPC_BIT(0)

/* M32DT */
#define   IODA2_M32DT_PE                PPC_BITMASK(8, 15)

/* M64BT */
#define   IODA2_M64BT_ENABLE            PPC_BIT(0)
#define   IODA2_M64BT_SINGLE_PE         PPC_BIT(1)
#define   IODA2_M64BT_BASE              PPC_BITMASK(2, 31)
#define   IODA2_M64BT_MASK              PPC_BITMASK(34, 63)
#define   IODA2_M64BT_SINGLE_BASE       PPC_BITMASK(2, 26)
#define   IODA2_M64BT_PE_HI             PPC_BITMASK(27, 31)
#define   IODA2_M64BT_SINGLE_MASK       PPC_BITMASK(34, 58)
#define   IODA2_M64BT_PE_LOW            PPC_BITMASK(59, 63)

/*
 * IODA2 in-memory tables
 */

/*
 * PEST
 *
 * 2x8 bytes entries, PEST0 and PEST1
 */

#define IODA2_PEST0_MMIO_CAUSE          PPC_BIT(2)
#define IODA2_PEST0_CFG_READ            PPC_BIT(3)
#define IODA2_PEST0_CFG_WRITE           PPC_BIT(4)
#define IODA2_PEST0_TTYPE               PPC_BITMASK(5, 7)
#define   PEST_TTYPE_DMA_WRITE          0
#define   PEST_TTYPE_MSI                1
#define   PEST_TTYPE_DMA_READ           2
#define   PEST_TTYPE_DMA_READ_RESP      3
#define   PEST_TTYPE_MMIO_LOAD          4
#define   PEST_TTYPE_MMIO_STORE         5
#define   PEST_TTYPE_OTHER              7
#define IODA2_PEST0_CA_RETURN           PPC_BIT(8)
#define IODA2_PEST0_UTL_RTOS_TIMEOUT    PPC_BIT(8) /* Same bit as CA return */
#define IODA2_PEST0_UR_RETURN           PPC_BIT(9)
#define IODA2_PEST0_UTL_NONFATAL        PPC_BIT(10)
#define IODA2_PEST0_UTL_FATAL           PPC_BIT(11)
#define IODA2_PEST0_PARITY_UE           PPC_BIT(13)
#define IODA2_PEST0_UTL_CORRECTABLE     PPC_BIT(14)
#define IODA2_PEST0_UTL_INTERRUPT       PPC_BIT(15)
#define IODA2_PEST0_MMIO_XLATE          PPC_BIT(16)
#define IODA2_PEST0_IODA2_ERROR         PPC_BIT(16) /* Same bit as MMIO xlate */
#define IODA2_PEST0_TCE_PAGE_FAULT      PPC_BIT(18)
#define IODA2_PEST0_TCE_ACCESS_FAULT    PPC_BIT(19)
#define IODA2_PEST0_DMA_RESP_TIMEOUT    PPC_BIT(20)
#define IODA2_PEST0_AIB_SIZE_INVALID    PPC_BIT(21)
#define IODA2_PEST0_LEM_BIT             PPC_BITMASK(26, 31)
#define IODA2_PEST0_RID                 PPC_BITMASK(32, 47)
#define IODA2_PEST0_MSI_DATA            PPC_BITMASK(48, 63)

#define IODA2_PEST1_FAIL_ADDR           PPC_BITMASK(3, 63)


#endif /* PCI_HOST_PNV_PHB3_REGS_H */
+4
include/hw/ppc/pnv.h
··· 30 30 #include "hw/ppc/pnv_homer.h" 31 31 #include "hw/ppc/pnv_xive.h" 32 32 #include "hw/ppc/pnv_core.h" 33 + #include "hw/pci-host/pnv_phb3.h" 33 34 #include "hw/pci-host/pnv_phb4.h" 34 35 35 36 #define TYPE_PNV_CHIP "pnv-chip" ··· 76 77 Pnv8Psi psi; 77 78 PnvOCC occ; 78 79 PnvHomer homer; 80 + 81 + #define PNV8_CHIP_PHB3_MAX 4 82 + PnvPHB3 phbs[PNV8_CHIP_PHB3_MAX]; 79 83 80 84 XICSFabric *xics; 81 85 } Pnv8Chip;
+9
include/hw/ppc/pnv_xscom.h
··· 71 71 #define PNV_XSCOM_PBA_BASE 0x2013f00 72 72 #define PNV_XSCOM_PBA_SIZE 0x40 73 73 74 + #define PNV_XSCOM_PBCQ_NEST_BASE 0x2012000 75 + #define PNV_XSCOM_PBCQ_NEST_SIZE 0x46 76 + 77 + #define PNV_XSCOM_PBCQ_PCI_BASE 0x9012000 78 + #define PNV_XSCOM_PBCQ_PCI_SIZE 0x15 79 + 80 + #define PNV_XSCOM_PBCQ_SPCI_BASE 0x9013c00 81 + #define PNV_XSCOM_PBCQ_SPCI_SIZE 0x5 82 + 74 83 /* 75 84 * Layout of the XSCOM PCB addresses (POWER 9) 76 85 */
+5
include/hw/ppc/xics.h
··· 101 101 DeviceClass parent_class; 102 102 103 103 DeviceRealize parent_realize; 104 + DeviceReset parent_reset; 105 + 106 + void (*reject)(ICSState *s, uint32_t irq); 107 + void (*resend)(ICSState *s); 104 108 }; 105 109 106 110 struct ICSState { ··· 161 165 uint32_t icp_accept(ICPState *ss); 162 166 uint32_t icp_ipoll(ICPState *ss, uint32_t *mfrr); 163 167 void icp_eoi(ICPState *icp, uint32_t xirr); 168 + void icp_irq(ICSState *ics, int server, int nr, uint8_t priority); 164 169 void icp_reset(ICPState *icp); 165 170 166 171 void ics_write_xive(ICSState *ics, int nr, int server,