qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio
at jcs-hda-dma 3174 lines 101 kB view raw
1/* 2 * Physical memory management 3 * 4 * Copyright 2011 Red Hat, Inc. and/or its affiliates 5 * 6 * Authors: 7 * Avi Kivity <avi@redhat.com> 8 * 9 * This work is licensed under the terms of the GNU GPL, version 2. See 10 * the COPYING file in the top-level directory. 11 * 12 * Contributions after 2012-01-13 are licensed under the terms of the 13 * GNU GPL, version 2 or (at your option) any later version. 14 */ 15 16#include "qemu/osdep.h" 17#include "qapi/error.h" 18#include "qemu-common.h" 19#include "cpu.h" 20#include "exec/memory.h" 21#include "exec/address-spaces.h" 22#include "exec/ioport.h" 23#include "qapi/visitor.h" 24#include "qemu/bitops.h" 25#include "qemu/error-report.h" 26#include "qom/object.h" 27#include "trace-root.h" 28 29#include "exec/memory-internal.h" 30#include "exec/ram_addr.h" 31#include "sysemu/kvm.h" 32#include "sysemu/sysemu.h" 33#include "hw/misc/mmio_interface.h" 34#include "hw/qdev-properties.h" 35#include "migration/vmstate.h" 36 37//#define DEBUG_UNASSIGNED 38 39static unsigned memory_region_transaction_depth; 40static bool memory_region_update_pending; 41static bool ioeventfd_update_pending; 42static bool global_dirty_log = false; 43 44static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners 45 = QTAILQ_HEAD_INITIALIZER(memory_listeners); 46 47static QTAILQ_HEAD(, AddressSpace) address_spaces 48 = QTAILQ_HEAD_INITIALIZER(address_spaces); 49 50static GHashTable *flat_views; 51 52typedef struct AddrRange AddrRange; 53 54/* 55 * Note that signed integers are needed for negative offsetting in aliases 56 * (large MemoryRegion::alias_offset). 57 */ 58struct AddrRange { 59 Int128 start; 60 Int128 size; 61}; 62 63static AddrRange addrrange_make(Int128 start, Int128 size) 64{ 65 return (AddrRange) { start, size }; 66} 67 68static bool addrrange_equal(AddrRange r1, AddrRange r2) 69{ 70 return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size); 71} 72 73static Int128 addrrange_end(AddrRange r) 74{ 75 return int128_add(r.start, r.size); 76} 77 78static AddrRange addrrange_shift(AddrRange range, Int128 delta) 79{ 80 int128_addto(&range.start, delta); 81 return range; 82} 83 84static bool addrrange_contains(AddrRange range, Int128 addr) 85{ 86 return int128_ge(addr, range.start) 87 && int128_lt(addr, addrrange_end(range)); 88} 89 90static bool addrrange_intersects(AddrRange r1, AddrRange r2) 91{ 92 return addrrange_contains(r1, r2.start) 93 || addrrange_contains(r2, r1.start); 94} 95 96static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2) 97{ 98 Int128 start = int128_max(r1.start, r2.start); 99 Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2)); 100 return addrrange_make(start, int128_sub(end, start)); 101} 102 103enum ListenerDirection { Forward, Reverse }; 104 105#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \ 106 do { \ 107 MemoryListener *_listener; \ 108 \ 109 switch (_direction) { \ 110 case Forward: \ 111 QTAILQ_FOREACH(_listener, &memory_listeners, link) { \ 112 if (_listener->_callback) { \ 113 _listener->_callback(_listener, ##_args); \ 114 } \ 115 } \ 116 break; \ 117 case Reverse: \ 118 QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, \ 119 memory_listeners, link) { \ 120 if (_listener->_callback) { \ 121 _listener->_callback(_listener, ##_args); \ 122 } \ 123 } \ 124 break; \ 125 default: \ 126 abort(); \ 127 } \ 128 } while (0) 129 130#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \ 131 do { \ 132 MemoryListener *_listener; \ 133 struct memory_listeners_as *list = &(_as)->listeners; \ 134 \ 135 switch (_direction) { \ 136 case Forward: \ 137 QTAILQ_FOREACH(_listener, list, link_as) { \ 138 if (_listener->_callback) { \ 139 _listener->_callback(_listener, _section, ##_args); \ 140 } \ 141 } \ 142 break; \ 143 case Reverse: \ 144 QTAILQ_FOREACH_REVERSE(_listener, list, memory_listeners_as, \ 145 link_as) { \ 146 if (_listener->_callback) { \ 147 _listener->_callback(_listener, _section, ##_args); \ 148 } \ 149 } \ 150 break; \ 151 default: \ 152 abort(); \ 153 } \ 154 } while (0) 155 156/* No need to ref/unref .mr, the FlatRange keeps it alive. */ 157#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \ 158 do { \ 159 MemoryRegionSection mrs = section_from_flat_range(fr, \ 160 address_space_to_flatview(as)); \ 161 MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \ 162 } while(0) 163 164struct CoalescedMemoryRange { 165 AddrRange addr; 166 QTAILQ_ENTRY(CoalescedMemoryRange) link; 167}; 168 169struct MemoryRegionIoeventfd { 170 AddrRange addr; 171 bool match_data; 172 uint64_t data; 173 EventNotifier *e; 174}; 175 176static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a, 177 MemoryRegionIoeventfd b) 178{ 179 if (int128_lt(a.addr.start, b.addr.start)) { 180 return true; 181 } else if (int128_gt(a.addr.start, b.addr.start)) { 182 return false; 183 } else if (int128_lt(a.addr.size, b.addr.size)) { 184 return true; 185 } else if (int128_gt(a.addr.size, b.addr.size)) { 186 return false; 187 } else if (a.match_data < b.match_data) { 188 return true; 189 } else if (a.match_data > b.match_data) { 190 return false; 191 } else if (a.match_data) { 192 if (a.data < b.data) { 193 return true; 194 } else if (a.data > b.data) { 195 return false; 196 } 197 } 198 if (a.e < b.e) { 199 return true; 200 } else if (a.e > b.e) { 201 return false; 202 } 203 return false; 204} 205 206static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a, 207 MemoryRegionIoeventfd b) 208{ 209 return !memory_region_ioeventfd_before(a, b) 210 && !memory_region_ioeventfd_before(b, a); 211} 212 213/* Range of memory in the global map. Addresses are absolute. */ 214struct FlatRange { 215 MemoryRegion *mr; 216 hwaddr offset_in_region; 217 AddrRange addr; 218 uint8_t dirty_log_mask; 219 bool romd_mode; 220 bool readonly; 221}; 222 223typedef struct AddressSpaceOps AddressSpaceOps; 224 225#define FOR_EACH_FLAT_RANGE(var, view) \ 226 for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) 227 228static inline MemoryRegionSection 229section_from_flat_range(FlatRange *fr, FlatView *fv) 230{ 231 return (MemoryRegionSection) { 232 .mr = fr->mr, 233 .fv = fv, 234 .offset_within_region = fr->offset_in_region, 235 .size = fr->addr.size, 236 .offset_within_address_space = int128_get64(fr->addr.start), 237 .readonly = fr->readonly, 238 }; 239} 240 241static bool flatrange_equal(FlatRange *a, FlatRange *b) 242{ 243 return a->mr == b->mr 244 && addrrange_equal(a->addr, b->addr) 245 && a->offset_in_region == b->offset_in_region 246 && a->romd_mode == b->romd_mode 247 && a->readonly == b->readonly; 248} 249 250static FlatView *flatview_new(MemoryRegion *mr_root) 251{ 252 FlatView *view; 253 254 view = g_new0(FlatView, 1); 255 view->ref = 1; 256 view->root = mr_root; 257 memory_region_ref(mr_root); 258 trace_flatview_new(view, mr_root); 259 260 return view; 261} 262 263/* Insert a range into a given position. Caller is responsible for maintaining 264 * sorting order. 265 */ 266static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range) 267{ 268 if (view->nr == view->nr_allocated) { 269 view->nr_allocated = MAX(2 * view->nr, 10); 270 view->ranges = g_realloc(view->ranges, 271 view->nr_allocated * sizeof(*view->ranges)); 272 } 273 memmove(view->ranges + pos + 1, view->ranges + pos, 274 (view->nr - pos) * sizeof(FlatRange)); 275 view->ranges[pos] = *range; 276 memory_region_ref(range->mr); 277 ++view->nr; 278} 279 280static void flatview_destroy(FlatView *view) 281{ 282 int i; 283 284 trace_flatview_destroy(view, view->root); 285 if (view->dispatch) { 286 address_space_dispatch_free(view->dispatch); 287 } 288 for (i = 0; i < view->nr; i++) { 289 memory_region_unref(view->ranges[i].mr); 290 } 291 g_free(view->ranges); 292 memory_region_unref(view->root); 293 g_free(view); 294} 295 296static bool flatview_ref(FlatView *view) 297{ 298 return atomic_fetch_inc_nonzero(&view->ref) > 0; 299} 300 301static void flatview_unref(FlatView *view) 302{ 303 if (atomic_fetch_dec(&view->ref) == 1) { 304 trace_flatview_destroy_rcu(view, view->root); 305 assert(view->root); 306 call_rcu(view, flatview_destroy, rcu); 307 } 308} 309 310static bool can_merge(FlatRange *r1, FlatRange *r2) 311{ 312 return int128_eq(addrrange_end(r1->addr), r2->addr.start) 313 && r1->mr == r2->mr 314 && int128_eq(int128_add(int128_make64(r1->offset_in_region), 315 r1->addr.size), 316 int128_make64(r2->offset_in_region)) 317 && r1->dirty_log_mask == r2->dirty_log_mask 318 && r1->romd_mode == r2->romd_mode 319 && r1->readonly == r2->readonly; 320} 321 322/* Attempt to simplify a view by merging adjacent ranges */ 323static void flatview_simplify(FlatView *view) 324{ 325 unsigned i, j; 326 327 i = 0; 328 while (i < view->nr) { 329 j = i + 1; 330 while (j < view->nr 331 && can_merge(&view->ranges[j-1], &view->ranges[j])) { 332 int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size); 333 ++j; 334 } 335 ++i; 336 memmove(&view->ranges[i], &view->ranges[j], 337 (view->nr - j) * sizeof(view->ranges[j])); 338 view->nr -= j - i; 339 } 340} 341 342static bool memory_region_big_endian(MemoryRegion *mr) 343{ 344#ifdef TARGET_WORDS_BIGENDIAN 345 return mr->ops->endianness != DEVICE_LITTLE_ENDIAN; 346#else 347 return mr->ops->endianness == DEVICE_BIG_ENDIAN; 348#endif 349} 350 351static bool memory_region_wrong_endianness(MemoryRegion *mr) 352{ 353#ifdef TARGET_WORDS_BIGENDIAN 354 return mr->ops->endianness == DEVICE_LITTLE_ENDIAN; 355#else 356 return mr->ops->endianness == DEVICE_BIG_ENDIAN; 357#endif 358} 359 360static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size) 361{ 362 if (memory_region_wrong_endianness(mr)) { 363 switch (size) { 364 case 1: 365 break; 366 case 2: 367 *data = bswap16(*data); 368 break; 369 case 4: 370 *data = bswap32(*data); 371 break; 372 case 8: 373 *data = bswap64(*data); 374 break; 375 default: 376 abort(); 377 } 378 } 379} 380 381static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset) 382{ 383 MemoryRegion *root; 384 hwaddr abs_addr = offset; 385 386 abs_addr += mr->addr; 387 for (root = mr; root->container; ) { 388 root = root->container; 389 abs_addr += root->addr; 390 } 391 392 return abs_addr; 393} 394 395static int get_cpu_index(void) 396{ 397 if (current_cpu) { 398 return current_cpu->cpu_index; 399 } 400 return -1; 401} 402 403static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr, 404 hwaddr addr, 405 uint64_t *value, 406 unsigned size, 407 unsigned shift, 408 uint64_t mask, 409 MemTxAttrs attrs) 410{ 411 uint64_t tmp; 412 413 tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr); 414 if (mr->subpage) { 415 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); 416 } else if (mr == &io_mem_notdirty) { 417 /* Accesses to code which has previously been translated into a TB show 418 * up in the MMIO path, as accesses to the io_mem_notdirty 419 * MemoryRegion. */ 420 trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size); 421 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) { 422 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); 423 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); 424 } 425 *value |= (tmp & mask) << shift; 426 return MEMTX_OK; 427} 428 429static MemTxResult memory_region_read_accessor(MemoryRegion *mr, 430 hwaddr addr, 431 uint64_t *value, 432 unsigned size, 433 unsigned shift, 434 uint64_t mask, 435 MemTxAttrs attrs) 436{ 437 uint64_t tmp; 438 439 tmp = mr->ops->read(mr->opaque, addr, size); 440 if (mr->subpage) { 441 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); 442 } else if (mr == &io_mem_notdirty) { 443 /* Accesses to code which has previously been translated into a TB show 444 * up in the MMIO path, as accesses to the io_mem_notdirty 445 * MemoryRegion. */ 446 trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size); 447 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) { 448 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); 449 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); 450 } 451 *value |= (tmp & mask) << shift; 452 return MEMTX_OK; 453} 454 455static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr, 456 hwaddr addr, 457 uint64_t *value, 458 unsigned size, 459 unsigned shift, 460 uint64_t mask, 461 MemTxAttrs attrs) 462{ 463 uint64_t tmp = 0; 464 MemTxResult r; 465 466 r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs); 467 if (mr->subpage) { 468 trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size); 469 } else if (mr == &io_mem_notdirty) { 470 /* Accesses to code which has previously been translated into a TB show 471 * up in the MMIO path, as accesses to the io_mem_notdirty 472 * MemoryRegion. */ 473 trace_memory_region_tb_read(get_cpu_index(), addr, tmp, size); 474 } else if (TRACE_MEMORY_REGION_OPS_READ_ENABLED) { 475 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); 476 trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size); 477 } 478 *value |= (tmp & mask) << shift; 479 return r; 480} 481 482static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr, 483 hwaddr addr, 484 uint64_t *value, 485 unsigned size, 486 unsigned shift, 487 uint64_t mask, 488 MemTxAttrs attrs) 489{ 490 uint64_t tmp; 491 492 tmp = (*value >> shift) & mask; 493 if (mr->subpage) { 494 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); 495 } else if (mr == &io_mem_notdirty) { 496 /* Accesses to code which has previously been translated into a TB show 497 * up in the MMIO path, as accesses to the io_mem_notdirty 498 * MemoryRegion. */ 499 trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size); 500 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) { 501 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); 502 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); 503 } 504 mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp); 505 return MEMTX_OK; 506} 507 508static MemTxResult memory_region_write_accessor(MemoryRegion *mr, 509 hwaddr addr, 510 uint64_t *value, 511 unsigned size, 512 unsigned shift, 513 uint64_t mask, 514 MemTxAttrs attrs) 515{ 516 uint64_t tmp; 517 518 tmp = (*value >> shift) & mask; 519 if (mr->subpage) { 520 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); 521 } else if (mr == &io_mem_notdirty) { 522 /* Accesses to code which has previously been translated into a TB show 523 * up in the MMIO path, as accesses to the io_mem_notdirty 524 * MemoryRegion. */ 525 trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size); 526 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) { 527 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); 528 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); 529 } 530 mr->ops->write(mr->opaque, addr, tmp, size); 531 return MEMTX_OK; 532} 533 534static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr, 535 hwaddr addr, 536 uint64_t *value, 537 unsigned size, 538 unsigned shift, 539 uint64_t mask, 540 MemTxAttrs attrs) 541{ 542 uint64_t tmp; 543 544 tmp = (*value >> shift) & mask; 545 if (mr->subpage) { 546 trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size); 547 } else if (mr == &io_mem_notdirty) { 548 /* Accesses to code which has previously been translated into a TB show 549 * up in the MMIO path, as accesses to the io_mem_notdirty 550 * MemoryRegion. */ 551 trace_memory_region_tb_write(get_cpu_index(), addr, tmp, size); 552 } else if (TRACE_MEMORY_REGION_OPS_WRITE_ENABLED) { 553 hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr); 554 trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size); 555 } 556 return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs); 557} 558 559static MemTxResult access_with_adjusted_size(hwaddr addr, 560 uint64_t *value, 561 unsigned size, 562 unsigned access_size_min, 563 unsigned access_size_max, 564 MemTxResult (*access_fn) 565 (MemoryRegion *mr, 566 hwaddr addr, 567 uint64_t *value, 568 unsigned size, 569 unsigned shift, 570 uint64_t mask, 571 MemTxAttrs attrs), 572 MemoryRegion *mr, 573 MemTxAttrs attrs) 574{ 575 uint64_t access_mask; 576 unsigned access_size; 577 unsigned i; 578 MemTxResult r = MEMTX_OK; 579 580 if (!access_size_min) { 581 access_size_min = 1; 582 } 583 if (!access_size_max) { 584 access_size_max = 4; 585 } 586 587 /* FIXME: support unaligned access? */ 588 access_size = MAX(MIN(size, access_size_max), access_size_min); 589 access_mask = -1ULL >> (64 - access_size * 8); 590 if (memory_region_big_endian(mr)) { 591 for (i = 0; i < size; i += access_size) { 592 r |= access_fn(mr, addr + i, value, access_size, 593 (size - access_size - i) * 8, access_mask, attrs); 594 } 595 } else { 596 for (i = 0; i < size; i += access_size) { 597 r |= access_fn(mr, addr + i, value, access_size, i * 8, 598 access_mask, attrs); 599 } 600 } 601 return r; 602} 603 604static AddressSpace *memory_region_to_address_space(MemoryRegion *mr) 605{ 606 AddressSpace *as; 607 608 while (mr->container) { 609 mr = mr->container; 610 } 611 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 612 if (mr == as->root) { 613 return as; 614 } 615 } 616 return NULL; 617} 618 619/* Render a memory region into the global view. Ranges in @view obscure 620 * ranges in @mr. 621 */ 622static void render_memory_region(FlatView *view, 623 MemoryRegion *mr, 624 Int128 base, 625 AddrRange clip, 626 bool readonly) 627{ 628 MemoryRegion *subregion; 629 unsigned i; 630 hwaddr offset_in_region; 631 Int128 remain; 632 Int128 now; 633 FlatRange fr; 634 AddrRange tmp; 635 636 if (!mr->enabled) { 637 return; 638 } 639 640 int128_addto(&base, int128_make64(mr->addr)); 641 readonly |= mr->readonly; 642 643 tmp = addrrange_make(base, mr->size); 644 645 if (!addrrange_intersects(tmp, clip)) { 646 return; 647 } 648 649 clip = addrrange_intersection(tmp, clip); 650 651 if (mr->alias) { 652 int128_subfrom(&base, int128_make64(mr->alias->addr)); 653 int128_subfrom(&base, int128_make64(mr->alias_offset)); 654 render_memory_region(view, mr->alias, base, clip, readonly); 655 return; 656 } 657 658 /* Render subregions in priority order. */ 659 QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) { 660 render_memory_region(view, subregion, base, clip, readonly); 661 } 662 663 if (!mr->terminates) { 664 return; 665 } 666 667 offset_in_region = int128_get64(int128_sub(clip.start, base)); 668 base = clip.start; 669 remain = clip.size; 670 671 fr.mr = mr; 672 fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr); 673 fr.romd_mode = mr->romd_mode; 674 fr.readonly = readonly; 675 676 /* Render the region itself into any gaps left by the current view. */ 677 for (i = 0; i < view->nr && int128_nz(remain); ++i) { 678 if (int128_ge(base, addrrange_end(view->ranges[i].addr))) { 679 continue; 680 } 681 if (int128_lt(base, view->ranges[i].addr.start)) { 682 now = int128_min(remain, 683 int128_sub(view->ranges[i].addr.start, base)); 684 fr.offset_in_region = offset_in_region; 685 fr.addr = addrrange_make(base, now); 686 flatview_insert(view, i, &fr); 687 ++i; 688 int128_addto(&base, now); 689 offset_in_region += int128_get64(now); 690 int128_subfrom(&remain, now); 691 } 692 now = int128_sub(int128_min(int128_add(base, remain), 693 addrrange_end(view->ranges[i].addr)), 694 base); 695 int128_addto(&base, now); 696 offset_in_region += int128_get64(now); 697 int128_subfrom(&remain, now); 698 } 699 if (int128_nz(remain)) { 700 fr.offset_in_region = offset_in_region; 701 fr.addr = addrrange_make(base, remain); 702 flatview_insert(view, i, &fr); 703 } 704} 705 706static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr) 707{ 708 while (mr->enabled) { 709 if (mr->alias) { 710 if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) { 711 /* The alias is included in its entirety. Use it as 712 * the "real" root, so that we can share more FlatViews. 713 */ 714 mr = mr->alias; 715 continue; 716 } 717 } else if (!mr->terminates) { 718 unsigned int found = 0; 719 MemoryRegion *child, *next = NULL; 720 QTAILQ_FOREACH(child, &mr->subregions, subregions_link) { 721 if (child->enabled) { 722 if (++found > 1) { 723 next = NULL; 724 break; 725 } 726 if (!child->addr && int128_ge(mr->size, child->size)) { 727 /* A child is included in its entirety. If it's the only 728 * enabled one, use it in the hope of finding an alias down the 729 * way. This will also let us share FlatViews. 730 */ 731 next = child; 732 } 733 } 734 } 735 if (found == 0) { 736 return NULL; 737 } 738 if (next) { 739 mr = next; 740 continue; 741 } 742 } 743 744 return mr; 745 } 746 747 return NULL; 748} 749 750/* Render a memory topology into a list of disjoint absolute ranges. */ 751static FlatView *generate_memory_topology(MemoryRegion *mr) 752{ 753 int i; 754 FlatView *view; 755 756 view = flatview_new(mr); 757 758 if (mr) { 759 render_memory_region(view, mr, int128_zero(), 760 addrrange_make(int128_zero(), int128_2_64()), false); 761 } 762 flatview_simplify(view); 763 764 view->dispatch = address_space_dispatch_new(view); 765 for (i = 0; i < view->nr; i++) { 766 MemoryRegionSection mrs = 767 section_from_flat_range(&view->ranges[i], view); 768 flatview_add_to_dispatch(view, &mrs); 769 } 770 address_space_dispatch_compact(view->dispatch); 771 g_hash_table_replace(flat_views, mr, view); 772 773 return view; 774} 775 776static void address_space_add_del_ioeventfds(AddressSpace *as, 777 MemoryRegionIoeventfd *fds_new, 778 unsigned fds_new_nb, 779 MemoryRegionIoeventfd *fds_old, 780 unsigned fds_old_nb) 781{ 782 unsigned iold, inew; 783 MemoryRegionIoeventfd *fd; 784 MemoryRegionSection section; 785 786 /* Generate a symmetric difference of the old and new fd sets, adding 787 * and deleting as necessary. 788 */ 789 790 iold = inew = 0; 791 while (iold < fds_old_nb || inew < fds_new_nb) { 792 if (iold < fds_old_nb 793 && (inew == fds_new_nb 794 || memory_region_ioeventfd_before(fds_old[iold], 795 fds_new[inew]))) { 796 fd = &fds_old[iold]; 797 section = (MemoryRegionSection) { 798 .fv = address_space_to_flatview(as), 799 .offset_within_address_space = int128_get64(fd->addr.start), 800 .size = fd->addr.size, 801 }; 802 MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section, 803 fd->match_data, fd->data, fd->e); 804 ++iold; 805 } else if (inew < fds_new_nb 806 && (iold == fds_old_nb 807 || memory_region_ioeventfd_before(fds_new[inew], 808 fds_old[iold]))) { 809 fd = &fds_new[inew]; 810 section = (MemoryRegionSection) { 811 .fv = address_space_to_flatview(as), 812 .offset_within_address_space = int128_get64(fd->addr.start), 813 .size = fd->addr.size, 814 }; 815 MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section, 816 fd->match_data, fd->data, fd->e); 817 ++inew; 818 } else { 819 ++iold; 820 ++inew; 821 } 822 } 823} 824 825static FlatView *address_space_get_flatview(AddressSpace *as) 826{ 827 FlatView *view; 828 829 rcu_read_lock(); 830 do { 831 view = address_space_to_flatview(as); 832 /* If somebody has replaced as->current_map concurrently, 833 * flatview_ref returns false. 834 */ 835 } while (!flatview_ref(view)); 836 rcu_read_unlock(); 837 return view; 838} 839 840static void address_space_update_ioeventfds(AddressSpace *as) 841{ 842 FlatView *view; 843 FlatRange *fr; 844 unsigned ioeventfd_nb = 0; 845 MemoryRegionIoeventfd *ioeventfds = NULL; 846 AddrRange tmp; 847 unsigned i; 848 849 view = address_space_get_flatview(as); 850 FOR_EACH_FLAT_RANGE(fr, view) { 851 for (i = 0; i < fr->mr->ioeventfd_nb; ++i) { 852 tmp = addrrange_shift(fr->mr->ioeventfds[i].addr, 853 int128_sub(fr->addr.start, 854 int128_make64(fr->offset_in_region))); 855 if (addrrange_intersects(fr->addr, tmp)) { 856 ++ioeventfd_nb; 857 ioeventfds = g_realloc(ioeventfds, 858 ioeventfd_nb * sizeof(*ioeventfds)); 859 ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i]; 860 ioeventfds[ioeventfd_nb-1].addr = tmp; 861 } 862 } 863 } 864 865 address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb, 866 as->ioeventfds, as->ioeventfd_nb); 867 868 g_free(as->ioeventfds); 869 as->ioeventfds = ioeventfds; 870 as->ioeventfd_nb = ioeventfd_nb; 871 flatview_unref(view); 872} 873 874static void address_space_update_topology_pass(AddressSpace *as, 875 const FlatView *old_view, 876 const FlatView *new_view, 877 bool adding) 878{ 879 unsigned iold, inew; 880 FlatRange *frold, *frnew; 881 882 /* Generate a symmetric difference of the old and new memory maps. 883 * Kill ranges in the old map, and instantiate ranges in the new map. 884 */ 885 iold = inew = 0; 886 while (iold < old_view->nr || inew < new_view->nr) { 887 if (iold < old_view->nr) { 888 frold = &old_view->ranges[iold]; 889 } else { 890 frold = NULL; 891 } 892 if (inew < new_view->nr) { 893 frnew = &new_view->ranges[inew]; 894 } else { 895 frnew = NULL; 896 } 897 898 if (frold 899 && (!frnew 900 || int128_lt(frold->addr.start, frnew->addr.start) 901 || (int128_eq(frold->addr.start, frnew->addr.start) 902 && !flatrange_equal(frold, frnew)))) { 903 /* In old but not in new, or in both but attributes changed. */ 904 905 if (!adding) { 906 MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del); 907 } 908 909 ++iold; 910 } else if (frold && frnew && flatrange_equal(frold, frnew)) { 911 /* In both and unchanged (except logging may have changed) */ 912 913 if (adding) { 914 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop); 915 if (frnew->dirty_log_mask & ~frold->dirty_log_mask) { 916 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start, 917 frold->dirty_log_mask, 918 frnew->dirty_log_mask); 919 } 920 if (frold->dirty_log_mask & ~frnew->dirty_log_mask) { 921 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop, 922 frold->dirty_log_mask, 923 frnew->dirty_log_mask); 924 } 925 } 926 927 ++iold; 928 ++inew; 929 } else { 930 /* In new */ 931 932 if (adding) { 933 MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add); 934 } 935 936 ++inew; 937 } 938 } 939} 940 941static void flatviews_init(void) 942{ 943 static FlatView *empty_view; 944 945 if (flat_views) { 946 return; 947 } 948 949 flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, 950 (GDestroyNotify) flatview_unref); 951 if (!empty_view) { 952 empty_view = generate_memory_topology(NULL); 953 /* We keep it alive forever in the global variable. */ 954 flatview_ref(empty_view); 955 } else { 956 g_hash_table_replace(flat_views, NULL, empty_view); 957 flatview_ref(empty_view); 958 } 959} 960 961static void flatviews_reset(void) 962{ 963 AddressSpace *as; 964 965 if (flat_views) { 966 g_hash_table_unref(flat_views); 967 flat_views = NULL; 968 } 969 flatviews_init(); 970 971 /* Render unique FVs */ 972 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 973 MemoryRegion *physmr = memory_region_get_flatview_root(as->root); 974 975 if (g_hash_table_lookup(flat_views, physmr)) { 976 continue; 977 } 978 979 generate_memory_topology(physmr); 980 } 981} 982 983static void address_space_set_flatview(AddressSpace *as) 984{ 985 FlatView *old_view = address_space_to_flatview(as); 986 MemoryRegion *physmr = memory_region_get_flatview_root(as->root); 987 FlatView *new_view = g_hash_table_lookup(flat_views, physmr); 988 989 assert(new_view); 990 991 if (old_view == new_view) { 992 return; 993 } 994 995 if (old_view) { 996 flatview_ref(old_view); 997 } 998 999 flatview_ref(new_view); 1000 1001 if (!QTAILQ_EMPTY(&as->listeners)) { 1002 FlatView tmpview = { .nr = 0 }, *old_view2 = old_view; 1003 1004 if (!old_view2) { 1005 old_view2 = &tmpview; 1006 } 1007 address_space_update_topology_pass(as, old_view2, new_view, false); 1008 address_space_update_topology_pass(as, old_view2, new_view, true); 1009 } 1010 1011 /* Writes are protected by the BQL. */ 1012 atomic_rcu_set(&as->current_map, new_view); 1013 if (old_view) { 1014 flatview_unref(old_view); 1015 } 1016 1017 /* Note that all the old MemoryRegions are still alive up to this 1018 * point. This relieves most MemoryListeners from the need to 1019 * ref/unref the MemoryRegions they get---unless they use them 1020 * outside the iothread mutex, in which case precise reference 1021 * counting is necessary. 1022 */ 1023 if (old_view) { 1024 flatview_unref(old_view); 1025 } 1026} 1027 1028static void address_space_update_topology(AddressSpace *as) 1029{ 1030 MemoryRegion *physmr = memory_region_get_flatview_root(as->root); 1031 1032 flatviews_init(); 1033 if (!g_hash_table_lookup(flat_views, physmr)) { 1034 generate_memory_topology(physmr); 1035 } 1036 address_space_set_flatview(as); 1037} 1038 1039void memory_region_transaction_begin(void) 1040{ 1041 qemu_flush_coalesced_mmio_buffer(); 1042 ++memory_region_transaction_depth; 1043} 1044 1045void memory_region_transaction_commit(void) 1046{ 1047 AddressSpace *as; 1048 1049 assert(memory_region_transaction_depth); 1050 assert(qemu_mutex_iothread_locked()); 1051 1052 --memory_region_transaction_depth; 1053 if (!memory_region_transaction_depth) { 1054 if (memory_region_update_pending) { 1055 flatviews_reset(); 1056 1057 MEMORY_LISTENER_CALL_GLOBAL(begin, Forward); 1058 1059 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 1060 address_space_set_flatview(as); 1061 address_space_update_ioeventfds(as); 1062 } 1063 memory_region_update_pending = false; 1064 ioeventfd_update_pending = false; 1065 MEMORY_LISTENER_CALL_GLOBAL(commit, Forward); 1066 } else if (ioeventfd_update_pending) { 1067 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 1068 address_space_update_ioeventfds(as); 1069 } 1070 ioeventfd_update_pending = false; 1071 } 1072 } 1073} 1074 1075static void memory_region_destructor_none(MemoryRegion *mr) 1076{ 1077} 1078 1079static void memory_region_destructor_ram(MemoryRegion *mr) 1080{ 1081 qemu_ram_free(mr->ram_block); 1082} 1083 1084static bool memory_region_need_escape(char c) 1085{ 1086 return c == '/' || c == '[' || c == '\\' || c == ']'; 1087} 1088 1089static char *memory_region_escape_name(const char *name) 1090{ 1091 const char *p; 1092 char *escaped, *q; 1093 uint8_t c; 1094 size_t bytes = 0; 1095 1096 for (p = name; *p; p++) { 1097 bytes += memory_region_need_escape(*p) ? 4 : 1; 1098 } 1099 if (bytes == p - name) { 1100 return g_memdup(name, bytes + 1); 1101 } 1102 1103 escaped = g_malloc(bytes + 1); 1104 for (p = name, q = escaped; *p; p++) { 1105 c = *p; 1106 if (unlikely(memory_region_need_escape(c))) { 1107 *q++ = '\\'; 1108 *q++ = 'x'; 1109 *q++ = "0123456789abcdef"[c >> 4]; 1110 c = "0123456789abcdef"[c & 15]; 1111 } 1112 *q++ = c; 1113 } 1114 *q = 0; 1115 return escaped; 1116} 1117 1118static void memory_region_do_init(MemoryRegion *mr, 1119 Object *owner, 1120 const char *name, 1121 uint64_t size) 1122{ 1123 mr->size = int128_make64(size); 1124 if (size == UINT64_MAX) { 1125 mr->size = int128_2_64(); 1126 } 1127 mr->name = g_strdup(name); 1128 mr->owner = owner; 1129 mr->ram_block = NULL; 1130 1131 if (name) { 1132 char *escaped_name = memory_region_escape_name(name); 1133 char *name_array = g_strdup_printf("%s[*]", escaped_name); 1134 1135 if (!owner) { 1136 owner = container_get(qdev_get_machine(), "/unattached"); 1137 } 1138 1139 object_property_add_child(owner, name_array, OBJECT(mr), &error_abort); 1140 object_unref(OBJECT(mr)); 1141 g_free(name_array); 1142 g_free(escaped_name); 1143 } 1144} 1145 1146void memory_region_init(MemoryRegion *mr, 1147 Object *owner, 1148 const char *name, 1149 uint64_t size) 1150{ 1151 object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION); 1152 memory_region_do_init(mr, owner, name, size); 1153} 1154 1155static void memory_region_get_addr(Object *obj, Visitor *v, const char *name, 1156 void *opaque, Error **errp) 1157{ 1158 MemoryRegion *mr = MEMORY_REGION(obj); 1159 uint64_t value = mr->addr; 1160 1161 visit_type_uint64(v, name, &value, errp); 1162} 1163 1164static void memory_region_get_container(Object *obj, Visitor *v, 1165 const char *name, void *opaque, 1166 Error **errp) 1167{ 1168 MemoryRegion *mr = MEMORY_REGION(obj); 1169 gchar *path = (gchar *)""; 1170 1171 if (mr->container) { 1172 path = object_get_canonical_path(OBJECT(mr->container)); 1173 } 1174 visit_type_str(v, name, &path, errp); 1175 if (mr->container) { 1176 g_free(path); 1177 } 1178} 1179 1180static Object *memory_region_resolve_container(Object *obj, void *opaque, 1181 const char *part) 1182{ 1183 MemoryRegion *mr = MEMORY_REGION(obj); 1184 1185 return OBJECT(mr->container); 1186} 1187 1188static void memory_region_get_priority(Object *obj, Visitor *v, 1189 const char *name, void *opaque, 1190 Error **errp) 1191{ 1192 MemoryRegion *mr = MEMORY_REGION(obj); 1193 int32_t value = mr->priority; 1194 1195 visit_type_int32(v, name, &value, errp); 1196} 1197 1198static void memory_region_get_size(Object *obj, Visitor *v, const char *name, 1199 void *opaque, Error **errp) 1200{ 1201 MemoryRegion *mr = MEMORY_REGION(obj); 1202 uint64_t value = memory_region_size(mr); 1203 1204 visit_type_uint64(v, name, &value, errp); 1205} 1206 1207static void memory_region_initfn(Object *obj) 1208{ 1209 MemoryRegion *mr = MEMORY_REGION(obj); 1210 ObjectProperty *op; 1211 1212 mr->ops = &unassigned_mem_ops; 1213 mr->enabled = true; 1214 mr->romd_mode = true; 1215 mr->global_locking = true; 1216 mr->destructor = memory_region_destructor_none; 1217 QTAILQ_INIT(&mr->subregions); 1218 QTAILQ_INIT(&mr->coalesced); 1219 1220 op = object_property_add(OBJECT(mr), "container", 1221 "link<" TYPE_MEMORY_REGION ">", 1222 memory_region_get_container, 1223 NULL, /* memory_region_set_container */ 1224 NULL, NULL, &error_abort); 1225 op->resolve = memory_region_resolve_container; 1226 1227 object_property_add(OBJECT(mr), "addr", "uint64", 1228 memory_region_get_addr, 1229 NULL, /* memory_region_set_addr */ 1230 NULL, NULL, &error_abort); 1231 object_property_add(OBJECT(mr), "priority", "uint32", 1232 memory_region_get_priority, 1233 NULL, /* memory_region_set_priority */ 1234 NULL, NULL, &error_abort); 1235 object_property_add(OBJECT(mr), "size", "uint64", 1236 memory_region_get_size, 1237 NULL, /* memory_region_set_size, */ 1238 NULL, NULL, &error_abort); 1239} 1240 1241static void iommu_memory_region_initfn(Object *obj) 1242{ 1243 MemoryRegion *mr = MEMORY_REGION(obj); 1244 1245 mr->is_iommu = true; 1246} 1247 1248static uint64_t unassigned_mem_read(void *opaque, hwaddr addr, 1249 unsigned size) 1250{ 1251#ifdef DEBUG_UNASSIGNED 1252 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr); 1253#endif 1254 if (current_cpu != NULL) { 1255 cpu_unassigned_access(current_cpu, addr, false, false, 0, size); 1256 } 1257 return 0; 1258} 1259 1260static void unassigned_mem_write(void *opaque, hwaddr addr, 1261 uint64_t val, unsigned size) 1262{ 1263#ifdef DEBUG_UNASSIGNED 1264 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val); 1265#endif 1266 if (current_cpu != NULL) { 1267 cpu_unassigned_access(current_cpu, addr, true, false, 0, size); 1268 } 1269} 1270 1271static bool unassigned_mem_accepts(void *opaque, hwaddr addr, 1272 unsigned size, bool is_write) 1273{ 1274 return false; 1275} 1276 1277const MemoryRegionOps unassigned_mem_ops = { 1278 .valid.accepts = unassigned_mem_accepts, 1279 .endianness = DEVICE_NATIVE_ENDIAN, 1280}; 1281 1282static uint64_t memory_region_ram_device_read(void *opaque, 1283 hwaddr addr, unsigned size) 1284{ 1285 MemoryRegion *mr = opaque; 1286 uint64_t data = (uint64_t)~0; 1287 1288 switch (size) { 1289 case 1: 1290 data = *(uint8_t *)(mr->ram_block->host + addr); 1291 break; 1292 case 2: 1293 data = *(uint16_t *)(mr->ram_block->host + addr); 1294 break; 1295 case 4: 1296 data = *(uint32_t *)(mr->ram_block->host + addr); 1297 break; 1298 case 8: 1299 data = *(uint64_t *)(mr->ram_block->host + addr); 1300 break; 1301 } 1302 1303 trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size); 1304 1305 return data; 1306} 1307 1308static void memory_region_ram_device_write(void *opaque, hwaddr addr, 1309 uint64_t data, unsigned size) 1310{ 1311 MemoryRegion *mr = opaque; 1312 1313 trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size); 1314 1315 switch (size) { 1316 case 1: 1317 *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data; 1318 break; 1319 case 2: 1320 *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data; 1321 break; 1322 case 4: 1323 *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data; 1324 break; 1325 case 8: 1326 *(uint64_t *)(mr->ram_block->host + addr) = data; 1327 break; 1328 } 1329} 1330 1331static const MemoryRegionOps ram_device_mem_ops = { 1332 .read = memory_region_ram_device_read, 1333 .write = memory_region_ram_device_write, 1334 .endianness = DEVICE_HOST_ENDIAN, 1335 .valid = { 1336 .min_access_size = 1, 1337 .max_access_size = 8, 1338 .unaligned = true, 1339 }, 1340 .impl = { 1341 .min_access_size = 1, 1342 .max_access_size = 8, 1343 .unaligned = true, 1344 }, 1345}; 1346 1347bool memory_region_access_valid(MemoryRegion *mr, 1348 hwaddr addr, 1349 unsigned size, 1350 bool is_write) 1351{ 1352 int access_size_min, access_size_max; 1353 int access_size, i; 1354 1355 if (!mr->ops->valid.unaligned && (addr & (size - 1))) { 1356 return false; 1357 } 1358 1359 if (!mr->ops->valid.accepts) { 1360 return true; 1361 } 1362 1363 access_size_min = mr->ops->valid.min_access_size; 1364 if (!mr->ops->valid.min_access_size) { 1365 access_size_min = 1; 1366 } 1367 1368 access_size_max = mr->ops->valid.max_access_size; 1369 if (!mr->ops->valid.max_access_size) { 1370 access_size_max = 4; 1371 } 1372 1373 access_size = MAX(MIN(size, access_size_max), access_size_min); 1374 for (i = 0; i < size; i += access_size) { 1375 if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size, 1376 is_write)) { 1377 return false; 1378 } 1379 } 1380 1381 return true; 1382} 1383 1384static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, 1385 hwaddr addr, 1386 uint64_t *pval, 1387 unsigned size, 1388 MemTxAttrs attrs) 1389{ 1390 *pval = 0; 1391 1392 if (mr->ops->read) { 1393 return access_with_adjusted_size(addr, pval, size, 1394 mr->ops->impl.min_access_size, 1395 mr->ops->impl.max_access_size, 1396 memory_region_read_accessor, 1397 mr, attrs); 1398 } else if (mr->ops->read_with_attrs) { 1399 return access_with_adjusted_size(addr, pval, size, 1400 mr->ops->impl.min_access_size, 1401 mr->ops->impl.max_access_size, 1402 memory_region_read_with_attrs_accessor, 1403 mr, attrs); 1404 } else { 1405 return access_with_adjusted_size(addr, pval, size, 1, 4, 1406 memory_region_oldmmio_read_accessor, 1407 mr, attrs); 1408 } 1409} 1410 1411MemTxResult memory_region_dispatch_read(MemoryRegion *mr, 1412 hwaddr addr, 1413 uint64_t *pval, 1414 unsigned size, 1415 MemTxAttrs attrs) 1416{ 1417 MemTxResult r; 1418 1419 if (!memory_region_access_valid(mr, addr, size, false)) { 1420 *pval = unassigned_mem_read(mr, addr, size); 1421 return MEMTX_DECODE_ERROR; 1422 } 1423 1424 r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); 1425 adjust_endianness(mr, pval, size); 1426 return r; 1427} 1428 1429/* Return true if an eventfd was signalled */ 1430static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr, 1431 hwaddr addr, 1432 uint64_t data, 1433 unsigned size, 1434 MemTxAttrs attrs) 1435{ 1436 MemoryRegionIoeventfd ioeventfd = { 1437 .addr = addrrange_make(int128_make64(addr), int128_make64(size)), 1438 .data = data, 1439 }; 1440 unsigned i; 1441 1442 for (i = 0; i < mr->ioeventfd_nb; i++) { 1443 ioeventfd.match_data = mr->ioeventfds[i].match_data; 1444 ioeventfd.e = mr->ioeventfds[i].e; 1445 1446 if (memory_region_ioeventfd_equal(ioeventfd, mr->ioeventfds[i])) { 1447 event_notifier_set(ioeventfd.e); 1448 return true; 1449 } 1450 } 1451 1452 return false; 1453} 1454 1455MemTxResult memory_region_dispatch_write(MemoryRegion *mr, 1456 hwaddr addr, 1457 uint64_t data, 1458 unsigned size, 1459 MemTxAttrs attrs) 1460{ 1461 if (!memory_region_access_valid(mr, addr, size, true)) { 1462 unassigned_mem_write(mr, addr, data, size); 1463 return MEMTX_DECODE_ERROR; 1464 } 1465 1466 adjust_endianness(mr, &data, size); 1467 1468 if ((!kvm_eventfds_enabled()) && 1469 memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { 1470 return MEMTX_OK; 1471 } 1472 1473 if (mr->ops->write) { 1474 return access_with_adjusted_size(addr, &data, size, 1475 mr->ops->impl.min_access_size, 1476 mr->ops->impl.max_access_size, 1477 memory_region_write_accessor, mr, 1478 attrs); 1479 } else if (mr->ops->write_with_attrs) { 1480 return 1481 access_with_adjusted_size(addr, &data, size, 1482 mr->ops->impl.min_access_size, 1483 mr->ops->impl.max_access_size, 1484 memory_region_write_with_attrs_accessor, 1485 mr, attrs); 1486 } else { 1487 return access_with_adjusted_size(addr, &data, size, 1, 4, 1488 memory_region_oldmmio_write_accessor, 1489 mr, attrs); 1490 } 1491} 1492 1493void memory_region_init_io(MemoryRegion *mr, 1494 Object *owner, 1495 const MemoryRegionOps *ops, 1496 void *opaque, 1497 const char *name, 1498 uint64_t size) 1499{ 1500 memory_region_init(mr, owner, name, size); 1501 mr->ops = ops ? ops : &unassigned_mem_ops; 1502 mr->opaque = opaque; 1503 mr->terminates = true; 1504} 1505 1506void memory_region_init_ram_nomigrate(MemoryRegion *mr, 1507 Object *owner, 1508 const char *name, 1509 uint64_t size, 1510 Error **errp) 1511{ 1512 memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp); 1513} 1514 1515void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr, 1516 Object *owner, 1517 const char *name, 1518 uint64_t size, 1519 bool share, 1520 Error **errp) 1521{ 1522 memory_region_init(mr, owner, name, size); 1523 mr->ram = true; 1524 mr->terminates = true; 1525 mr->destructor = memory_region_destructor_ram; 1526 mr->ram_block = qemu_ram_alloc(size, share, mr, errp); 1527 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; 1528} 1529 1530void memory_region_init_resizeable_ram(MemoryRegion *mr, 1531 Object *owner, 1532 const char *name, 1533 uint64_t size, 1534 uint64_t max_size, 1535 void (*resized)(const char*, 1536 uint64_t length, 1537 void *host), 1538 Error **errp) 1539{ 1540 memory_region_init(mr, owner, name, size); 1541 mr->ram = true; 1542 mr->terminates = true; 1543 mr->destructor = memory_region_destructor_ram; 1544 mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized, 1545 mr, errp); 1546 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; 1547} 1548 1549#ifdef __linux__ 1550void memory_region_init_ram_from_file(MemoryRegion *mr, 1551 struct Object *owner, 1552 const char *name, 1553 uint64_t size, 1554 uint64_t align, 1555 bool share, 1556 const char *path, 1557 Error **errp) 1558{ 1559 memory_region_init(mr, owner, name, size); 1560 mr->ram = true; 1561 mr->terminates = true; 1562 mr->destructor = memory_region_destructor_ram; 1563 mr->align = align; 1564 mr->ram_block = qemu_ram_alloc_from_file(size, mr, share, path, errp); 1565 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; 1566} 1567 1568void memory_region_init_ram_from_fd(MemoryRegion *mr, 1569 struct Object *owner, 1570 const char *name, 1571 uint64_t size, 1572 bool share, 1573 int fd, 1574 Error **errp) 1575{ 1576 memory_region_init(mr, owner, name, size); 1577 mr->ram = true; 1578 mr->terminates = true; 1579 mr->destructor = memory_region_destructor_ram; 1580 mr->ram_block = qemu_ram_alloc_from_fd(size, mr, share, fd, errp); 1581 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; 1582} 1583#endif 1584 1585void memory_region_init_ram_ptr(MemoryRegion *mr, 1586 Object *owner, 1587 const char *name, 1588 uint64_t size, 1589 void *ptr) 1590{ 1591 memory_region_init(mr, owner, name, size); 1592 mr->ram = true; 1593 mr->terminates = true; 1594 mr->destructor = memory_region_destructor_ram; 1595 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; 1596 1597 /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */ 1598 assert(ptr != NULL); 1599 mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal); 1600} 1601 1602void memory_region_init_ram_device_ptr(MemoryRegion *mr, 1603 Object *owner, 1604 const char *name, 1605 uint64_t size, 1606 void *ptr) 1607{ 1608 memory_region_init_ram_ptr(mr, owner, name, size, ptr); 1609 mr->ram_device = true; 1610 mr->ops = &ram_device_mem_ops; 1611 mr->opaque = mr; 1612} 1613 1614void memory_region_init_alias(MemoryRegion *mr, 1615 Object *owner, 1616 const char *name, 1617 MemoryRegion *orig, 1618 hwaddr offset, 1619 uint64_t size) 1620{ 1621 memory_region_init(mr, owner, name, size); 1622 mr->alias = orig; 1623 mr->alias_offset = offset; 1624} 1625 1626void memory_region_init_rom_nomigrate(MemoryRegion *mr, 1627 struct Object *owner, 1628 const char *name, 1629 uint64_t size, 1630 Error **errp) 1631{ 1632 memory_region_init(mr, owner, name, size); 1633 mr->ram = true; 1634 mr->readonly = true; 1635 mr->terminates = true; 1636 mr->destructor = memory_region_destructor_ram; 1637 mr->ram_block = qemu_ram_alloc(size, false, mr, errp); 1638 mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0; 1639} 1640 1641void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, 1642 Object *owner, 1643 const MemoryRegionOps *ops, 1644 void *opaque, 1645 const char *name, 1646 uint64_t size, 1647 Error **errp) 1648{ 1649 assert(ops); 1650 memory_region_init(mr, owner, name, size); 1651 mr->ops = ops; 1652 mr->opaque = opaque; 1653 mr->terminates = true; 1654 mr->rom_device = true; 1655 mr->destructor = memory_region_destructor_ram; 1656 mr->ram_block = qemu_ram_alloc(size, false, mr, errp); 1657} 1658 1659void memory_region_init_iommu(void *_iommu_mr, 1660 size_t instance_size, 1661 const char *mrtypename, 1662 Object *owner, 1663 const char *name, 1664 uint64_t size) 1665{ 1666 struct IOMMUMemoryRegion *iommu_mr; 1667 struct MemoryRegion *mr; 1668 1669 object_initialize(_iommu_mr, instance_size, mrtypename); 1670 mr = MEMORY_REGION(_iommu_mr); 1671 memory_region_do_init(mr, owner, name, size); 1672 iommu_mr = IOMMU_MEMORY_REGION(mr); 1673 mr->terminates = true; /* then re-forwards */ 1674 QLIST_INIT(&iommu_mr->iommu_notify); 1675 iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE; 1676} 1677 1678static void memory_region_finalize(Object *obj) 1679{ 1680 MemoryRegion *mr = MEMORY_REGION(obj); 1681 1682 assert(!mr->container); 1683 1684 /* We know the region is not visible in any address space (it 1685 * does not have a container and cannot be a root either because 1686 * it has no references, so we can blindly clear mr->enabled. 1687 * memory_region_set_enabled instead could trigger a transaction 1688 * and cause an infinite loop. 1689 */ 1690 mr->enabled = false; 1691 memory_region_transaction_begin(); 1692 while (!QTAILQ_EMPTY(&mr->subregions)) { 1693 MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions); 1694 memory_region_del_subregion(mr, subregion); 1695 } 1696 memory_region_transaction_commit(); 1697 1698 mr->destructor(mr); 1699 memory_region_clear_coalescing(mr); 1700 g_free((char *)mr->name); 1701 g_free(mr->ioeventfds); 1702} 1703 1704Object *memory_region_owner(MemoryRegion *mr) 1705{ 1706 Object *obj = OBJECT(mr); 1707 return obj->parent; 1708} 1709 1710void memory_region_ref(MemoryRegion *mr) 1711{ 1712 /* MMIO callbacks most likely will access data that belongs 1713 * to the owner, hence the need to ref/unref the owner whenever 1714 * the memory region is in use. 1715 * 1716 * The memory region is a child of its owner. As long as the 1717 * owner doesn't call unparent itself on the memory region, 1718 * ref-ing the owner will also keep the memory region alive. 1719 * Memory regions without an owner are supposed to never go away; 1720 * we do not ref/unref them because it slows down DMA sensibly. 1721 */ 1722 if (mr && mr->owner) { 1723 object_ref(mr->owner); 1724 } 1725} 1726 1727void memory_region_unref(MemoryRegion *mr) 1728{ 1729 if (mr && mr->owner) { 1730 object_unref(mr->owner); 1731 } 1732} 1733 1734uint64_t memory_region_size(MemoryRegion *mr) 1735{ 1736 if (int128_eq(mr->size, int128_2_64())) { 1737 return UINT64_MAX; 1738 } 1739 return int128_get64(mr->size); 1740} 1741 1742const char *memory_region_name(const MemoryRegion *mr) 1743{ 1744 if (!mr->name) { 1745 ((MemoryRegion *)mr)->name = 1746 object_get_canonical_path_component(OBJECT(mr)); 1747 } 1748 return mr->name; 1749} 1750 1751bool memory_region_is_ram_device(MemoryRegion *mr) 1752{ 1753 return mr->ram_device; 1754} 1755 1756uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) 1757{ 1758 uint8_t mask = mr->dirty_log_mask; 1759 if (global_dirty_log && mr->ram_block) { 1760 mask |= (1 << DIRTY_MEMORY_MIGRATION); 1761 } 1762 return mask; 1763} 1764 1765bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) 1766{ 1767 return memory_region_get_dirty_log_mask(mr) & (1 << client); 1768} 1769 1770static void memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr) 1771{ 1772 IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE; 1773 IOMMUNotifier *iommu_notifier; 1774 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); 1775 1776 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { 1777 flags |= iommu_notifier->notifier_flags; 1778 } 1779 1780 if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) { 1781 imrc->notify_flag_changed(iommu_mr, 1782 iommu_mr->iommu_notify_flags, 1783 flags); 1784 } 1785 1786 iommu_mr->iommu_notify_flags = flags; 1787} 1788 1789void memory_region_register_iommu_notifier(MemoryRegion *mr, 1790 IOMMUNotifier *n) 1791{ 1792 IOMMUMemoryRegion *iommu_mr; 1793 1794 if (mr->alias) { 1795 memory_region_register_iommu_notifier(mr->alias, n); 1796 return; 1797 } 1798 1799 /* We need to register for at least one bitfield */ 1800 iommu_mr = IOMMU_MEMORY_REGION(mr); 1801 assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); 1802 assert(n->start <= n->end); 1803 QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node); 1804 memory_region_update_iommu_notify_flags(iommu_mr); 1805} 1806 1807uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr) 1808{ 1809 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); 1810 1811 if (imrc->get_min_page_size) { 1812 return imrc->get_min_page_size(iommu_mr); 1813 } 1814 return TARGET_PAGE_SIZE; 1815} 1816 1817void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) 1818{ 1819 MemoryRegion *mr = MEMORY_REGION(iommu_mr); 1820 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); 1821 hwaddr addr, granularity; 1822 IOMMUTLBEntry iotlb; 1823 1824 /* If the IOMMU has its own replay callback, override */ 1825 if (imrc->replay) { 1826 imrc->replay(iommu_mr, n); 1827 return; 1828 } 1829 1830 granularity = memory_region_iommu_get_min_page_size(iommu_mr); 1831 1832 for (addr = 0; addr < memory_region_size(mr); addr += granularity) { 1833 iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE); 1834 if (iotlb.perm != IOMMU_NONE) { 1835 n->notify(n, &iotlb); 1836 } 1837 1838 /* if (2^64 - MR size) < granularity, it's possible to get an 1839 * infinite loop here. This should catch such a wraparound */ 1840 if ((addr + granularity) < addr) { 1841 break; 1842 } 1843 } 1844} 1845 1846void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr) 1847{ 1848 IOMMUNotifier *notifier; 1849 1850 IOMMU_NOTIFIER_FOREACH(notifier, iommu_mr) { 1851 memory_region_iommu_replay(iommu_mr, notifier); 1852 } 1853} 1854 1855void memory_region_unregister_iommu_notifier(MemoryRegion *mr, 1856 IOMMUNotifier *n) 1857{ 1858 IOMMUMemoryRegion *iommu_mr; 1859 1860 if (mr->alias) { 1861 memory_region_unregister_iommu_notifier(mr->alias, n); 1862 return; 1863 } 1864 QLIST_REMOVE(n, node); 1865 iommu_mr = IOMMU_MEMORY_REGION(mr); 1866 memory_region_update_iommu_notify_flags(iommu_mr); 1867} 1868 1869void memory_region_notify_one(IOMMUNotifier *notifier, 1870 IOMMUTLBEntry *entry) 1871{ 1872 IOMMUNotifierFlag request_flags; 1873 1874 /* 1875 * Skip the notification if the notification does not overlap 1876 * with registered range. 1877 */ 1878 if (notifier->start > entry->iova + entry->addr_mask || 1879 notifier->end < entry->iova) { 1880 return; 1881 } 1882 1883 if (entry->perm & IOMMU_RW) { 1884 request_flags = IOMMU_NOTIFIER_MAP; 1885 } else { 1886 request_flags = IOMMU_NOTIFIER_UNMAP; 1887 } 1888 1889 if (notifier->notifier_flags & request_flags) { 1890 notifier->notify(notifier, entry); 1891 } 1892} 1893 1894void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, 1895 IOMMUTLBEntry entry) 1896{ 1897 IOMMUNotifier *iommu_notifier; 1898 1899 assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr))); 1900 1901 IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) { 1902 memory_region_notify_one(iommu_notifier, &entry); 1903 } 1904} 1905 1906int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr, 1907 enum IOMMUMemoryRegionAttr attr, 1908 void *data) 1909{ 1910 IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr); 1911 1912 if (!imrc->get_attr) { 1913 return -EINVAL; 1914 } 1915 1916 return imrc->get_attr(iommu_mr, attr, data); 1917} 1918 1919void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) 1920{ 1921 uint8_t mask = 1 << client; 1922 uint8_t old_logging; 1923 1924 assert(client == DIRTY_MEMORY_VGA); 1925 old_logging = mr->vga_logging_count; 1926 mr->vga_logging_count += log ? 1 : -1; 1927 if (!!old_logging == !!mr->vga_logging_count) { 1928 return; 1929 } 1930 1931 memory_region_transaction_begin(); 1932 mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask); 1933 memory_region_update_pending |= mr->enabled; 1934 memory_region_transaction_commit(); 1935} 1936 1937bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr, 1938 hwaddr size, unsigned client) 1939{ 1940 assert(mr->ram_block); 1941 return cpu_physical_memory_get_dirty(memory_region_get_ram_addr(mr) + addr, 1942 size, client); 1943} 1944 1945void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, 1946 hwaddr size) 1947{ 1948 assert(mr->ram_block); 1949 cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, 1950 size, 1951 memory_region_get_dirty_log_mask(mr)); 1952} 1953 1954static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) 1955{ 1956 MemoryListener *listener; 1957 AddressSpace *as; 1958 FlatView *view; 1959 FlatRange *fr; 1960 1961 /* If the same address space has multiple log_sync listeners, we 1962 * visit that address space's FlatView multiple times. But because 1963 * log_sync listeners are rare, it's still cheaper than walking each 1964 * address space once. 1965 */ 1966 QTAILQ_FOREACH(listener, &memory_listeners, link) { 1967 if (!listener->log_sync) { 1968 continue; 1969 } 1970 as = listener->address_space; 1971 view = address_space_get_flatview(as); 1972 FOR_EACH_FLAT_RANGE(fr, view) { 1973 if (fr->dirty_log_mask && (!mr || fr->mr == mr)) { 1974 MemoryRegionSection mrs = section_from_flat_range(fr, view); 1975 listener->log_sync(listener, &mrs); 1976 } 1977 } 1978 flatview_unref(view); 1979 } 1980} 1981 1982DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, 1983 hwaddr addr, 1984 hwaddr size, 1985 unsigned client) 1986{ 1987 assert(mr->ram_block); 1988 memory_region_sync_dirty_bitmap(mr); 1989 return cpu_physical_memory_snapshot_and_clear_dirty( 1990 memory_region_get_ram_addr(mr) + addr, size, client); 1991} 1992 1993bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap, 1994 hwaddr addr, hwaddr size) 1995{ 1996 assert(mr->ram_block); 1997 return cpu_physical_memory_snapshot_get_dirty(snap, 1998 memory_region_get_ram_addr(mr) + addr, size); 1999} 2000 2001void memory_region_set_readonly(MemoryRegion *mr, bool readonly) 2002{ 2003 if (mr->readonly != readonly) { 2004 memory_region_transaction_begin(); 2005 mr->readonly = readonly; 2006 memory_region_update_pending |= mr->enabled; 2007 memory_region_transaction_commit(); 2008 } 2009} 2010 2011void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode) 2012{ 2013 if (mr->romd_mode != romd_mode) { 2014 memory_region_transaction_begin(); 2015 mr->romd_mode = romd_mode; 2016 memory_region_update_pending |= mr->enabled; 2017 memory_region_transaction_commit(); 2018 } 2019} 2020 2021void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, 2022 hwaddr size, unsigned client) 2023{ 2024 assert(mr->ram_block); 2025 cpu_physical_memory_test_and_clear_dirty( 2026 memory_region_get_ram_addr(mr) + addr, size, client); 2027} 2028 2029int memory_region_get_fd(MemoryRegion *mr) 2030{ 2031 int fd; 2032 2033 rcu_read_lock(); 2034 while (mr->alias) { 2035 mr = mr->alias; 2036 } 2037 fd = mr->ram_block->fd; 2038 rcu_read_unlock(); 2039 2040 return fd; 2041} 2042 2043void *memory_region_get_ram_ptr(MemoryRegion *mr) 2044{ 2045 void *ptr; 2046 uint64_t offset = 0; 2047 2048 rcu_read_lock(); 2049 while (mr->alias) { 2050 offset += mr->alias_offset; 2051 mr = mr->alias; 2052 } 2053 assert(mr->ram_block); 2054 ptr = qemu_map_ram_ptr(mr->ram_block, offset); 2055 rcu_read_unlock(); 2056 2057 return ptr; 2058} 2059 2060MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset) 2061{ 2062 RAMBlock *block; 2063 2064 block = qemu_ram_block_from_host(ptr, false, offset); 2065 if (!block) { 2066 return NULL; 2067 } 2068 2069 return block->mr; 2070} 2071 2072ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr) 2073{ 2074 return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID; 2075} 2076 2077void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp) 2078{ 2079 assert(mr->ram_block); 2080 2081 qemu_ram_resize(mr->ram_block, newsize, errp); 2082} 2083 2084static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as) 2085{ 2086 FlatView *view; 2087 FlatRange *fr; 2088 CoalescedMemoryRange *cmr; 2089 AddrRange tmp; 2090 MemoryRegionSection section; 2091 2092 view = address_space_get_flatview(as); 2093 FOR_EACH_FLAT_RANGE(fr, view) { 2094 if (fr->mr == mr) { 2095 section = (MemoryRegionSection) { 2096 .fv = view, 2097 .offset_within_address_space = int128_get64(fr->addr.start), 2098 .size = fr->addr.size, 2099 }; 2100 2101 MEMORY_LISTENER_CALL(as, coalesced_mmio_del, Reverse, &section, 2102 int128_get64(fr->addr.start), 2103 int128_get64(fr->addr.size)); 2104 QTAILQ_FOREACH(cmr, &mr->coalesced, link) { 2105 tmp = addrrange_shift(cmr->addr, 2106 int128_sub(fr->addr.start, 2107 int128_make64(fr->offset_in_region))); 2108 if (!addrrange_intersects(tmp, fr->addr)) { 2109 continue; 2110 } 2111 tmp = addrrange_intersection(tmp, fr->addr); 2112 MEMORY_LISTENER_CALL(as, coalesced_mmio_add, Forward, &section, 2113 int128_get64(tmp.start), 2114 int128_get64(tmp.size)); 2115 } 2116 } 2117 } 2118 flatview_unref(view); 2119} 2120 2121static void memory_region_update_coalesced_range(MemoryRegion *mr) 2122{ 2123 AddressSpace *as; 2124 2125 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 2126 memory_region_update_coalesced_range_as(mr, as); 2127 } 2128} 2129 2130void memory_region_set_coalescing(MemoryRegion *mr) 2131{ 2132 memory_region_clear_coalescing(mr); 2133 memory_region_add_coalescing(mr, 0, int128_get64(mr->size)); 2134} 2135 2136void memory_region_add_coalescing(MemoryRegion *mr, 2137 hwaddr offset, 2138 uint64_t size) 2139{ 2140 CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr)); 2141 2142 cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size)); 2143 QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link); 2144 memory_region_update_coalesced_range(mr); 2145 memory_region_set_flush_coalesced(mr); 2146} 2147 2148void memory_region_clear_coalescing(MemoryRegion *mr) 2149{ 2150 CoalescedMemoryRange *cmr; 2151 bool updated = false; 2152 2153 qemu_flush_coalesced_mmio_buffer(); 2154 mr->flush_coalesced_mmio = false; 2155 2156 while (!QTAILQ_EMPTY(&mr->coalesced)) { 2157 cmr = QTAILQ_FIRST(&mr->coalesced); 2158 QTAILQ_REMOVE(&mr->coalesced, cmr, link); 2159 g_free(cmr); 2160 updated = true; 2161 } 2162 2163 if (updated) { 2164 memory_region_update_coalesced_range(mr); 2165 } 2166} 2167 2168void memory_region_set_flush_coalesced(MemoryRegion *mr) 2169{ 2170 mr->flush_coalesced_mmio = true; 2171} 2172 2173void memory_region_clear_flush_coalesced(MemoryRegion *mr) 2174{ 2175 qemu_flush_coalesced_mmio_buffer(); 2176 if (QTAILQ_EMPTY(&mr->coalesced)) { 2177 mr->flush_coalesced_mmio = false; 2178 } 2179} 2180 2181void memory_region_clear_global_locking(MemoryRegion *mr) 2182{ 2183 mr->global_locking = false; 2184} 2185 2186static bool userspace_eventfd_warning; 2187 2188void memory_region_add_eventfd(MemoryRegion *mr, 2189 hwaddr addr, 2190 unsigned size, 2191 bool match_data, 2192 uint64_t data, 2193 EventNotifier *e) 2194{ 2195 MemoryRegionIoeventfd mrfd = { 2196 .addr.start = int128_make64(addr), 2197 .addr.size = int128_make64(size), 2198 .match_data = match_data, 2199 .data = data, 2200 .e = e, 2201 }; 2202 unsigned i; 2203 2204 if (kvm_enabled() && (!(kvm_eventfds_enabled() || 2205 userspace_eventfd_warning))) { 2206 userspace_eventfd_warning = true; 2207 error_report("Using eventfd without MMIO binding in KVM. " 2208 "Suboptimal performance expected"); 2209 } 2210 2211 if (size) { 2212 adjust_endianness(mr, &mrfd.data, size); 2213 } 2214 memory_region_transaction_begin(); 2215 for (i = 0; i < mr->ioeventfd_nb; ++i) { 2216 if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) { 2217 break; 2218 } 2219 } 2220 ++mr->ioeventfd_nb; 2221 mr->ioeventfds = g_realloc(mr->ioeventfds, 2222 sizeof(*mr->ioeventfds) * mr->ioeventfd_nb); 2223 memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i], 2224 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i)); 2225 mr->ioeventfds[i] = mrfd; 2226 ioeventfd_update_pending |= mr->enabled; 2227 memory_region_transaction_commit(); 2228} 2229 2230void memory_region_del_eventfd(MemoryRegion *mr, 2231 hwaddr addr, 2232 unsigned size, 2233 bool match_data, 2234 uint64_t data, 2235 EventNotifier *e) 2236{ 2237 MemoryRegionIoeventfd mrfd = { 2238 .addr.start = int128_make64(addr), 2239 .addr.size = int128_make64(size), 2240 .match_data = match_data, 2241 .data = data, 2242 .e = e, 2243 }; 2244 unsigned i; 2245 2246 if (size) { 2247 adjust_endianness(mr, &mrfd.data, size); 2248 } 2249 memory_region_transaction_begin(); 2250 for (i = 0; i < mr->ioeventfd_nb; ++i) { 2251 if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) { 2252 break; 2253 } 2254 } 2255 assert(i != mr->ioeventfd_nb); 2256 memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1], 2257 sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1))); 2258 --mr->ioeventfd_nb; 2259 mr->ioeventfds = g_realloc(mr->ioeventfds, 2260 sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1); 2261 ioeventfd_update_pending |= mr->enabled; 2262 memory_region_transaction_commit(); 2263} 2264 2265static void memory_region_update_container_subregions(MemoryRegion *subregion) 2266{ 2267 MemoryRegion *mr = subregion->container; 2268 MemoryRegion *other; 2269 2270 memory_region_transaction_begin(); 2271 2272 memory_region_ref(subregion); 2273 QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { 2274 if (subregion->priority >= other->priority) { 2275 QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); 2276 goto done; 2277 } 2278 } 2279 QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link); 2280done: 2281 memory_region_update_pending |= mr->enabled && subregion->enabled; 2282 memory_region_transaction_commit(); 2283} 2284 2285static void memory_region_add_subregion_common(MemoryRegion *mr, 2286 hwaddr offset, 2287 MemoryRegion *subregion) 2288{ 2289 assert(!subregion->container); 2290 subregion->container = mr; 2291 subregion->addr = offset; 2292 memory_region_update_container_subregions(subregion); 2293} 2294 2295void memory_region_add_subregion(MemoryRegion *mr, 2296 hwaddr offset, 2297 MemoryRegion *subregion) 2298{ 2299 subregion->priority = 0; 2300 memory_region_add_subregion_common(mr, offset, subregion); 2301} 2302 2303void memory_region_add_subregion_overlap(MemoryRegion *mr, 2304 hwaddr offset, 2305 MemoryRegion *subregion, 2306 int priority) 2307{ 2308 subregion->priority = priority; 2309 memory_region_add_subregion_common(mr, offset, subregion); 2310} 2311 2312void memory_region_del_subregion(MemoryRegion *mr, 2313 MemoryRegion *subregion) 2314{ 2315 memory_region_transaction_begin(); 2316 assert(subregion->container == mr); 2317 subregion->container = NULL; 2318 QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); 2319 memory_region_unref(subregion); 2320 memory_region_update_pending |= mr->enabled && subregion->enabled; 2321 memory_region_transaction_commit(); 2322} 2323 2324void memory_region_set_enabled(MemoryRegion *mr, bool enabled) 2325{ 2326 if (enabled == mr->enabled) { 2327 return; 2328 } 2329 memory_region_transaction_begin(); 2330 mr->enabled = enabled; 2331 memory_region_update_pending = true; 2332 memory_region_transaction_commit(); 2333} 2334 2335void memory_region_set_size(MemoryRegion *mr, uint64_t size) 2336{ 2337 Int128 s = int128_make64(size); 2338 2339 if (size == UINT64_MAX) { 2340 s = int128_2_64(); 2341 } 2342 if (int128_eq(s, mr->size)) { 2343 return; 2344 } 2345 memory_region_transaction_begin(); 2346 mr->size = s; 2347 memory_region_update_pending = true; 2348 memory_region_transaction_commit(); 2349} 2350 2351static void memory_region_readd_subregion(MemoryRegion *mr) 2352{ 2353 MemoryRegion *container = mr->container; 2354 2355 if (container) { 2356 memory_region_transaction_begin(); 2357 memory_region_ref(mr); 2358 memory_region_del_subregion(container, mr); 2359 mr->container = container; 2360 memory_region_update_container_subregions(mr); 2361 memory_region_unref(mr); 2362 memory_region_transaction_commit(); 2363 } 2364} 2365 2366void memory_region_set_address(MemoryRegion *mr, hwaddr addr) 2367{ 2368 if (addr != mr->addr) { 2369 mr->addr = addr; 2370 memory_region_readd_subregion(mr); 2371 } 2372} 2373 2374void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset) 2375{ 2376 assert(mr->alias); 2377 2378 if (offset == mr->alias_offset) { 2379 return; 2380 } 2381 2382 memory_region_transaction_begin(); 2383 mr->alias_offset = offset; 2384 memory_region_update_pending |= mr->enabled; 2385 memory_region_transaction_commit(); 2386} 2387 2388uint64_t memory_region_get_alignment(const MemoryRegion *mr) 2389{ 2390 return mr->align; 2391} 2392 2393static int cmp_flatrange_addr(const void *addr_, const void *fr_) 2394{ 2395 const AddrRange *addr = addr_; 2396 const FlatRange *fr = fr_; 2397 2398 if (int128_le(addrrange_end(*addr), fr->addr.start)) { 2399 return -1; 2400 } else if (int128_ge(addr->start, addrrange_end(fr->addr))) { 2401 return 1; 2402 } 2403 return 0; 2404} 2405 2406static FlatRange *flatview_lookup(FlatView *view, AddrRange addr) 2407{ 2408 return bsearch(&addr, view->ranges, view->nr, 2409 sizeof(FlatRange), cmp_flatrange_addr); 2410} 2411 2412bool memory_region_is_mapped(MemoryRegion *mr) 2413{ 2414 return mr->container ? true : false; 2415} 2416 2417/* Same as memory_region_find, but it does not add a reference to the 2418 * returned region. It must be called from an RCU critical section. 2419 */ 2420static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr, 2421 hwaddr addr, uint64_t size) 2422{ 2423 MemoryRegionSection ret = { .mr = NULL }; 2424 MemoryRegion *root; 2425 AddressSpace *as; 2426 AddrRange range; 2427 FlatView *view; 2428 FlatRange *fr; 2429 2430 addr += mr->addr; 2431 for (root = mr; root->container; ) { 2432 root = root->container; 2433 addr += root->addr; 2434 } 2435 2436 as = memory_region_to_address_space(root); 2437 if (!as) { 2438 return ret; 2439 } 2440 range = addrrange_make(int128_make64(addr), int128_make64(size)); 2441 2442 view = address_space_to_flatview(as); 2443 fr = flatview_lookup(view, range); 2444 if (!fr) { 2445 return ret; 2446 } 2447 2448 while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) { 2449 --fr; 2450 } 2451 2452 ret.mr = fr->mr; 2453 ret.fv = view; 2454 range = addrrange_intersection(range, fr->addr); 2455 ret.offset_within_region = fr->offset_in_region; 2456 ret.offset_within_region += int128_get64(int128_sub(range.start, 2457 fr->addr.start)); 2458 ret.size = range.size; 2459 ret.offset_within_address_space = int128_get64(range.start); 2460 ret.readonly = fr->readonly; 2461 return ret; 2462} 2463 2464MemoryRegionSection memory_region_find(MemoryRegion *mr, 2465 hwaddr addr, uint64_t size) 2466{ 2467 MemoryRegionSection ret; 2468 rcu_read_lock(); 2469 ret = memory_region_find_rcu(mr, addr, size); 2470 if (ret.mr) { 2471 memory_region_ref(ret.mr); 2472 } 2473 rcu_read_unlock(); 2474 return ret; 2475} 2476 2477bool memory_region_present(MemoryRegion *container, hwaddr addr) 2478{ 2479 MemoryRegion *mr; 2480 2481 rcu_read_lock(); 2482 mr = memory_region_find_rcu(container, addr, 1).mr; 2483 rcu_read_unlock(); 2484 return mr && mr != container; 2485} 2486 2487void memory_global_dirty_log_sync(void) 2488{ 2489 memory_region_sync_dirty_bitmap(NULL); 2490} 2491 2492static VMChangeStateEntry *vmstate_change; 2493 2494void memory_global_dirty_log_start(void) 2495{ 2496 if (vmstate_change) { 2497 qemu_del_vm_change_state_handler(vmstate_change); 2498 vmstate_change = NULL; 2499 } 2500 2501 global_dirty_log = true; 2502 2503 MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); 2504 2505 /* Refresh DIRTY_LOG_MIGRATION bit. */ 2506 memory_region_transaction_begin(); 2507 memory_region_update_pending = true; 2508 memory_region_transaction_commit(); 2509} 2510 2511static void memory_global_dirty_log_do_stop(void) 2512{ 2513 global_dirty_log = false; 2514 2515 /* Refresh DIRTY_LOG_MIGRATION bit. */ 2516 memory_region_transaction_begin(); 2517 memory_region_update_pending = true; 2518 memory_region_transaction_commit(); 2519 2520 MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse); 2521} 2522 2523static void memory_vm_change_state_handler(void *opaque, int running, 2524 RunState state) 2525{ 2526 if (running) { 2527 memory_global_dirty_log_do_stop(); 2528 2529 if (vmstate_change) { 2530 qemu_del_vm_change_state_handler(vmstate_change); 2531 vmstate_change = NULL; 2532 } 2533 } 2534} 2535 2536void memory_global_dirty_log_stop(void) 2537{ 2538 if (!runstate_is_running()) { 2539 if (vmstate_change) { 2540 return; 2541 } 2542 vmstate_change = qemu_add_vm_change_state_handler( 2543 memory_vm_change_state_handler, NULL); 2544 return; 2545 } 2546 2547 memory_global_dirty_log_do_stop(); 2548} 2549 2550static void listener_add_address_space(MemoryListener *listener, 2551 AddressSpace *as) 2552{ 2553 FlatView *view; 2554 FlatRange *fr; 2555 2556 if (listener->begin) { 2557 listener->begin(listener); 2558 } 2559 if (global_dirty_log) { 2560 if (listener->log_global_start) { 2561 listener->log_global_start(listener); 2562 } 2563 } 2564 2565 view = address_space_get_flatview(as); 2566 FOR_EACH_FLAT_RANGE(fr, view) { 2567 MemoryRegionSection section = section_from_flat_range(fr, view); 2568 2569 if (listener->region_add) { 2570 listener->region_add(listener, &section); 2571 } 2572 if (fr->dirty_log_mask && listener->log_start) { 2573 listener->log_start(listener, &section, 0, fr->dirty_log_mask); 2574 } 2575 } 2576 if (listener->commit) { 2577 listener->commit(listener); 2578 } 2579 flatview_unref(view); 2580} 2581 2582static void listener_del_address_space(MemoryListener *listener, 2583 AddressSpace *as) 2584{ 2585 FlatView *view; 2586 FlatRange *fr; 2587 2588 if (listener->begin) { 2589 listener->begin(listener); 2590 } 2591 view = address_space_get_flatview(as); 2592 FOR_EACH_FLAT_RANGE(fr, view) { 2593 MemoryRegionSection section = section_from_flat_range(fr, view); 2594 2595 if (fr->dirty_log_mask && listener->log_stop) { 2596 listener->log_stop(listener, &section, fr->dirty_log_mask, 0); 2597 } 2598 if (listener->region_del) { 2599 listener->region_del(listener, &section); 2600 } 2601 } 2602 if (listener->commit) { 2603 listener->commit(listener); 2604 } 2605 flatview_unref(view); 2606} 2607 2608void memory_listener_register(MemoryListener *listener, AddressSpace *as) 2609{ 2610 MemoryListener *other = NULL; 2611 2612 listener->address_space = as; 2613 if (QTAILQ_EMPTY(&memory_listeners) 2614 || listener->priority >= QTAILQ_LAST(&memory_listeners, 2615 memory_listeners)->priority) { 2616 QTAILQ_INSERT_TAIL(&memory_listeners, listener, link); 2617 } else { 2618 QTAILQ_FOREACH(other, &memory_listeners, link) { 2619 if (listener->priority < other->priority) { 2620 break; 2621 } 2622 } 2623 QTAILQ_INSERT_BEFORE(other, listener, link); 2624 } 2625 2626 if (QTAILQ_EMPTY(&as->listeners) 2627 || listener->priority >= QTAILQ_LAST(&as->listeners, 2628 memory_listeners)->priority) { 2629 QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as); 2630 } else { 2631 QTAILQ_FOREACH(other, &as->listeners, link_as) { 2632 if (listener->priority < other->priority) { 2633 break; 2634 } 2635 } 2636 QTAILQ_INSERT_BEFORE(other, listener, link_as); 2637 } 2638 2639 listener_add_address_space(listener, as); 2640} 2641 2642void memory_listener_unregister(MemoryListener *listener) 2643{ 2644 if (!listener->address_space) { 2645 return; 2646 } 2647 2648 listener_del_address_space(listener, listener->address_space); 2649 QTAILQ_REMOVE(&memory_listeners, listener, link); 2650 QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as); 2651 listener->address_space = NULL; 2652} 2653 2654bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr) 2655{ 2656 void *host; 2657 unsigned size = 0; 2658 unsigned offset = 0; 2659 Object *new_interface; 2660 2661 if (!mr || !mr->ops->request_ptr) { 2662 return false; 2663 } 2664 2665 /* 2666 * Avoid an update if the request_ptr call 2667 * memory_region_invalidate_mmio_ptr which seems to be likely when we use 2668 * a cache. 2669 */ 2670 memory_region_transaction_begin(); 2671 2672 host = mr->ops->request_ptr(mr->opaque, addr - mr->addr, &size, &offset); 2673 2674 if (!host || !size) { 2675 memory_region_transaction_commit(); 2676 return false; 2677 } 2678 2679 new_interface = object_new("mmio_interface"); 2680 qdev_prop_set_uint64(DEVICE(new_interface), "start", offset); 2681 qdev_prop_set_uint64(DEVICE(new_interface), "end", offset + size - 1); 2682 qdev_prop_set_bit(DEVICE(new_interface), "ro", true); 2683 qdev_prop_set_ptr(DEVICE(new_interface), "host_ptr", host); 2684 qdev_prop_set_ptr(DEVICE(new_interface), "subregion", mr); 2685 object_property_set_bool(OBJECT(new_interface), true, "realized", NULL); 2686 2687 memory_region_transaction_commit(); 2688 return true; 2689} 2690 2691typedef struct MMIOPtrInvalidate { 2692 MemoryRegion *mr; 2693 hwaddr offset; 2694 unsigned size; 2695 int busy; 2696 int allocated; 2697} MMIOPtrInvalidate; 2698 2699#define MAX_MMIO_INVALIDATE 10 2700static MMIOPtrInvalidate mmio_ptr_invalidate_list[MAX_MMIO_INVALIDATE]; 2701 2702static void memory_region_do_invalidate_mmio_ptr(CPUState *cpu, 2703 run_on_cpu_data data) 2704{ 2705 MMIOPtrInvalidate *invalidate_data = (MMIOPtrInvalidate *)data.host_ptr; 2706 MemoryRegion *mr = invalidate_data->mr; 2707 hwaddr offset = invalidate_data->offset; 2708 unsigned size = invalidate_data->size; 2709 MemoryRegionSection section = memory_region_find(mr, offset, size); 2710 2711 qemu_mutex_lock_iothread(); 2712 2713 /* Reset dirty so this doesn't happen later. */ 2714 cpu_physical_memory_test_and_clear_dirty(offset, size, 1); 2715 2716 if (section.mr != mr) { 2717 /* memory_region_find add a ref on section.mr */ 2718 memory_region_unref(section.mr); 2719 if (MMIO_INTERFACE(section.mr->owner)) { 2720 /* We found the interface just drop it. */ 2721 object_property_set_bool(section.mr->owner, false, "realized", 2722 NULL); 2723 object_unref(section.mr->owner); 2724 object_unparent(section.mr->owner); 2725 } 2726 } 2727 2728 qemu_mutex_unlock_iothread(); 2729 2730 if (invalidate_data->allocated) { 2731 g_free(invalidate_data); 2732 } else { 2733 invalidate_data->busy = 0; 2734 } 2735} 2736 2737void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset, 2738 unsigned size) 2739{ 2740 size_t i; 2741 MMIOPtrInvalidate *invalidate_data = NULL; 2742 2743 for (i = 0; i < MAX_MMIO_INVALIDATE; i++) { 2744 if (atomic_cmpxchg(&(mmio_ptr_invalidate_list[i].busy), 0, 1) == 0) { 2745 invalidate_data = &mmio_ptr_invalidate_list[i]; 2746 break; 2747 } 2748 } 2749 2750 if (!invalidate_data) { 2751 invalidate_data = g_malloc0(sizeof(MMIOPtrInvalidate)); 2752 invalidate_data->allocated = 1; 2753 } 2754 2755 invalidate_data->mr = mr; 2756 invalidate_data->offset = offset; 2757 invalidate_data->size = size; 2758 2759 async_safe_run_on_cpu(first_cpu, memory_region_do_invalidate_mmio_ptr, 2760 RUN_ON_CPU_HOST_PTR(invalidate_data)); 2761} 2762 2763void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) 2764{ 2765 memory_region_ref(root); 2766 as->root = root; 2767 as->current_map = NULL; 2768 as->ioeventfd_nb = 0; 2769 as->ioeventfds = NULL; 2770 QTAILQ_INIT(&as->listeners); 2771 QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); 2772 as->name = g_strdup(name ? name : "anonymous"); 2773 address_space_update_topology(as); 2774 address_space_update_ioeventfds(as); 2775} 2776 2777static void do_address_space_destroy(AddressSpace *as) 2778{ 2779 assert(QTAILQ_EMPTY(&as->listeners)); 2780 2781 flatview_unref(as->current_map); 2782 g_free(as->name); 2783 g_free(as->ioeventfds); 2784 memory_region_unref(as->root); 2785} 2786 2787void address_space_destroy(AddressSpace *as) 2788{ 2789 MemoryRegion *root = as->root; 2790 2791 /* Flush out anything from MemoryListeners listening in on this */ 2792 memory_region_transaction_begin(); 2793 as->root = NULL; 2794 memory_region_transaction_commit(); 2795 QTAILQ_REMOVE(&address_spaces, as, address_spaces_link); 2796 2797 /* At this point, as->dispatch and as->current_map are dummy 2798 * entries that the guest should never use. Wait for the old 2799 * values to expire before freeing the data. 2800 */ 2801 as->root = root; 2802 call_rcu(as, do_address_space_destroy, rcu); 2803} 2804 2805static const char *memory_region_type(MemoryRegion *mr) 2806{ 2807 if (memory_region_is_ram_device(mr)) { 2808 return "ramd"; 2809 } else if (memory_region_is_romd(mr)) { 2810 return "romd"; 2811 } else if (memory_region_is_rom(mr)) { 2812 return "rom"; 2813 } else if (memory_region_is_ram(mr)) { 2814 return "ram"; 2815 } else { 2816 return "i/o"; 2817 } 2818} 2819 2820typedef struct MemoryRegionList MemoryRegionList; 2821 2822struct MemoryRegionList { 2823 const MemoryRegion *mr; 2824 QTAILQ_ENTRY(MemoryRegionList) mrqueue; 2825}; 2826 2827typedef QTAILQ_HEAD(mrqueue, MemoryRegionList) MemoryRegionListHead; 2828 2829#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \ 2830 int128_sub((size), int128_one())) : 0) 2831#define MTREE_INDENT " " 2832 2833static void mtree_print_mr(fprintf_function mon_printf, void *f, 2834 const MemoryRegion *mr, unsigned int level, 2835 hwaddr base, 2836 MemoryRegionListHead *alias_print_queue) 2837{ 2838 MemoryRegionList *new_ml, *ml, *next_ml; 2839 MemoryRegionListHead submr_print_queue; 2840 const MemoryRegion *submr; 2841 unsigned int i; 2842 hwaddr cur_start, cur_end; 2843 2844 if (!mr) { 2845 return; 2846 } 2847 2848 for (i = 0; i < level; i++) { 2849 mon_printf(f, MTREE_INDENT); 2850 } 2851 2852 cur_start = base + mr->addr; 2853 cur_end = cur_start + MR_SIZE(mr->size); 2854 2855 /* 2856 * Try to detect overflow of memory region. This should never 2857 * happen normally. When it happens, we dump something to warn the 2858 * user who is observing this. 2859 */ 2860 if (cur_start < base || cur_end < cur_start) { 2861 mon_printf(f, "[DETECTED OVERFLOW!] "); 2862 } 2863 2864 if (mr->alias) { 2865 MemoryRegionList *ml; 2866 bool found = false; 2867 2868 /* check if the alias is already in the queue */ 2869 QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) { 2870 if (ml->mr == mr->alias) { 2871 found = true; 2872 } 2873 } 2874 2875 if (!found) { 2876 ml = g_new(MemoryRegionList, 1); 2877 ml->mr = mr->alias; 2878 QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue); 2879 } 2880 mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx 2881 " (prio %d, %s): alias %s @%s " TARGET_FMT_plx 2882 "-" TARGET_FMT_plx "%s\n", 2883 cur_start, cur_end, 2884 mr->priority, 2885 memory_region_type((MemoryRegion *)mr), 2886 memory_region_name(mr), 2887 memory_region_name(mr->alias), 2888 mr->alias_offset, 2889 mr->alias_offset + MR_SIZE(mr->size), 2890 mr->enabled ? "" : " [disabled]"); 2891 } else { 2892 mon_printf(f, 2893 TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %s): %s%s\n", 2894 cur_start, cur_end, 2895 mr->priority, 2896 memory_region_type((MemoryRegion *)mr), 2897 memory_region_name(mr), 2898 mr->enabled ? "" : " [disabled]"); 2899 } 2900 2901 QTAILQ_INIT(&submr_print_queue); 2902 2903 QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) { 2904 new_ml = g_new(MemoryRegionList, 1); 2905 new_ml->mr = submr; 2906 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { 2907 if (new_ml->mr->addr < ml->mr->addr || 2908 (new_ml->mr->addr == ml->mr->addr && 2909 new_ml->mr->priority > ml->mr->priority)) { 2910 QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue); 2911 new_ml = NULL; 2912 break; 2913 } 2914 } 2915 if (new_ml) { 2916 QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue); 2917 } 2918 } 2919 2920 QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) { 2921 mtree_print_mr(mon_printf, f, ml->mr, level + 1, cur_start, 2922 alias_print_queue); 2923 } 2924 2925 QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) { 2926 g_free(ml); 2927 } 2928} 2929 2930struct FlatViewInfo { 2931 fprintf_function mon_printf; 2932 void *f; 2933 int counter; 2934 bool dispatch_tree; 2935}; 2936 2937static void mtree_print_flatview(gpointer key, gpointer value, 2938 gpointer user_data) 2939{ 2940 FlatView *view = key; 2941 GArray *fv_address_spaces = value; 2942 struct FlatViewInfo *fvi = user_data; 2943 fprintf_function p = fvi->mon_printf; 2944 void *f = fvi->f; 2945 FlatRange *range = &view->ranges[0]; 2946 MemoryRegion *mr; 2947 int n = view->nr; 2948 int i; 2949 AddressSpace *as; 2950 2951 p(f, "FlatView #%d\n", fvi->counter); 2952 ++fvi->counter; 2953 2954 for (i = 0; i < fv_address_spaces->len; ++i) { 2955 as = g_array_index(fv_address_spaces, AddressSpace*, i); 2956 p(f, " AS \"%s\", root: %s", as->name, memory_region_name(as->root)); 2957 if (as->root->alias) { 2958 p(f, ", alias %s", memory_region_name(as->root->alias)); 2959 } 2960 p(f, "\n"); 2961 } 2962 2963 p(f, " Root memory region: %s\n", 2964 view->root ? memory_region_name(view->root) : "(none)"); 2965 2966 if (n <= 0) { 2967 p(f, MTREE_INDENT "No rendered FlatView\n\n"); 2968 return; 2969 } 2970 2971 while (n--) { 2972 mr = range->mr; 2973 if (range->offset_in_region) { 2974 p(f, MTREE_INDENT TARGET_FMT_plx "-" 2975 TARGET_FMT_plx " (prio %d, %s): %s @" TARGET_FMT_plx "\n", 2976 int128_get64(range->addr.start), 2977 int128_get64(range->addr.start) + MR_SIZE(range->addr.size), 2978 mr->priority, 2979 range->readonly ? "rom" : memory_region_type(mr), 2980 memory_region_name(mr), 2981 range->offset_in_region); 2982 } else { 2983 p(f, MTREE_INDENT TARGET_FMT_plx "-" 2984 TARGET_FMT_plx " (prio %d, %s): %s\n", 2985 int128_get64(range->addr.start), 2986 int128_get64(range->addr.start) + MR_SIZE(range->addr.size), 2987 mr->priority, 2988 range->readonly ? "rom" : memory_region_type(mr), 2989 memory_region_name(mr)); 2990 } 2991 range++; 2992 } 2993 2994#if !defined(CONFIG_USER_ONLY) 2995 if (fvi->dispatch_tree && view->root) { 2996 mtree_print_dispatch(p, f, view->dispatch, view->root); 2997 } 2998#endif 2999 3000 p(f, "\n"); 3001} 3002 3003static gboolean mtree_info_flatview_free(gpointer key, gpointer value, 3004 gpointer user_data) 3005{ 3006 FlatView *view = key; 3007 GArray *fv_address_spaces = value; 3008 3009 g_array_unref(fv_address_spaces); 3010 flatview_unref(view); 3011 3012 return true; 3013} 3014 3015void mtree_info(fprintf_function mon_printf, void *f, bool flatview, 3016 bool dispatch_tree) 3017{ 3018 MemoryRegionListHead ml_head; 3019 MemoryRegionList *ml, *ml2; 3020 AddressSpace *as; 3021 3022 if (flatview) { 3023 FlatView *view; 3024 struct FlatViewInfo fvi = { 3025 .mon_printf = mon_printf, 3026 .f = f, 3027 .counter = 0, 3028 .dispatch_tree = dispatch_tree 3029 }; 3030 GArray *fv_address_spaces; 3031 GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal); 3032 3033 /* Gather all FVs in one table */ 3034 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 3035 view = address_space_get_flatview(as); 3036 3037 fv_address_spaces = g_hash_table_lookup(views, view); 3038 if (!fv_address_spaces) { 3039 fv_address_spaces = g_array_new(false, false, sizeof(as)); 3040 g_hash_table_insert(views, view, fv_address_spaces); 3041 } 3042 3043 g_array_append_val(fv_address_spaces, as); 3044 } 3045 3046 /* Print */ 3047 g_hash_table_foreach(views, mtree_print_flatview, &fvi); 3048 3049 /* Free */ 3050 g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0); 3051 g_hash_table_unref(views); 3052 3053 return; 3054 } 3055 3056 QTAILQ_INIT(&ml_head); 3057 3058 QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) { 3059 mon_printf(f, "address-space: %s\n", as->name); 3060 mtree_print_mr(mon_printf, f, as->root, 1, 0, &ml_head); 3061 mon_printf(f, "\n"); 3062 } 3063 3064 /* print aliased regions */ 3065 QTAILQ_FOREACH(ml, &ml_head, mrqueue) { 3066 mon_printf(f, "memory-region: %s\n", memory_region_name(ml->mr)); 3067 mtree_print_mr(mon_printf, f, ml->mr, 1, 0, &ml_head); 3068 mon_printf(f, "\n"); 3069 } 3070 3071 QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) { 3072 g_free(ml); 3073 } 3074} 3075 3076void memory_region_init_ram(MemoryRegion *mr, 3077 struct Object *owner, 3078 const char *name, 3079 uint64_t size, 3080 Error **errp) 3081{ 3082 DeviceState *owner_dev; 3083 Error *err = NULL; 3084 3085 memory_region_init_ram_nomigrate(mr, owner, name, size, &err); 3086 if (err) { 3087 error_propagate(errp, err); 3088 return; 3089 } 3090 /* This will assert if owner is neither NULL nor a DeviceState. 3091 * We only want the owner here for the purposes of defining a 3092 * unique name for migration. TODO: Ideally we should implement 3093 * a naming scheme for Objects which are not DeviceStates, in 3094 * which case we can relax this restriction. 3095 */ 3096 owner_dev = DEVICE(owner); 3097 vmstate_register_ram(mr, owner_dev); 3098} 3099 3100void memory_region_init_rom(MemoryRegion *mr, 3101 struct Object *owner, 3102 const char *name, 3103 uint64_t size, 3104 Error **errp) 3105{ 3106 DeviceState *owner_dev; 3107 Error *err = NULL; 3108 3109 memory_region_init_rom_nomigrate(mr, owner, name, size, &err); 3110 if (err) { 3111 error_propagate(errp, err); 3112 return; 3113 } 3114 /* This will assert if owner is neither NULL nor a DeviceState. 3115 * We only want the owner here for the purposes of defining a 3116 * unique name for migration. TODO: Ideally we should implement 3117 * a naming scheme for Objects which are not DeviceStates, in 3118 * which case we can relax this restriction. 3119 */ 3120 owner_dev = DEVICE(owner); 3121 vmstate_register_ram(mr, owner_dev); 3122} 3123 3124void memory_region_init_rom_device(MemoryRegion *mr, 3125 struct Object *owner, 3126 const MemoryRegionOps *ops, 3127 void *opaque, 3128 const char *name, 3129 uint64_t size, 3130 Error **errp) 3131{ 3132 DeviceState *owner_dev; 3133 Error *err = NULL; 3134 3135 memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque, 3136 name, size, &err); 3137 if (err) { 3138 error_propagate(errp, err); 3139 return; 3140 } 3141 /* This will assert if owner is neither NULL nor a DeviceState. 3142 * We only want the owner here for the purposes of defining a 3143 * unique name for migration. TODO: Ideally we should implement 3144 * a naming scheme for Objects which are not DeviceStates, in 3145 * which case we can relax this restriction. 3146 */ 3147 owner_dev = DEVICE(owner); 3148 vmstate_register_ram(mr, owner_dev); 3149} 3150 3151static const TypeInfo memory_region_info = { 3152 .parent = TYPE_OBJECT, 3153 .name = TYPE_MEMORY_REGION, 3154 .instance_size = sizeof(MemoryRegion), 3155 .instance_init = memory_region_initfn, 3156 .instance_finalize = memory_region_finalize, 3157}; 3158 3159static const TypeInfo iommu_memory_region_info = { 3160 .parent = TYPE_MEMORY_REGION, 3161 .name = TYPE_IOMMU_MEMORY_REGION, 3162 .class_size = sizeof(IOMMUMemoryRegionClass), 3163 .instance_size = sizeof(IOMMUMemoryRegion), 3164 .instance_init = iommu_memory_region_initfn, 3165 .abstract = true, 3166}; 3167 3168static void memory_register_types(void) 3169{ 3170 type_register_static(&memory_region_info); 3171 type_register_static(&iommu_memory_region_info); 3172} 3173 3174type_init(memory_register_types)