qemu with hax to log DMA reads & writes: jcs.org/2018/11/12/vfio
memory.c at jcs-vmm
/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qemu/bitops.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qom/object.h"
#include "trace-root.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "sysemu/tcg.h"
#include "sysemu/accel.h"
#include "hw/boards.h"
#include "migration/vmstate.h"

//#define DEBUG_UNASSIGNED

static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
bool global_dirty_log;

static QTAILQ_HEAD(, MemoryListener) memory_listeners
    = QTAILQ_HEAD_INITIALIZER(memory_listeners);

static QTAILQ_HEAD(, AddressSpace) address_spaces
    = QTAILQ_HEAD_INITIALIZER(address_spaces);

static GHashTable *flat_views;

typedef struct AddrRange AddrRange;

/*
 * Note that signed integers are needed for negative offsetting in aliases
 * (large MemoryRegion::alias_offset).
 */
struct AddrRange {
    Int128 start;
    Int128 size;
};

static AddrRange addrrange_make(Int128 start, Int128 size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
}

static Int128 addrrange_end(AddrRange r)
{
    return int128_add(r.start, r.size);
}

static AddrRange addrrange_shift(AddrRange range, Int128 delta)
{
    int128_addto(&range.start, delta);
    return range;
}

static bool addrrange_contains(AddrRange range, Int128 addr)
{
    return int128_ge(addr, range.start)
        && int128_lt(addr, addrrange_end(range));
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return addrrange_contains(r1, r2.start)
        || addrrange_contains(r2, r1.start);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    Int128 start = int128_max(r1.start, r2.start);
    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
    return addrrange_make(start, int128_sub(end, start));
}

enum ListenerDirection { Forward, Reverse };

#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...) \
    do { \
        MemoryListener *_listener; \
\
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, &memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners, link) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)

#define MEMORY_LISTENER_CALL(_as, _callback, _direction, _section, _args...) \
    do { \
        MemoryListener *_listener; \
\
        switch (_direction) { \
        case Forward: \
            QTAILQ_FOREACH(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        case Reverse: \
            QTAILQ_FOREACH_REVERSE(_listener, &(_as)->listeners, link_as) { \
                if (_listener->_callback) { \
                    _listener->_callback(_listener, _section, ##_args); \
                } \
            } \
            break; \
        default: \
            abort(); \
        } \
    } while (0)

/* No need to ref/unref .mr, the FlatRange keeps it alive. */
#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \
    do { \
        MemoryRegionSection mrs = section_from_flat_range(fr, \
                address_space_to_flatview(as)); \
        MEMORY_LISTENER_CALL(as, callback, dir, &mrs, ##_args); \
    } while (0)

struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

struct MemoryRegionIoeventfd {
    AddrRange addr;
    bool match_data;
    uint64_t data;
    EventNotifier *e;
};

static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd *a,
                                           MemoryRegionIoeventfd *b)
{
    if (int128_lt(a->addr.start, b->addr.start)) {
        return true;
    } else if (int128_gt(a->addr.start, b->addr.start)) {
        return false;
    } else if (int128_lt(a->addr.size, b->addr.size)) {
        return true;
    } else if (int128_gt(a->addr.size, b->addr.size)) {
        return false;
    } else if (a->match_data < b->match_data) {
        return true;
    } else if (a->match_data > b->match_data) {
        return false;
    } else if (a->match_data) {
        if (a->data < b->data) {
            return true;
        } else if (a->data > b->data) {
            return false;
        }
    }
    if (a->e < b->e) {
        return true;
    } else if (a->e > b->e) {
        return false;
    }
    return false;
}

static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd *a,
                                          MemoryRegionIoeventfd *b)
{
    return !memory_region_ioeventfd_before(a, b)
        && !memory_region_ioeventfd_before(b, a);
}

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    hwaddr offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
    bool romd_mode;
    bool readonly;
    bool nonvolatile;
};

#define FOR_EACH_FLAT_RANGE(var, view) \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static inline MemoryRegionSection
section_from_flat_range(FlatRange *fr, FlatView *fv)
{
    return (MemoryRegionSection) {
        .mr = fr->mr,
        .fv = fv,
        .offset_within_region = fr->offset_in_region,
        .size = fr->addr.size,
        .offset_within_address_space = int128_get64(fr->addr.start),
        .readonly = fr->readonly,
        .nonvolatile = fr->nonvolatile,
    };
}

static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region
        && a->romd_mode == b->romd_mode
        && a->readonly == b->readonly
        && a->nonvolatile == b->nonvolatile;
}

static FlatView *flatview_new(MemoryRegion *mr_root)
{
    FlatView *view;

    view = g_new0(FlatView, 1);
    view->ref = 1;
    view->root = mr_root;
    memory_region_ref(mr_root);
    trace_flatview_new(view, mr_root);

    return view;
}
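/*
 * Worked example (not part of the original file): the AddrRange helpers
 * above use Int128 so that ranges ending exactly at 2^64 stay
 * representable.  This standalone sketch mirrors addrrange_intersection()
 * with plain uint64_t just to show the arithmetic; all names here are
 * illustrative.
 */
#if 0   /* standalone demo, not compiled into memory.c */
#include <inttypes.h>
#include <stdio.h>

typedef struct { uint64_t start, size; } Range64;

static Range64 range64_intersection(Range64 a, Range64 b)
{
    uint64_t start = a.start > b.start ? a.start : b.start;
    uint64_t a_end = a.start + a.size, b_end = b.start + b.size;
    uint64_t end = a_end < b_end ? a_end : b_end;
    return (Range64){ start, end - start };
}

int main(void)
{
    /* [0x1000, +0x4000) intersected with [0x2000, +0x8000) */
    Range64 r = range64_intersection((Range64){ 0x1000, 0x4000 },
                                     (Range64){ 0x2000, 0x8000 });
    printf("[0x%" PRIx64 ", +0x%" PRIx64 ")\n", r.start, r.size);
    /* prints [0x2000, +0x3000) */
    return 0;
}
#endif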
/* Insert a range into a given position.  Caller is responsible for
 * maintaining sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = g_realloc(view->ranges,
                                 view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    memory_region_ref(range->mr);
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    int i;

    trace_flatview_destroy(view, view->root);
    if (view->dispatch) {
        address_space_dispatch_free(view->dispatch);
    }
    for (i = 0; i < view->nr; i++) {
        memory_region_unref(view->ranges[i].mr);
    }
    g_free(view->ranges);
    memory_region_unref(view->root);
    g_free(view);
}

static bool flatview_ref(FlatView *view)
{
    return atomic_fetch_inc_nonzero(&view->ref) > 0;
}

void flatview_unref(FlatView *view)
{
    if (atomic_fetch_dec(&view->ref) == 1) {
        trace_flatview_destroy_rcu(view, view->root);
        assert(view->root);
        call_rcu(view, flatview_destroy, rcu);
    }
}

static bool can_merge(FlatRange *r1, FlatRange *r2)
{
    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
        && r1->mr == r2->mr
        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
                                r1->addr.size),
                     int128_make64(r2->offset_in_region))
        && r1->dirty_log_mask == r2->dirty_log_mask
        && r1->romd_mode == r2->romd_mode
        && r1->readonly == r2->readonly
        && r1->nonvolatile == r2->nonvolatile;
}

/* Attempt to simplify a view by merging adjacent ranges */
static void flatview_simplify(FlatView *view)
{
    unsigned i, j, k;

    i = 0;
    while (i < view->nr) {
        j = i + 1;
        while (j < view->nr
               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
            ++j;
        }
        ++i;
        for (k = i; k < j; k++) {
            memory_region_unref(view->ranges[k].mr);
        }
        memmove(&view->ranges[i], &view->ranges[j],
                (view->nr - j) * sizeof(view->ranges[j]));
        view->nr -= j - i;
    }
}

static bool memory_region_big_endian(MemoryRegion *mr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
#else
    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
#endif
}

static void adjust_endianness(MemoryRegion *mr, uint64_t *data, MemOp op)
{
    if ((op & MO_BSWAP) != devend_memop(mr->ops->endianness)) {
        switch (op & MO_SIZE) {
        case MO_8:
            break;
        case MO_16:
            *data = bswap16(*data);
            break;
        case MO_32:
            *data = bswap32(*data);
            break;
        case MO_64:
            *data = bswap64(*data);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}
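/*
 * Worked example (illustrative, not from the original file): how the
 * shift/mask helpers above compose a wide access out of narrow ones.
 * access_with_adjusted_size() (further down) calls the accessor once per
 * device-sized chunk; for a little-endian 4-byte read served by a 2-byte
 * device the shifts are i * 8, i.e. 0 and 16:
 */
#if 0   /* standalone demo, not compiled into memory.c */
#include <inttypes.h>
#include <stdio.h>

static void shift_read(uint64_t *value, int shift, uint64_t mask, uint64_t tmp)
{
    /* same logic as memory_region_shift_read_access() above */
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        *value |= (tmp & mask) >> -shift;
    }
}

int main(void)
{
    uint64_t val = 0;
    shift_read(&val, 0,  0xffff, 0x2211);  /* device returns bytes 0-1 */
    shift_read(&val, 16, 0xffff, 0x4433);  /* device returns bytes 2-3 */
    printf("0x%" PRIx64 "\n", val);        /* 0x44332211 */
    return 0;
}
#endif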
static hwaddr memory_region_to_absolute_addr(MemoryRegion *mr, hwaddr offset)
{
    MemoryRegion *root;
    hwaddr abs_addr = offset;

    abs_addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        abs_addr += root->addr;
    }

    return abs_addr;
}

static int get_cpu_index(void)
{
    if (current_cpu) {
        return current_cpu->cpu_index;
    }
    return -1;
}

static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
                                               hwaddr addr,
                                               uint64_t *value,
                                               unsigned size,
                                               signed shift,
                                               uint64_t mask,
                                               MemTxAttrs attrs)
{
    uint64_t tmp;

    tmp = mr->ops->read(mr->opaque, addr, size);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return MEMTX_OK;
}

static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs)
{
    uint64_t tmp = 0;
    MemTxResult r;

    r = mr->ops->read_with_attrs(mr->opaque, addr, &tmp, size, attrs);
    if (mr->subpage) {
        trace_memory_region_subpage_read(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_READ)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_read(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    memory_region_shift_read_access(value, shift, mask, tmp);
    return r;
}

static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *value,
                                                unsigned size,
                                                signed shift,
                                                uint64_t mask,
                                                MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    mr->ops->write(mr->opaque, addr, tmp, size);
    return MEMTX_OK;
}

static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
                                                           hwaddr addr,
                                                           uint64_t *value,
                                                           unsigned size,
                                                           signed shift,
                                                           uint64_t mask,
                                                           MemTxAttrs attrs)
{
    uint64_t tmp = memory_region_shift_write_access(value, shift, mask);

    if (mr->subpage) {
        trace_memory_region_subpage_write(get_cpu_index(), mr, addr, tmp, size);
    } else if (trace_event_get_state_backends(TRACE_MEMORY_REGION_OPS_WRITE)) {
        hwaddr abs_addr = memory_region_to_absolute_addr(mr, addr);
        trace_memory_region_ops_write(get_cpu_index(), mr, abs_addr, tmp, size);
    }
    return mr->ops->write_with_attrs(mr->opaque, addr, tmp, size, attrs);
}

static MemTxResult access_with_adjusted_size(hwaddr addr,
                                             uint64_t *value,
                                             unsigned size,
                                             unsigned access_size_min,
                                             unsigned access_size_max,
                                             MemTxResult (*access_fn)
                                                         (MemoryRegion *mr,
                                                          hwaddr addr,
                                                          uint64_t *value,
                                                          unsigned size,
                                                          signed shift,
                                                          uint64_t mask,
                                                          MemTxAttrs attrs),
                                             MemoryRegion *mr,
                                             MemTxAttrs attrs)
{
    uint64_t access_mask;
    unsigned access_size;
    unsigned i;
    MemTxResult r = MEMTX_OK;

    if (!access_size_min) {
        access_size_min = 1;
    }
    if (!access_size_max) {
        access_size_max = 4;
    }

    /* FIXME: support unaligned access? */
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = MAKE_64BIT_MASK(0, access_size * 8);
    if (memory_region_big_endian(mr)) {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size,
                           (size - access_size - i) * 8, access_mask, attrs);
        }
    } else {
        for (i = 0; i < size; i += access_size) {
            r |= access_fn(mr, addr + i, value, access_size, i * 8,
                           access_mask, attrs);
        }
    }
    return r;
}

static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
{
    AddressSpace *as;

    while (mr->container) {
        mr = mr->container;
    }
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        if (mr == as->root) {
            return as;
        }
    }
    return NULL;
}

/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 Int128 base,
                                 AddrRange clip,
                                 bool readonly,
                                 bool nonvolatile)
{
    MemoryRegion *subregion;
    unsigned i;
    hwaddr offset_in_region;
    Int128 remain;
    Int128 now;
    FlatRange fr;
    AddrRange tmp;

    if (!mr->enabled) {
        return;
    }

    int128_addto(&base, int128_make64(mr->addr));
    readonly |= mr->readonly;
    nonvolatile |= mr->nonvolatile;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        int128_subfrom(&base, int128_make64(mr->alias->addr));
        int128_subfrom(&base, int128_make64(mr->alias_offset));
        render_memory_region(view, mr->alias, base, clip,
                             readonly, nonvolatile);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip,
                             readonly, nonvolatile);
    }

    if (!mr->terminates) {
        return;
    }

    offset_in_region = int128_get64(int128_sub(clip.start, base));
    base = clip.start;
    remain = clip.size;

    fr.mr = mr;
    fr.dirty_log_mask = memory_region_get_dirty_log_mask(mr);
    fr.romd_mode = mr->romd_mode;
    fr.readonly = readonly;
    fr.nonvolatile = nonvolatile;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
            continue;
        }
        if (int128_lt(base, view->ranges[i].addr.start)) {
            now = int128_min(remain,
                             int128_sub(view->ranges[i].addr.start, base));
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            flatview_insert(view, i, &fr);
            ++i;
            int128_addto(&base, now);
            offset_in_region += int128_get64(now);
            int128_subfrom(&remain, now);
        }
        now = int128_sub(int128_min(int128_add(base, remain),
                                    addrrange_end(view->ranges[i].addr)),
                         base);
        int128_addto(&base, now);
        offset_in_region += int128_get64(now);
        int128_subfrom(&remain, now);
    }
    if (int128_nz(remain)) {
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        flatview_insert(view, i, &fr);
    }
}

static MemoryRegion *memory_region_get_flatview_root(MemoryRegion *mr)
{
    while (mr->enabled) {
        if (mr->alias) {
            if (!mr->alias_offset && int128_ge(mr->size, mr->alias->size)) {
                /* The alias is included in its entirety.  Use it as
                 * the "real" root, so that we can share more FlatViews.
                 */
                mr = mr->alias;
                continue;
            }
        } else if (!mr->terminates) {
            unsigned int found = 0;
            MemoryRegion *child, *next = NULL;
            QTAILQ_FOREACH(child, &mr->subregions, subregions_link) {
                if (child->enabled) {
                    if (++found > 1) {
                        next = NULL;
                        break;
                    }
                    if (!child->addr && int128_ge(mr->size, child->size)) {
                        /* A child is included in its entirety.  If it's the
                         * only enabled one, use it in the hope of finding
                         * an alias down the way.  This will also let us
                         * share FlatViews.
                         */
                        next = child;
                    }
                }
            }
            if (found == 0) {
                return NULL;
            }
            if (next) {
                mr = next;
                continue;
            }
        }

        return mr;
    }

    return NULL;
}

/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView *generate_memory_topology(MemoryRegion *mr)
{
    int i;
    FlatView *view;

    view = flatview_new(mr);

    if (mr) {
        render_memory_region(view, mr, int128_zero(),
                             addrrange_make(int128_zero(), int128_2_64()),
                             false, false);
    }
    flatview_simplify(view);

    view->dispatch = address_space_dispatch_new(view);
    for (i = 0; i < view->nr; i++) {
        MemoryRegionSection mrs =
            section_from_flat_range(&view->ranges[i], view);
        flatview_add_to_dispatch(view, &mrs);
    }
    address_space_dispatch_compact(view->dispatch);
    g_hash_table_replace(flat_views, mr, view);

    return view;
}
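/*
 * Sketch (assumption, not from the original file): what the renderer above
 * produces for overlapping subregions.  With a 4 KiB container holding
 * region A at priority 0 covering the whole container and region B at
 * priority 1 at offset 0x400, the flat view splits A around B.  The
 * function and region names are placeholders; unassigned_mem_ops just
 * stands in for real device ops.
 */
static void example_build_overlap(Object *owner, MemoryRegion *parent)
{
    static MemoryRegion container, a, b;

    memory_region_init(&container, owner, "container", 0x1000);
    memory_region_init_io(&a, owner, &unassigned_mem_ops, NULL, "A", 0x1000);
    memory_region_init_io(&b, owner, &unassigned_mem_ops, NULL, "B", 0x200);
    memory_region_add_subregion(&container, 0x0, &a);
    memory_region_add_subregion_overlap(&container, 0x400, &b, 1);
    memory_region_add_subregion(parent, 0x0, &container);
    /* resulting FlatRanges:
     *   [0x000, 0x400) -> A
     *   [0x400, 0x600) -> B
     *   [0x600, 0x1000) -> A
     */
}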
static void address_space_add_del_ioeventfds(AddressSpace *as,
                                             MemoryRegionIoeventfd *fds_new,
                                             unsigned fds_new_nb,
                                             MemoryRegionIoeventfd *fds_old,
                                             unsigned fds_old_nb)
{
    unsigned iold, inew;
    MemoryRegionIoeventfd *fd;
    MemoryRegionSection section;

    /* Generate a symmetric difference of the old and new fd sets, adding
     * and deleting as necessary.
     */

    iold = inew = 0;
    while (iold < fds_old_nb || inew < fds_new_nb) {
        if (iold < fds_old_nb
            && (inew == fds_new_nb
                || memory_region_ioeventfd_before(&fds_old[iold],
                                                  &fds_new[inew]))) {
            fd = &fds_old[iold];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_del, Forward, &section,
                                 fd->match_data, fd->data, fd->e);
            ++iold;
        } else if (inew < fds_new_nb
                   && (iold == fds_old_nb
                       || memory_region_ioeventfd_before(&fds_new[inew],
                                                         &fds_old[iold]))) {
            fd = &fds_new[inew];
            section = (MemoryRegionSection) {
                .fv = address_space_to_flatview(as),
                .offset_within_address_space = int128_get64(fd->addr.start),
                .size = fd->addr.size,
            };
            MEMORY_LISTENER_CALL(as, eventfd_add, Reverse, &section,
                                 fd->match_data, fd->data, fd->e);
            ++inew;
        } else {
            ++iold;
            ++inew;
        }
    }
}

FlatView *address_space_get_flatview(AddressSpace *as)
{
    FlatView *view;

    RCU_READ_LOCK_GUARD();
    do {
        view = address_space_to_flatview(as);
        /* If somebody has replaced as->current_map concurrently,
         * flatview_ref returns false.
         */
    } while (!flatview_ref(view));
    return view;
}

static void address_space_update_ioeventfds(AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;
    unsigned ioeventfd_nb = 0;
    unsigned ioeventfd_max;
    MemoryRegionIoeventfd *ioeventfds;
    AddrRange tmp;
    unsigned i;

    /*
     * It is likely that the number of ioeventfds hasn't changed much, so use
     * the previous size as the starting value, with some headroom to avoid
     * gratuitous reallocations.
     */
    ioeventfd_max = QEMU_ALIGN_UP(as->ioeventfd_nb, 4);
    ioeventfds = g_new(MemoryRegionIoeventfd, ioeventfd_max);

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
                                  int128_sub(fr->addr.start,
                                             int128_make64(fr->offset_in_region)));
            if (addrrange_intersects(fr->addr, tmp)) {
                ++ioeventfd_nb;
                if (ioeventfd_nb > ioeventfd_max) {
                    ioeventfd_max = MAX(ioeventfd_max * 2, 4);
                    ioeventfds = g_realloc(ioeventfds,
                                           ioeventfd_max * sizeof(*ioeventfds));
                }
                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
                ioeventfds[ioeventfd_nb-1].addr = tmp;
            }
        }
    }

    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
                                     as->ioeventfds, as->ioeventfd_nb);

    g_free(as->ioeventfds);
    as->ioeventfds = ioeventfds;
    as->ioeventfd_nb = ioeventfd_nb;
    flatview_unref(view);
}
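/*
 * Sketch (assumption, not the author's exact patch): within this file the
 * same get-flatview/unref pattern can be used to walk every FlatRange of
 * an address space, e.g. to dump the guest-physical layout before starting
 * to log DMA.  example_dump_flatview is a made-up name; FlatRange and
 * FOR_EACH_FLAT_RANGE are internal to memory.c, so this only works here.
 */
static void example_dump_flatview(AddressSpace *as)
{
    FlatView *view = address_space_get_flatview(as);
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, view) {
        fprintf(stderr, "0x%" PRIx64 " +0x%" PRIx64 " %s\n",
                int128_get64(fr->addr.start),
                int128_get64(fr->addr.size),
                memory_region_name(fr->mr));
    }
    flatview_unref(view);
}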
/*
 * Notify the memory listeners about the coalesced IO change events of
 * range `cmr'.  Only the part that has intersection of the specified
 * FlatRange will be sent.
 */
static void flat_range_coalesced_io_notify(FlatRange *fr, AddressSpace *as,
                                           CoalescedMemoryRange *cmr, bool add)
{
    AddrRange tmp;

    tmp = addrrange_shift(cmr->addr,
                          int128_sub(fr->addr.start,
                                     int128_make64(fr->offset_in_region)));
    if (!addrrange_intersects(tmp, fr->addr)) {
        return;
    }
    tmp = addrrange_intersection(tmp, fr->addr);

    if (add) {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, coalesced_io_add,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    } else {
        MEMORY_LISTENER_UPDATE_REGION(fr, as, Reverse, coalesced_io_del,
                                      int128_get64(tmp.start),
                                      int128_get64(tmp.size));
    }
}

static void flat_range_coalesced_io_del(FlatRange *fr, AddressSpace *as)
{
    CoalescedMemoryRange *cmr;

    QTAILQ_FOREACH(cmr, &fr->mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, false);
    }
}

static void flat_range_coalesced_io_add(FlatRange *fr, AddressSpace *as)
{
    MemoryRegion *mr = fr->mr;
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
        flat_range_coalesced_io_notify(fr, as, cmr, true);
    }
}

static void address_space_update_topology_pass(AddressSpace *as,
                                               const FlatView *old_view,
                                               const FlatView *new_view,
                                               bool adding)
{
    unsigned iold, inew;
    FlatRange *frold, *frnew;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view->nr || inew < new_view->nr) {
        if (iold < old_view->nr) {
            frold = &old_view->ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view->nr) {
            frnew = &new_view->ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || int128_lt(frold->addr.start, frnew->addr.start)
                || (int128_eq(frold->addr.start, frnew->addr.start)
                    && !flatrange_equal(frold, frnew)))) {
            /* In old but not in new, or in both but attributes changed. */

            if (!adding) {
                flat_range_coalesced_io_del(frold, as);
                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
            }

            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both and unchanged (except logging may have changed) */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
                if (frnew->dirty_log_mask & ~frold->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
                if (frold->dirty_log_mask & ~frnew->dirty_log_mask) {
                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop,
                                                  frold->dirty_log_mask,
                                                  frnew->dirty_log_mask);
                }
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            if (adding) {
                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
                flat_range_coalesced_io_add(frnew, as);
            }

            ++inew;
        }
    }
}
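/*
 * Sketch (assumption, not the author's exact patch): the listener walk in
 * address_space_update_topology_pass() above is what delivers region_add
 * and region_del callbacks.  A minimal listener that logs every range as
 * it appears or disappears could look like this; all "mylog" names are
 * made up.
 */
static void mylog_region_add(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    fprintf(stderr, "add %s @0x%" PRIx64 " size 0x%" PRIx64 "\n",
            memory_region_name(section->mr),
            section->offset_within_address_space,
            int128_get64(section->size));
}

static void mylog_region_del(MemoryListener *listener,
                             MemoryRegionSection *section)
{
    fprintf(stderr, "del %s @0x%" PRIx64 "\n",
            memory_region_name(section->mr),
            section->offset_within_address_space);
}

static MemoryListener mylog_listener = {
    .region_add = mylog_region_add,
    .region_del = mylog_region_del,
};

/* registered once at startup, e.g.:
 *     memory_listener_register(&mylog_listener, &address_space_memory);
 */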
static void flatviews_init(void)
{
    static FlatView *empty_view;

    if (flat_views) {
        return;
    }

    flat_views = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
                                       (GDestroyNotify) flatview_unref);
    if (!empty_view) {
        empty_view = generate_memory_topology(NULL);
        /* We keep it alive forever in the global variable. */
        flatview_ref(empty_view);
    } else {
        g_hash_table_replace(flat_views, NULL, empty_view);
        flatview_ref(empty_view);
    }
}

static void flatviews_reset(void)
{
    AddressSpace *as;

    if (flat_views) {
        g_hash_table_unref(flat_views);
        flat_views = NULL;
    }
    flatviews_init();

    /* Render unique FVs */
    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

        if (g_hash_table_lookup(flat_views, physmr)) {
            continue;
        }

        generate_memory_topology(physmr);
    }
}
static void address_space_set_flatview(AddressSpace *as)
{
    FlatView *old_view = address_space_to_flatview(as);
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);
    FlatView *new_view = g_hash_table_lookup(flat_views, physmr);

    assert(new_view);

    if (old_view == new_view) {
        return;
    }

    if (old_view) {
        flatview_ref(old_view);
    }

    flatview_ref(new_view);

    if (!QTAILQ_EMPTY(&as->listeners)) {
        FlatView tmpview = { .nr = 0 }, *old_view2 = old_view;

        if (!old_view2) {
            old_view2 = &tmpview;
        }
        address_space_update_topology_pass(as, old_view2, new_view, false);
        address_space_update_topology_pass(as, old_view2, new_view, true);
    }

    /* Writes are protected by the BQL. */
    atomic_rcu_set(&as->current_map, new_view);
    if (old_view) {
        flatview_unref(old_view);
    }

    /* Note that all the old MemoryRegions are still alive up to this
     * point.  This relieves most MemoryListeners from the need to
     * ref/unref the MemoryRegions they get---unless they use them
     * outside the iothread mutex, in which case precise reference
     * counting is necessary.
     */
    if (old_view) {
        flatview_unref(old_view);
    }
}

static void address_space_update_topology(AddressSpace *as)
{
    MemoryRegion *physmr = memory_region_get_flatview_root(as->root);

    flatviews_init();
    if (!g_hash_table_lookup(flat_views, physmr)) {
        generate_memory_topology(physmr);
    }
    address_space_set_flatview(as);
}

void memory_region_transaction_begin(void)
{
    qemu_flush_coalesced_mmio_buffer();
    ++memory_region_transaction_depth;
}

void memory_region_transaction_commit(void)
{
    AddressSpace *as;

    assert(memory_region_transaction_depth);
    assert(qemu_mutex_iothread_locked());

    --memory_region_transaction_depth;
    if (!memory_region_transaction_depth) {
        if (memory_region_update_pending) {
            flatviews_reset();

            MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);

            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_set_flatview(as);
                address_space_update_ioeventfds(as);
            }
            memory_region_update_pending = false;
            ioeventfd_update_pending = false;
            MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
        } else if (ioeventfd_update_pending) {
            QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
                address_space_update_ioeventfds(as);
            }
            ioeventfd_update_pending = false;
        }
    }
}

static void memory_region_destructor_none(MemoryRegion *mr)
{
}

static void memory_region_destructor_ram(MemoryRegion *mr)
{
    qemu_ram_free(mr->ram_block);
}

static bool memory_region_need_escape(char c)
{
    return c == '/' || c == '[' || c == '\\' || c == ']';
}
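/*
 * Usage note (illustrative, not from the original file): batching several
 * layout changes inside one transaction, as the begin/commit pair above
 * supports, rebuilds the FlatViews once instead of after every individual
 * change.  example_swap_regions, mr_a and mr_b are placeholders.
 */
static void example_swap_regions(MemoryRegion *mr_a, MemoryRegion *mr_b)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(mr_a, false);
    memory_region_set_enabled(mr_b, true);
    memory_region_transaction_commit();    /* one topology update, not two */
}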
static char *memory_region_escape_name(const char *name)
{
    const char *p;
    char *escaped, *q;
    uint8_t c;
    size_t bytes = 0;

    for (p = name; *p; p++) {
        bytes += memory_region_need_escape(*p) ? 4 : 1;
    }
    if (bytes == p - name) {
        return g_memdup(name, bytes + 1);
    }

    escaped = g_malloc(bytes + 1);
    for (p = name, q = escaped; *p; p++) {
        c = *p;
        if (unlikely(memory_region_need_escape(c))) {
            *q++ = '\\';
            *q++ = 'x';
            *q++ = "0123456789abcdef"[c >> 4];
            c = "0123456789abcdef"[c & 15];
        }
        *q++ = c;
    }
    *q = 0;
    return escaped;
}

static void memory_region_do_init(MemoryRegion *mr,
                                  Object *owner,
                                  const char *name,
                                  uint64_t size)
{
    mr->size = int128_make64(size);
    if (size == UINT64_MAX) {
        mr->size = int128_2_64();
    }
    mr->name = g_strdup(name);
    mr->owner = owner;
    mr->ram_block = NULL;

    if (name) {
        char *escaped_name = memory_region_escape_name(name);
        char *name_array = g_strdup_printf("%s[*]", escaped_name);

        if (!owner) {
            owner = container_get(qdev_get_machine(), "/unattached");
        }

        object_property_add_child(owner, name_array, OBJECT(mr), &error_abort);
        object_unref(OBJECT(mr));
        g_free(name_array);
        g_free(escaped_name);
    }
}

void memory_region_init(MemoryRegion *mr,
                        Object *owner,
                        const char *name,
                        uint64_t size)
{
    object_initialize(mr, sizeof(*mr), TYPE_MEMORY_REGION);
    memory_region_do_init(mr, owner, name, size);
}

static void memory_region_get_container(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    gchar *path = (gchar *)"";

    if (mr->container) {
        path = object_get_canonical_path(OBJECT(mr->container));
    }
    visit_type_str(v, name, &path, errp);
    if (mr->container) {
        g_free(path);
    }
}

static Object *memory_region_resolve_container(Object *obj, void *opaque,
                                               const char *part)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    return OBJECT(mr->container);
}

static void memory_region_get_priority(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    int32_t value = mr->priority;

    visit_type_int32(v, name, &value, errp);
}

static void memory_region_get_size(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    uint64_t value = memory_region_size(mr);

    visit_type_uint64(v, name, &value, errp);
}

static void memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);
    ObjectProperty *op;

    mr->ops = &unassigned_mem_ops;
    mr->enabled = true;
    mr->romd_mode = true;
    mr->global_locking = true;
    mr->destructor = memory_region_destructor_none;
    QTAILQ_INIT(&mr->subregions);
    QTAILQ_INIT(&mr->coalesced);

    op = object_property_add(OBJECT(mr), "container",
                             "link<" TYPE_MEMORY_REGION ">",
                             memory_region_get_container,
                             NULL, /* memory_region_set_container */
                             NULL, NULL, &error_abort);
    op->resolve = memory_region_resolve_container;

    object_property_add_uint64_ptr(OBJECT(mr), "addr",
                                   &mr->addr, OBJ_PROP_FLAG_READ, &error_abort);
    object_property_add(OBJECT(mr), "priority", "uint32",
                        memory_region_get_priority,
                        NULL, /* memory_region_set_priority */
                        NULL, NULL, &error_abort);
    object_property_add(OBJECT(mr), "size", "uint64",
                        memory_region_get_size,
                        NULL, /* memory_region_set_size, */
                        NULL, NULL, &error_abort);
}

static void iommu_memory_region_initfn(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    mr->is_iommu = true;
}

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
}

static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
                                   unsigned size, bool is_write,
                                   MemTxAttrs attrs)
{
    return false;
}

const MemoryRegionOps unassigned_mem_ops = {
    .valid.accepts = unassigned_mem_accepts,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t memory_region_ram_device_read(void *opaque,
                                              hwaddr addr, unsigned size)
{
    MemoryRegion *mr = opaque;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)(mr->ram_block->host + addr);
        break;
    case 2:
        data = *(uint16_t *)(mr->ram_block->host + addr);
        break;
    case 4:
        data = *(uint32_t *)(mr->ram_block->host + addr);
        break;
    case 8:
        data = *(uint64_t *)(mr->ram_block->host + addr);
        break;
    }

    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}

static void memory_region_ram_device_write(void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;

    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)(mr->ram_block->host + addr) = data;
        break;
    }
}

static const MemoryRegionOps ram_device_mem_ops = {
    .read = memory_region_ram_device_read,
    .write = memory_region_ram_device_write,
    .endianness = DEVICE_HOST_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
        .unaligned = true,
    },
};
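/*
 * Sketch (assumption, not the author's exact patch): a device that wants
 * its MMIO logged can do it in its own MemoryRegionOps, mirroring the
 * ram_device ops above.  Everything named "mylog" here is made up.
 */
static uint64_t mylog_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    fprintf(stderr, "mmio read  +0x%" PRIx64 " size %u\n",
            (uint64_t)addr, size);
    return 0;
}

static void mylog_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                             unsigned size)
{
    fprintf(stderr, "mmio write +0x%" PRIx64 " = 0x%" PRIx64 " size %u\n",
            (uint64_t)addr, val, size);
}

static const MemoryRegionOps mylog_mmio_ops = {
    .read = mylog_mmio_read,
    .write = mylog_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/* hooked up with, e.g.:
 *     memory_region_init_io(&mr, OBJECT(dev), &mylog_mmio_ops, dev,
 *                           "mylog-mmio", 0x1000);
 */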
bool memory_region_access_valid(MemoryRegion *mr,
                                hwaddr addr,
                                unsigned size,
                                bool is_write,
                                MemTxAttrs attrs)
{
    int access_size_min, access_size_max;
    int access_size, i;

    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    if (!mr->ops->valid.accepts) {
        return true;
    }

    access_size_min = mr->ops->valid.min_access_size;
    if (!mr->ops->valid.min_access_size) {
        access_size_min = 1;
    }

    access_size_max = mr->ops->valid.max_access_size;
    if (!mr->ops->valid.max_access_size) {
        access_size_max = 4;
    }

    access_size = MAX(MIN(size, access_size_max), access_size_min);
    for (i = 0; i < size; i += access_size) {
        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
                                    is_write, attrs)) {
            return false;
        }
    }

    return true;
}

static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr,
                                                hwaddr addr,
                                                uint64_t *pval,
                                                unsigned size,
                                                MemTxAttrs attrs)
{
    *pval = 0;

    if (mr->ops->read) {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_accessor,
                                         mr, attrs);
    } else {
        return access_with_adjusted_size(addr, pval, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_read_with_attrs_accessor,
                                         mr, attrs);
    }
}

MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs)
{
    unsigned size = memop_size(op);
    MemTxResult r;

    if (!memory_region_access_valid(mr, addr, size, false, attrs)) {
        *pval = unassigned_mem_read(mr, addr, size);
        return MEMTX_DECODE_ERROR;
    }

    r = memory_region_dispatch_read1(mr, addr, pval, size, attrs);
    adjust_endianness(mr, pval, op);
    return r;
}

/* Return true if an eventfd was signalled */
static bool memory_region_dispatch_write_eventfds(MemoryRegion *mr,
                                                  hwaddr addr,
                                                  uint64_t data,
                                                  unsigned size,
                                                  MemTxAttrs attrs)
{
    MemoryRegionIoeventfd ioeventfd = {
        .addr = addrrange_make(int128_make64(addr), int128_make64(size)),
        .data = data,
    };
    unsigned i;

    for (i = 0; i < mr->ioeventfd_nb; i++) {
        ioeventfd.match_data = mr->ioeventfds[i].match_data;
        ioeventfd.e = mr->ioeventfds[i].e;

        if (memory_region_ioeventfd_equal(&ioeventfd, &mr->ioeventfds[i])) {
            event_notifier_set(ioeventfd.e);
            return true;
        }
    }

    return false;
}

MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs)
{
    unsigned size = memop_size(op);

    if (!memory_region_access_valid(mr, addr, size, true, attrs)) {
        unassigned_mem_write(mr, addr, data, size);
        return MEMTX_DECODE_ERROR;
    }

    adjust_endianness(mr, &data, op);

    if ((!kvm_eventfds_enabled()) &&
        memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) {
        return MEMTX_OK;
    }

    if (mr->ops->write) {
        return access_with_adjusted_size(addr, &data, size,
                                         mr->ops->impl.min_access_size,
                                         mr->ops->impl.max_access_size,
                                         memory_region_write_accessor, mr,
                                         attrs);
    } else {
        return
            access_with_adjusted_size(addr, &data, size,
                                      mr->ops->impl.min_access_size,
                                      mr->ops->impl.max_access_size,
                                      memory_region_write_with_attrs_accessor,
                                      mr, attrs);
    }
}
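/*
 * Sketch (assumption, not the author's exact patch): the dispatch_read/
 * dispatch_write pair above is the choke point that every MMIO access
 * funnels through, so a "log everything" hack could call a helper like
 * this at the top of both functions.  Note that stock QEMU can already
 * emit much of this via trace events, e.g. "-trace 'memory_region_ops_*'"
 * on the command line.  log_dispatch is a made-up name.
 */
static void log_dispatch(MemoryRegion *mr, const char *kind, hwaddr addr,
                         uint64_t val, unsigned size)
{
    fprintf(stderr, "%s %s +0x%" PRIx64 " val 0x%" PRIx64 " size %u\n",
            kind, memory_region_name(mr), (uint64_t)addr, val, size);
}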
void memory_region_init_io(MemoryRegion *mr,
                           Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->ops = ops ? ops : &unassigned_mem_ops;
    mr->opaque = opaque;
    mr->terminates = true;
}

void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
}

void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, share, mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_resizeable(size, max_size, resized,
                                              mr, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

#ifdef CONFIG_POSIX
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->align = align;
    mr->ram_block = qemu_ram_alloc_from_file(size, mr, ram_flags, path, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp)
{
    Error *err = NULL;
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc_from_fd(size, mr,
                                           share ? RAM_SHARED : 0,
                                           fd, &err);
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}
#endif
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;

    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr)
{
    memory_region_init(mr, owner, name, size);
    mr->ram = true;
    mr->terminates = true;
    mr->ram_device = true;
    mr->ops = &ram_device_mem_ops;
    mr->opaque = mr;
    mr->destructor = memory_region_destructor_ram;
    mr->dirty_log_mask = tcg_enabled() ? (1 << DIRTY_MEMORY_CODE) : 0;
    /* qemu_ram_alloc_from_ptr cannot fail with ptr != NULL. */
    assert(ptr != NULL);
    mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
}

void memory_region_init_alias(MemoryRegion *mr,
                              Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size)
{
    memory_region_init(mr, owner, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}

void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp)
{
    memory_region_init_ram_shared_nomigrate(mr, owner, name, size, false, errp);
    mr->readonly = true;
}

void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp)
{
    Error *err = NULL;
    assert(ops);
    memory_region_init(mr, owner, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->terminates = true;
    mr->rom_device = true;
    mr->destructor = memory_region_destructor_ram;
    mr->ram_block = qemu_ram_alloc(size, false, mr, &err);
    if (err) {
        mr->size = int128_zero();
        object_unparent(OBJECT(mr));
        error_propagate(errp, err);
    }
}

void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size)
{
    struct IOMMUMemoryRegion *iommu_mr;
    struct MemoryRegion *mr;

    object_initialize(_iommu_mr, instance_size, mrtypename);
    mr = MEMORY_REGION(_iommu_mr);
    memory_region_do_init(mr, owner, name, size);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    mr->terminates = true;  /* then re-forwards */
    QLIST_INIT(&iommu_mr->iommu_notify);
    iommu_mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE;
}
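/*
 * Sketch (assumption): memory_region_init_ram_device_ptr() above is how
 * VFIO-style code exposes an mmap()ed device BAR to the guest; because the
 * region is a ram_device, accesses that can't be mapped directly go
 * through ram_device_mem_ops and therefore hit the trace points earlier in
 * this file.  example_map_bar, device_fd, bar_size and guest_offset are
 * placeholders.
 */
static void example_map_bar(Object *owner, MemoryRegion *parent,
                            int device_fd, uint64_t bar_size,
                            hwaddr guest_offset)
{
    static MemoryRegion bar_mr;
    void *p = mmap(NULL, (size_t)bar_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, device_fd, 0);

    if (p == MAP_FAILED) {
        return;
    }
    memory_region_init_ram_device_ptr(&bar_mr, owner, "example-bar",
                                      bar_size, p);
    memory_region_add_subregion(parent, guest_offset, &bar_mr);
}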
static void memory_region_finalize(Object *obj)
{
    MemoryRegion *mr = MEMORY_REGION(obj);

    assert(!mr->container);

    /* We know the region is not visible in any address space (it
     * does not have a container and cannot be a root either because
     * it has no references), so we can blindly clear mr->enabled.
     * memory_region_set_enabled instead could trigger a transaction
     * and cause an infinite loop.
     */
    mr->enabled = false;
    memory_region_transaction_begin();
    while (!QTAILQ_EMPTY(&mr->subregions)) {
        MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions);
        memory_region_del_subregion(mr, subregion);
    }
    memory_region_transaction_commit();

    mr->destructor(mr);
    memory_region_clear_coalescing(mr);
    g_free((char *)mr->name);
    g_free(mr->ioeventfds);
}

Object *memory_region_owner(MemoryRegion *mr)
{
    Object *obj = OBJECT(mr);
    return obj->parent;
}

void memory_region_ref(MemoryRegion *mr)
{
    /* MMIO callbacks most likely will access data that belongs
     * to the owner, hence the need to ref/unref the owner whenever
     * the memory region is in use.
     *
     * The memory region is a child of its owner.  As long as the
     * owner doesn't call unparent itself on the memory region,
     * ref-ing the owner will also keep the memory region alive.
     * Memory regions without an owner are supposed to never go away;
     * we do not ref/unref them because it slows down DMA noticeably.
     */
    if (mr && mr->owner) {
        object_ref(mr->owner);
    }
}

void memory_region_unref(MemoryRegion *mr)
{
    if (mr && mr->owner) {
        object_unref(mr->owner);
    }
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    if (int128_eq(mr->size, int128_2_64())) {
        return UINT64_MAX;
    }
    return int128_get64(mr->size);
}

const char *memory_region_name(const MemoryRegion *mr)
{
    if (!mr->name) {
        ((MemoryRegion *)mr)->name =
            object_get_canonical_path_component(OBJECT(mr));
    }
    return mr->name;
}

bool memory_region_is_ram_device(MemoryRegion *mr)
{
    return mr->ram_device;
}

uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
    uint8_t mask = mr->dirty_log_mask;
    if (global_dirty_log && mr->ram_block) {
        mask |= (1 << DIRTY_MEMORY_MIGRATION);
    }
    return mask;
}

bool memory_region_is_logging(MemoryRegion *mr, uint8_t client)
{
    return memory_region_get_dirty_log_mask(mr) & (1 << client);
}
static int memory_region_update_iommu_notify_flags(IOMMUMemoryRegion *iommu_mr,
                                                   Error **errp)
{
    IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE;
    IOMMUNotifier *iommu_notifier;
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    int ret = 0;

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        flags |= iommu_notifier->notifier_flags;
    }

    if (flags != iommu_mr->iommu_notify_flags && imrc->notify_flag_changed) {
        ret = imrc->notify_flag_changed(iommu_mr,
                                        iommu_mr->iommu_notify_flags,
                                        flags, errp);
    }

    if (!ret) {
        iommu_mr->iommu_notify_flags = flags;
    }
    return ret;
}

int memory_region_register_iommu_notifier(MemoryRegion *mr,
                                          IOMMUNotifier *n, Error **errp)
{
    IOMMUMemoryRegion *iommu_mr;
    int ret;

    if (mr->alias) {
        return memory_region_register_iommu_notifier(mr->alias, n, errp);
    }

    /* We need to register for at least one bitfield */
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    assert(n->notifier_flags != IOMMU_NOTIFIER_NONE);
    assert(n->start <= n->end);
    assert(n->iommu_idx >= 0 &&
           n->iommu_idx < memory_region_iommu_num_indexes(iommu_mr));

    QLIST_INSERT_HEAD(&iommu_mr->iommu_notify, n, node);
    ret = memory_region_update_iommu_notify_flags(iommu_mr, errp);
    if (ret) {
        QLIST_REMOVE(n, node);
    }
    return ret;
}

uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (imrc->get_min_page_size) {
        return imrc->get_min_page_size(iommu_mr);
    }
    return TARGET_PAGE_SIZE;
}

void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    MemoryRegion *mr = MEMORY_REGION(iommu_mr);
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);
    hwaddr addr, granularity;
    IOMMUTLBEntry iotlb;

    /* If the IOMMU has its own replay callback, override */
    if (imrc->replay) {
        imrc->replay(iommu_mr, n);
        return;
    }

    granularity = memory_region_iommu_get_min_page_size(iommu_mr);

    for (addr = 0; addr < memory_region_size(mr); addr += granularity) {
        iotlb = imrc->translate(iommu_mr, addr, IOMMU_NONE, n->iommu_idx);
        if (iotlb.perm != IOMMU_NONE) {
            n->notify(n, &iotlb);
        }

        /* if (2^64 - MR size) < granularity, it's possible to get an
         * infinite loop here.  This should catch such a wraparound */
        if ((addr + granularity) < addr) {
            break;
        }
    }
}

void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n)
{
    IOMMUMemoryRegion *iommu_mr;

    if (mr->alias) {
        memory_region_unregister_iommu_notifier(mr->alias, n);
        return;
    }
    QLIST_REMOVE(n, node);
    iommu_mr = IOMMU_MEMORY_REGION(mr);
    memory_region_update_iommu_notify_flags(iommu_mr, NULL);
}
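/*
 * Sketch (assumption, not the author's exact patch): registering an
 * IOMMUNotifier to log every DMA-remapping map/unmap event.
 * iommu_notifier_init() and IOMMU_NOTIFIER_ALL are the stock QEMU API of
 * this era; the "mylog" names are made up.
 */
static void mylog_iommu_event(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    fprintf(stderr, "iommu %s iova 0x%" PRIx64 " -> 0x%" PRIx64
            " mask 0x%" PRIx64 "\n",
            entry->perm != IOMMU_NONE ? "map" : "unmap",
            entry->iova, entry->translated_addr, entry->addr_mask);
}

static IOMMUNotifier mylog_iommu_notifier;

/* set up once against an IOMMU memory region, e.g.:
 *     iommu_notifier_init(&mylog_iommu_notifier, mylog_iommu_event,
 *                         IOMMU_NOTIFIER_ALL, 0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(mr, &mylog_iommu_notifier,
 *                                           &error_fatal);
 */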
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry)
{
    IOMMUNotifierFlag request_flags;
    hwaddr entry_end = entry->iova + entry->addr_mask;

    /*
     * Skip the notification if the notification does not overlap
     * with registered range.
     */
    if (notifier->start > entry_end || notifier->end < entry->iova) {
        return;
    }

    assert(entry->iova >= notifier->start && entry_end <= notifier->end);

    if (entry->perm & IOMMU_RW) {
        request_flags = IOMMU_NOTIFIER_MAP;
    } else {
        request_flags = IOMMU_NOTIFIER_UNMAP;
    }

    if (notifier->notifier_flags & request_flags) {
        notifier->notify(notifier, entry);
    }
}

void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry)
{
    IOMMUNotifier *iommu_notifier;

    assert(memory_region_is_iommu(MEMORY_REGION(iommu_mr)));

    IOMMU_NOTIFIER_FOREACH(iommu_notifier, iommu_mr) {
        if (iommu_notifier->iommu_idx == iommu_idx) {
            memory_region_notify_one(iommu_notifier, &entry);
        }
    }
}

int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->get_attr) {
        return -EINVAL;
    }

    return imrc->get_attr(iommu_mr, attr, data);
}

int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->attrs_to_index) {
        return 0;
    }

    return imrc->attrs_to_index(iommu_mr, attrs);
}

int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_GET_CLASS(iommu_mr);

    if (!imrc->num_indexes) {
        return 1;
    }

    return imrc->num_indexes(iommu_mr);
}

void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;
    uint8_t old_logging;

    assert(client == DIRTY_MEMORY_VGA);
    old_logging = mr->vga_logging_count;
    mr->vga_logging_count += log ? 1 : -1;
    if (!!old_logging == !!mr->vga_logging_count) {
        return;
    }

    memory_region_transaction_begin();
    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size)
{
    assert(mr->ram_block);
    cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
                                        size,
                                        memory_region_get_dirty_log_mask(mr));
}
static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    /* If the same address space has multiple log_sync listeners, we
     * visit that address space's FlatView multiple times.  But because
     * log_sync listeners are rare, it's still cheaper than walking each
     * address space once.
     */
    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_sync) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->dirty_log_mask && (!mr || fr->mr == mr)) {
                MemoryRegionSection mrs = section_from_flat_range(fr, view);
                listener->log_sync(listener, &mrs);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len)
{
    MemoryRegionSection mrs;
    MemoryListener *listener;
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;
    hwaddr sec_start, sec_end, sec_size;

    QTAILQ_FOREACH(listener, &memory_listeners, link) {
        if (!listener->log_clear) {
            continue;
        }
        as = listener->address_space;
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (!fr->dirty_log_mask || fr->mr != mr) {
                /*
                 * Clear dirty bitmap operation only applies to those
                 * regions whose dirty logging is at least enabled
                 */
                continue;
            }

            mrs = section_from_flat_range(fr, view);

            sec_start = MAX(mrs.offset_within_region, start);
            sec_end = mrs.offset_within_region + int128_get64(mrs.size);
            sec_end = MIN(sec_end, start + len);

            if (sec_start >= sec_end) {
                /*
                 * If this memory region section has no intersection
                 * with the requested range, skip.
                 */
                continue;
            }

            /* Valid case; shrink the section if needed */
            mrs.offset_within_address_space +=
                sec_start - mrs.offset_within_region;
            mrs.offset_within_region = sec_start;
            sec_size = sec_end - sec_start;
            mrs.size = int128_make64(sec_size);
            listener->log_clear(listener, &mrs);
        }
        flatview_unref(view);
    }
}

DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client)
{
    DirtyBitmapSnapshot *snapshot;
    assert(mr->ram_block);
    memory_region_sync_dirty_bitmap(mr);
    snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
    memory_global_after_dirty_log_sync();
    return snapshot;
}

bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size)
{
    assert(mr->ram_block);
    return cpu_physical_memory_snapshot_get_dirty(snap,
                memory_region_get_ram_addr(mr) + addr, size);
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    if (mr->readonly != readonly) {
        memory_region_transaction_begin();
        mr->readonly = readonly;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile)
{
    if (mr->nonvolatile != nonvolatile) {
        memory_region_transaction_begin();
        mr->nonvolatile = nonvolatile;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}

void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
{
    if (mr->romd_mode != romd_mode) {
        memory_region_transaction_begin();
        mr->romd_mode = romd_mode;
        memory_region_update_pending |= mr->enabled;
        memory_region_transaction_commit();
    }
}
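/*
 * Usage note (illustrative, not from the original file): the snapshot API
 * above gives a consistent "which pages were written" view, which is one
 * way to observe DMA writes landing in guest RAM.  Dirty logging for the
 * chosen client must already be enabled (see memory_region_set_log());
 * DIRTY_MEMORY_VGA is what the display code uses.  The function name and
 * parameters here are placeholders.
 */
static bool example_page_was_written(MemoryRegion *mr, hwaddr addr)
{
    DirtyBitmapSnapshot *snap;
    bool dirty;

    snap = memory_region_snapshot_and_clear_dirty(mr, 0,
                                                  memory_region_size(mr),
                                                  DIRTY_MEMORY_VGA);
    dirty = memory_region_snapshot_get_dirty(mr, snap, addr,
                                             TARGET_PAGE_SIZE);
    g_free(snap);
    return dirty;
}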

void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client)
{
    assert(mr->ram_block);
    cpu_physical_memory_test_and_clear_dirty(
        memory_region_get_ram_addr(mr) + addr, size, client);
}

int memory_region_get_fd(MemoryRegion *mr)
{
    int fd;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        mr = mr->alias;
    }
    fd = mr->ram_block->fd;

    return fd;
}

void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    void *ptr;
    uint64_t offset = 0;

    RCU_READ_LOCK_GUARD();
    while (mr->alias) {
        offset += mr->alias_offset;
        mr = mr->alias;
    }
    assert(mr->ram_block);
    ptr = qemu_map_ram_ptr(mr->ram_block, offset);

    return ptr;
}

MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset)
{
    RAMBlock *block;

    block = qemu_ram_block_from_host(ptr, false, offset);
    if (!block) {
        return NULL;
    }

    return block->mr;
}

ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
{
    return mr->ram_block ? mr->ram_block->offset : RAM_ADDR_INVALID;
}

void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, Error **errp)
{
    assert(mr->ram_block);

    qemu_ram_resize(mr->ram_block, newsize, errp);
}


void memory_region_do_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size)
{
    /*
     * This might need to be extended to cover other kinds of
     * memory regions.
     */
    if (mr->ram_block && mr->dirty_log_mask) {
        qemu_ram_writeback(mr->ram_block, addr, size);
    }
}

/*
 * Notify the proper memory listeners about the change to the newly
 * added/removed CoalescedMemoryRange.
 */
static void memory_region_update_coalesced_range(MemoryRegion *mr,
                                                 CoalescedMemoryRange *cmr,
                                                 bool add)
{
    AddressSpace *as;
    FlatView *view;
    FlatRange *fr;

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        view = address_space_get_flatview(as);
        FOR_EACH_FLAT_RANGE(fr, view) {
            if (fr->mr == mr) {
                flat_range_coalesced_io_notify(fr, as, cmr, add);
            }
        }
        flatview_unref(view);
    }
}

void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr, cmr, true);
    memory_region_set_flush_coalesced(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    if (QTAILQ_EMPTY(&mr->coalesced)) {
        return;
    }

    qemu_flush_coalesced_mmio_buffer();
    mr->flush_coalesced_mmio = false;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        memory_region_update_coalesced_range(mr, cmr, false);
        g_free(cmr);
    }
}

void memory_region_set_flush_coalesced(MemoryRegion *mr)
{
    mr->flush_coalesced_mmio = true;
}
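
/*
 * Design note with a sketch (illustrative): coalescing lets the
 * accelerator buffer guest writes to a region and replay them in a
 * batch instead of exiting on every access; with KVM this uses the
 * in-kernel coalesced-MMIO ring. A VGA model could coalesce its whole
 * aperture ("s->vga_io" is hypothetical):
 *
 *     memory_region_set_coalescing(&s->vga_io);
 *
 * Because reads must observe earlier buffered writes,
 * memory_region_add_coalescing() also marks the region
 * flush_coalesced_mmio, so accesses to it flush the buffer first.
 */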

void memory_region_clear_flush_coalesced(MemoryRegion *mr)
{
    qemu_flush_coalesced_mmio_buffer();
    if (QTAILQ_EMPTY(&mr->coalesced)) {
        mr->flush_coalesced_mmio = false;
    }
}

void memory_region_clear_global_locking(MemoryRegion *mr)
{
    mr->global_locking = false;
}

static bool userspace_eventfd_warning;

void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (kvm_enabled() && (!(kvm_eventfds_enabled() ||
                            userspace_eventfd_warning))) {
        userspace_eventfd_warning = true;
        error_report("Using eventfd without MMIO binding in KVM. "
                     "Suboptimal performance expected");
    }

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_before(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    ++mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
    mr->ioeventfds[i] = mrfd;
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e)
{
    MemoryRegionIoeventfd mrfd = {
        .addr.start = int128_make64(addr),
        .addr.size = int128_make64(size),
        .match_data = match_data,
        .data = data,
        .e = e,
    };
    unsigned i;

    if (size) {
        adjust_endianness(mr, &mrfd.data, size_memop(size) | MO_TE);
    }
    memory_region_transaction_begin();
    for (i = 0; i < mr->ioeventfd_nb; ++i) {
        if (memory_region_ioeventfd_equal(&mrfd, &mr->ioeventfds[i])) {
            break;
        }
    }
    assert(i != mr->ioeventfd_nb);
    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
    --mr->ioeventfd_nb;
    mr->ioeventfds = g_realloc(mr->ioeventfds,
                               sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
    ioeventfd_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

static void memory_region_update_container_subregions(MemoryRegion *subregion)
{
    MemoryRegion *mr = subregion->container;
    MemoryRegion *other;

    memory_region_transaction_begin();

    memory_region_ref(subregion);
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               hwaddr offset,
                                               MemoryRegion *subregion)
{
    assert(!subregion->container);
    subregion->container = mr;
    subregion->addr = offset;
    memory_region_update_container_subregions(subregion);
}
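
/*
 * Usage sketch (illustrative; "notify_mr", the offset and vq_index are
 * hypothetical): virtio-style doorbells bind an EventNotifier to a
 * guest-written register with memory_region_add_eventfd() above:
 *
 *     EventNotifier n;
 *
 *     event_notifier_init(&n, 0);
 *     memory_region_add_eventfd(notify_mr,
 *                               0x10,      // register offset in notify_mr
 *                               2,         // 2-byte writes
 *                               true,      // only when the value matches...
 *                               vq_index,  // ...this queue index
 *                               &n);
 *
 * On commit, the ioeventfd update is handed to the accelerator's
 * listener (KVM's in-kernel ioeventfd when available, per the warning
 * above), so matching writes just signal the eventfd.
 */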

void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion)
{
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority)
{
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    memory_region_transaction_begin();
    assert(subregion->container == mr);
    subregion->container = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_unref(subregion);
    memory_region_update_pending |= mr->enabled && subregion->enabled;
    memory_region_transaction_commit();
}

void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
{
    if (enabled == mr->enabled) {
        return;
    }
    memory_region_transaction_begin();
    mr->enabled = enabled;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

void memory_region_set_size(MemoryRegion *mr, uint64_t size)
{
    Int128 s = int128_make64(size);

    if (size == UINT64_MAX) {
        s = int128_2_64();
    }
    if (int128_eq(s, mr->size)) {
        return;
    }
    memory_region_transaction_begin();
    mr->size = s;
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}

static void memory_region_readd_subregion(MemoryRegion *mr)
{
    MemoryRegion *container = mr->container;

    if (container) {
        memory_region_transaction_begin();
        memory_region_ref(mr);
        memory_region_del_subregion(container, mr);
        mr->container = container;
        memory_region_update_container_subregions(mr);
        memory_region_unref(mr);
        memory_region_transaction_commit();
    }
}

void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
{
    if (addr != mr->addr) {
        mr->addr = addr;
        memory_region_readd_subregion(mr);
    }
}

void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
{
    assert(mr->alias);

    if (offset == mr->alias_offset) {
        return;
    }

    memory_region_transaction_begin();
    mr->alias_offset = offset;
    memory_region_update_pending |= mr->enabled;
    memory_region_transaction_commit();
}

uint64_t memory_region_get_alignment(const MemoryRegion *mr)
{
    return mr->align;
}

static int cmp_flatrange_addr(const void *addr_, const void *fr_)
{
    const AddrRange *addr = addr_;
    const FlatRange *fr = fr_;

    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
        return -1;
    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
        return 1;
    }
    return 0;
}

static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
{
    return bsearch(&addr, view->ranges, view->nr,
                   sizeof(FlatRange), cmp_flatrange_addr);
}

bool memory_region_is_mapped(MemoryRegion *mr)
{
    return mr->container ? true : false;
}
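
/*
 * Usage sketch (illustrative; names hypothetical): overlapping sibling
 * regions are only legal via the _overlap variant, and the rendered
 * FlatView gives the higher-priority region precedence where they
 * intersect:
 *
 *     // 1 MB of RAM with a small MMIO window punched on top of it
 *     memory_region_add_subregion(parent, 0x00000, ram);
 *     memory_region_add_subregion_overlap(parent, 0xb8000, mmio, 1);
 *
 * memory_region_update_container_subregions() keeps each subregion
 * list sorted by descending priority, which is the order the
 * flattening code visits them.
 */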

/* Same as memory_region_find, but it does not add a reference to the
 * returned region. It must be called from an RCU critical section.
 */
static MemoryRegionSection memory_region_find_rcu(MemoryRegion *mr,
                                                  hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret = { .mr = NULL };
    MemoryRegion *root;
    AddressSpace *as;
    AddrRange range;
    FlatView *view;
    FlatRange *fr;

    addr += mr->addr;
    for (root = mr; root->container; ) {
        root = root->container;
        addr += root->addr;
    }

    as = memory_region_to_address_space(root);
    if (!as) {
        return ret;
    }
    range = addrrange_make(int128_make64(addr), int128_make64(size));

    view = address_space_to_flatview(as);
    fr = flatview_lookup(view, range);
    if (!fr) {
        return ret;
    }

    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
        --fr;
    }

    ret.mr = fr->mr;
    ret.fv = view;
    range = addrrange_intersection(range, fr->addr);
    ret.offset_within_region = fr->offset_in_region;
    ret.offset_within_region += int128_get64(int128_sub(range.start,
                                                        fr->addr.start));
    ret.size = range.size;
    ret.offset_within_address_space = int128_get64(range.start);
    ret.readonly = fr->readonly;
    ret.nonvolatile = fr->nonvolatile;
    return ret;
}

MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size)
{
    MemoryRegionSection ret;
    RCU_READ_LOCK_GUARD();
    ret = memory_region_find_rcu(mr, addr, size);
    if (ret.mr) {
        memory_region_ref(ret.mr);
    }
    return ret;
}

bool memory_region_present(MemoryRegion *container, hwaddr addr)
{
    MemoryRegion *mr;

    RCU_READ_LOCK_GUARD();
    mr = memory_region_find_rcu(container, addr, 1).mr;
    return mr && mr != container;
}

void memory_global_dirty_log_sync(void)
{
    memory_region_sync_dirty_bitmap(NULL);
}

void memory_global_after_dirty_log_sync(void)
{
    MEMORY_LISTENER_CALL_GLOBAL(log_global_after_sync, Forward);
}

static VMChangeStateEntry *vmstate_change;

void memory_global_dirty_log_start(void)
{
    if (vmstate_change) {
        qemu_del_vm_change_state_handler(vmstate_change);
        vmstate_change = NULL;
    }

    global_dirty_log = true;

    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();
}
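
/*
 * Usage sketch (illustrative; "gpa" is hypothetical): unlike
 * memory_region_find_rcu(), memory_region_find() references the
 * returned region, so the caller owns a ref to drop:
 *
 *     MemoryRegionSection sec;
 *
 *     sec = memory_region_find(get_system_memory(), gpa, 4);
 *     if (sec.mr) {
 *         // sec.offset_within_region is gpa's offset inside sec.mr
 *         ...
 *         memory_region_unref(sec.mr);
 *     }
 */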

static void memory_global_dirty_log_do_stop(void)
{
    global_dirty_log = false;

    /* Refresh DIRTY_MEMORY_MIGRATION bit. */
    memory_region_transaction_begin();
    memory_region_update_pending = true;
    memory_region_transaction_commit();

    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
}

static void memory_vm_change_state_handler(void *opaque, int running,
                                           RunState state)
{
    if (running) {
        memory_global_dirty_log_do_stop();

        if (vmstate_change) {
            qemu_del_vm_change_state_handler(vmstate_change);
            vmstate_change = NULL;
        }
    }
}

void memory_global_dirty_log_stop(void)
{
    if (!runstate_is_running()) {
        if (vmstate_change) {
            return;
        }
        vmstate_change = qemu_add_vm_change_state_handler(
                                memory_vm_change_state_handler, NULL);
        return;
    }

    memory_global_dirty_log_do_stop();
}

static void listener_add_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    if (global_dirty_log) {
        if (listener->log_global_start) {
            listener->log_global_start(listener);
        }
    }

    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (listener->region_add) {
            listener->region_add(listener, &section);
        }
        if (fr->dirty_log_mask && listener->log_start) {
            listener->log_start(listener, &section, 0, fr->dirty_log_mask);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

static void listener_del_address_space(MemoryListener *listener,
                                       AddressSpace *as)
{
    FlatView *view;
    FlatRange *fr;

    if (listener->begin) {
        listener->begin(listener);
    }
    view = address_space_get_flatview(as);
    FOR_EACH_FLAT_RANGE(fr, view) {
        MemoryRegionSection section = section_from_flat_range(fr, view);

        if (fr->dirty_log_mask && listener->log_stop) {
            listener->log_stop(listener, &section, fr->dirty_log_mask, 0);
        }
        if (listener->region_del) {
            listener->region_del(listener, &section);
        }
    }
    if (listener->commit) {
        listener->commit(listener);
    }
    flatview_unref(view);
}

void memory_listener_register(MemoryListener *listener, AddressSpace *as)
{
    MemoryListener *other = NULL;

    listener->address_space = as;
    if (QTAILQ_EMPTY(&memory_listeners)
        || listener->priority >= QTAILQ_LAST(&memory_listeners)->priority) {
        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
    } else {
        QTAILQ_FOREACH(other, &memory_listeners, link) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link);
    }

    if (QTAILQ_EMPTY(&as->listeners)
        || listener->priority >= QTAILQ_LAST(&as->listeners)->priority) {
        QTAILQ_INSERT_TAIL(&as->listeners, listener, link_as);
    } else {
        QTAILQ_FOREACH(other, &as->listeners, link_as) {
            if (listener->priority < other->priority) {
                break;
            }
        }
        QTAILQ_INSERT_BEFORE(other, listener, link_as);
    }

    listener_add_address_space(listener, as);
}
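
/*
 * Usage sketch (illustrative; the listener and callback names are
 * hypothetical): listener_add_address_space() above replays the
 * current FlatView as region_add callbacks, so a freshly registered
 * listener immediately sees every existing mapping; handy for logging
 * DMA-able ranges.
 *
 *     static void log_region_add(MemoryListener *l,
 *                                MemoryRegionSection *section)
 *     {
 *         qemu_printf("map %s +0x%" PRIx64 "\n",
 *                     memory_region_name(section->mr),
 *                     section->offset_within_address_space);
 *     }
 *
 *     static MemoryListener log_listener = {
 *         .region_add = log_region_add,
 *         .priority = 10,    // same priority band as the KVM listener
 *     };
 *
 *     memory_listener_register(&log_listener, &address_space_memory);
 */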

void memory_listener_unregister(MemoryListener *listener)
{
    if (!listener->address_space) {
        return;
    }

    listener_del_address_space(listener, listener->address_space);
    QTAILQ_REMOVE(&memory_listeners, listener, link);
    QTAILQ_REMOVE(&listener->address_space->listeners, listener, link_as);
    listener->address_space = NULL;
}

void address_space_remove_listeners(AddressSpace *as)
{
    while (!QTAILQ_EMPTY(&as->listeners)) {
        memory_listener_unregister(QTAILQ_FIRST(&as->listeners));
    }
}

void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
{
    memory_region_ref(root);
    as->root = root;
    as->current_map = NULL;
    as->ioeventfd_nb = 0;
    as->ioeventfds = NULL;
    QTAILQ_INIT(&as->listeners);
    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
    as->name = g_strdup(name ? name : "anonymous");
    address_space_update_topology(as);
    address_space_update_ioeventfds(as);
}

static void do_address_space_destroy(AddressSpace *as)
{
    assert(QTAILQ_EMPTY(&as->listeners));

    flatview_unref(as->current_map);
    g_free(as->name);
    g_free(as->ioeventfds);
    memory_region_unref(as->root);
}

void address_space_destroy(AddressSpace *as)
{
    MemoryRegion *root = as->root;

    /* Flush out anything from MemoryListeners listening in on this */
    memory_region_transaction_begin();
    as->root = NULL;
    memory_region_transaction_commit();
    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);

    /* At this point, as->dispatch and as->current_map are dummy
     * entries that the guest should never use. Wait for the old
     * values to expire before freeing the data.
     */
    as->root = root;
    call_rcu(as, do_address_space_destroy, rcu);
}

static const char *memory_region_type(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_type(mr->alias);
    }
    if (memory_region_is_ram_device(mr)) {
        return "ramd";
    } else if (memory_region_is_romd(mr)) {
        return "romd";
    } else if (memory_region_is_rom(mr)) {
        return "rom";
    } else if (memory_region_is_ram(mr)) {
        return "ram";
    } else {
        return "i/o";
    }
}

typedef struct MemoryRegionList MemoryRegionList;

struct MemoryRegionList {
    const MemoryRegion *mr;
    QTAILQ_ENTRY(MemoryRegionList) mrqueue;
};

typedef QTAILQ_HEAD(, MemoryRegionList) MemoryRegionListHead;

#define MR_SIZE(size) (int128_nz(size) ? (hwaddr)int128_get64( \
                           int128_sub((size), int128_one())) : 0)
#define MTREE_INDENT "  "

static void mtree_expand_owner(const char *label, Object *obj)
{
    DeviceState *dev = (DeviceState *) object_dynamic_cast(obj, TYPE_DEVICE);

    qemu_printf(" %s:{%s", label, dev ? "dev" : "obj");
    if (dev && dev->id) {
        qemu_printf(" id=%s", dev->id);
    } else {
        gchar *canonical_path = object_get_canonical_path(obj);
        if (canonical_path) {
            qemu_printf(" path=%s", canonical_path);
            g_free(canonical_path);
        } else {
            qemu_printf(" type=%s", object_get_typename(obj));
        }
    }
    qemu_printf("}");
}

static void mtree_print_mr_owner(const MemoryRegion *mr)
{
    Object *owner = mr->owner;
    Object *parent = memory_region_owner((MemoryRegion *)mr);

    if (!owner && !parent) {
        qemu_printf(" orphan");
        return;
    }
    if (owner) {
        mtree_expand_owner("owner", owner);
    }
    if (parent && parent != owner) {
        mtree_expand_owner("parent", parent);
    }
}

static void mtree_print_mr(const MemoryRegion *mr, unsigned int level,
                           hwaddr base,
                           MemoryRegionListHead *alias_print_queue,
                           bool owner)
{
    MemoryRegionList *new_ml, *ml, *next_ml;
    MemoryRegionListHead submr_print_queue;
    const MemoryRegion *submr;
    unsigned int i;
    hwaddr cur_start, cur_end;

    if (!mr) {
        return;
    }

    for (i = 0; i < level; i++) {
        qemu_printf(MTREE_INDENT);
    }

    cur_start = base + mr->addr;
    cur_end = cur_start + MR_SIZE(mr->size);

    /*
     * Try to detect overflow of memory region. This should never
     * happen normally. When it happens, we dump something to warn the
     * user who is observing this.
     */
    if (cur_start < base || cur_end < cur_start) {
        qemu_printf("[DETECTED OVERFLOW!] ");
    }

    if (mr->alias) {
        MemoryRegionList *ml;
        bool found = false;

        /* check if the alias is already in the queue */
        QTAILQ_FOREACH(ml, alias_print_queue, mrqueue) {
            if (ml->mr == mr->alias) {
                found = true;
            }
        }

        if (!found) {
            ml = g_new(MemoryRegionList, 1);
            ml->mr = mr->alias;
            QTAILQ_INSERT_TAIL(alias_print_queue, ml, mrqueue);
        }
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): alias %s @%s " TARGET_FMT_plx
                    "-" TARGET_FMT_plx "%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    memory_region_name(mr->alias),
                    mr->alias_offset,
                    mr->alias_offset + MR_SIZE(mr->size),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    } else {
        qemu_printf(TARGET_FMT_plx "-" TARGET_FMT_plx
                    " (prio %d, %s%s): %s%s",
                    cur_start, cur_end,
                    mr->priority,
                    mr->nonvolatile ? "nv-" : "",
                    memory_region_type((MemoryRegion *)mr),
                    memory_region_name(mr),
                    mr->enabled ? "" : " [disabled]");
        if (owner) {
            mtree_print_mr_owner(mr);
        }
    }
    qemu_printf("\n");

    QTAILQ_INIT(&submr_print_queue);

    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
        new_ml = g_new(MemoryRegionList, 1);
        new_ml->mr = submr;
        QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
            if (new_ml->mr->addr < ml->mr->addr ||
                (new_ml->mr->addr == ml->mr->addr &&
                 new_ml->mr->priority > ml->mr->priority)) {
                QTAILQ_INSERT_BEFORE(ml, new_ml, mrqueue);
                new_ml = NULL;
                break;
            }
        }
        if (new_ml) {
            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, mrqueue);
        }
    }

    QTAILQ_FOREACH(ml, &submr_print_queue, mrqueue) {
        mtree_print_mr(ml->mr, level + 1, cur_start,
                       alias_print_queue, owner);
    }

    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, mrqueue, next_ml) {
        g_free(ml);
    }
}

struct FlatViewInfo {
    int counter;
    bool dispatch_tree;
    bool owner;
    AccelClass *ac;
};

static void mtree_print_flatview(gpointer key, gpointer value,
                                 gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;
    struct FlatViewInfo *fvi = user_data;
    FlatRange *range = &view->ranges[0];
    MemoryRegion *mr;
    int n = view->nr;
    int i;
    AddressSpace *as;

    qemu_printf("FlatView #%d\n", fvi->counter);
    ++fvi->counter;

    for (i = 0; i < fv_address_spaces->len; ++i) {
        as = g_array_index(fv_address_spaces, AddressSpace*, i);
        qemu_printf(" AS \"%s\", root: %s",
                    as->name, memory_region_name(as->root));
        if (as->root->alias) {
            qemu_printf(", alias %s", memory_region_name(as->root->alias));
        }
        qemu_printf("\n");
    }

    qemu_printf(" Root memory region: %s\n",
                view->root ? memory_region_name(view->root) : "(none)");

    if (n <= 0) {
        qemu_printf(MTREE_INDENT "No rendered FlatView\n\n");
        return;
    }

    while (n--) {
        mr = range->mr;
        if (range->offset_in_region) {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s @" TARGET_FMT_plx,
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr),
                        range->offset_in_region);
        } else {
            qemu_printf(MTREE_INDENT TARGET_FMT_plx "-" TARGET_FMT_plx
                        " (prio %d, %s%s): %s",
                        int128_get64(range->addr.start),
                        int128_get64(range->addr.start)
                        + MR_SIZE(range->addr.size),
                        mr->priority,
                        range->nonvolatile ? "nv-" : "",
                        range->readonly ? "rom" : memory_region_type(mr),
                        memory_region_name(mr));
        }
        if (fvi->owner) {
            mtree_print_mr_owner(mr);
        }

        if (fvi->ac) {
            for (i = 0; i < fv_address_spaces->len; ++i) {
                as = g_array_index(fv_address_spaces, AddressSpace*, i);
                if (fvi->ac->has_memory(current_machine, as,
                                        int128_get64(range->addr.start),
                                        MR_SIZE(range->addr.size) + 1)) {
                    qemu_printf(" %s", fvi->ac->name);
                }
            }
        }
        qemu_printf("\n");
        range++;
    }

#if !defined(CONFIG_USER_ONLY)
    if (fvi->dispatch_tree && view->root) {
        mtree_print_dispatch(view->dispatch, view->root);
    }
#endif

    qemu_printf("\n");
}

static gboolean mtree_info_flatview_free(gpointer key, gpointer value,
                                         gpointer user_data)
{
    FlatView *view = key;
    GArray *fv_address_spaces = value;

    g_array_unref(fv_address_spaces);
    flatview_unref(view);

    return true;
}

void mtree_info(bool flatview, bool dispatch_tree, bool owner)
{
    MemoryRegionListHead ml_head;
    MemoryRegionList *ml, *ml2;
    AddressSpace *as;

    if (flatview) {
        FlatView *view;
        struct FlatViewInfo fvi = {
            .counter = 0,
            .dispatch_tree = dispatch_tree,
            .owner = owner,
        };
        GArray *fv_address_spaces;
        GHashTable *views = g_hash_table_new(g_direct_hash, g_direct_equal);
        AccelClass *ac = ACCEL_GET_CLASS(current_accel());

        if (ac->has_memory) {
            fvi.ac = ac;
        }

        /* Gather all FVs in one table */
        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
            view = address_space_get_flatview(as);

            fv_address_spaces = g_hash_table_lookup(views, view);
            if (!fv_address_spaces) {
                fv_address_spaces = g_array_new(false, false, sizeof(as));
                g_hash_table_insert(views, view, fv_address_spaces);
            }

            g_array_append_val(fv_address_spaces, as);
        }

        /* Print */
        g_hash_table_foreach(views, mtree_print_flatview, &fvi);

        /* Free */
        g_hash_table_foreach_remove(views, mtree_info_flatview_free, 0);
        g_hash_table_unref(views);

        return;
    }

    QTAILQ_INIT(&ml_head);

    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
        qemu_printf("address-space: %s\n", as->name);
        mtree_print_mr(as->root, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    /* print aliased regions */
    QTAILQ_FOREACH(ml, &ml_head, mrqueue) {
        qemu_printf("memory-region: %s\n", memory_region_name(ml->mr));
        mtree_print_mr(ml->mr, 1, 0, &ml_head, owner);
        qemu_printf("\n");
    }

    QTAILQ_FOREACH_SAFE(ml, &ml_head, mrqueue, ml2) {
        g_free(ml);
    }
}

void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_ram_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_nomigrate(mr, owner, name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp)
{
    DeviceState *owner_dev;
    Error *err = NULL;

    memory_region_init_rom_device_nomigrate(mr, owner, ops, opaque,
                                            name, size, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    /* This will assert if owner is neither NULL nor a DeviceState.
     * We only want the owner here for the purposes of defining a
     * unique name for migration. TODO: Ideally we should implement
     * a naming scheme for Objects which are not DeviceStates, in
     * which case we can relax this restriction.
     */
    owner_dev = DEVICE(owner);
    vmstate_register_ram(mr, owner_dev);
}

static const TypeInfo memory_region_info = {
    .parent             = TYPE_OBJECT,
    .name               = TYPE_MEMORY_REGION,
    .class_size         = sizeof(MemoryRegionClass),
    .instance_size      = sizeof(MemoryRegion),
    .instance_init      = memory_region_initfn,
    .instance_finalize  = memory_region_finalize,
};

static const TypeInfo iommu_memory_region_info = {
    .parent             = TYPE_MEMORY_REGION,
    .name               = TYPE_IOMMU_MEMORY_REGION,
    .class_size         = sizeof(IOMMUMemoryRegionClass),
    .instance_size      = sizeof(IOMMUMemoryRegion),
    .instance_init      = iommu_memory_region_initfn,
    .abstract           = true,
};

static void memory_register_types(void)
{
    type_register_static(&memory_region_info);
    type_register_static(&iommu_memory_region_info);
}

type_init(memory_register_types)
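
/*
 * Usage sketch (illustrative; "s", "dev" and the base address are
 * hypothetical): a device realize function typically pairs
 * memory_region_init_ram() with a subregion mapping:
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "mydev.ram",
 *                            64 * KiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0xe0000000,
 *                                 &s->ram);
 *
 * Passing OBJECT(dev) as the owner ties the region's lifetime to the
 * device and, per the comment above, gives vmstate_register_ram() a
 * unique migration name.
 */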