qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

virtio-balloon: Rip out qemu_balloon_inhibit()

The only remaining special case is postcopy. It cannot handle
concurrent discards yet, which would result in requesting already sent
pages from the source. Special-case it in virtio-balloon instead.

Introduce migration_in_incoming_postcopy() to find out whether incoming
postcopy is active.

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Juan Quintela <quintela@redhat.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20200626072248.78761-7-david@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

Authored by David Hildenbrand and committed by Michael S. Tsirkin.
06df2e69 b030958c

+17 -45
-18
balloon.c
··· 36 36 static QEMUBalloonEvent *balloon_event_fn; 37 37 static QEMUBalloonStatus *balloon_stat_fn; 38 38 static void *balloon_opaque; 39 - static int balloon_inhibit_count; 40 - 41 - bool qemu_balloon_is_inhibited(void) 42 - { 43 - return atomic_read(&balloon_inhibit_count) > 0 || 44 - ram_block_discard_is_disabled(); 45 - } 46 - 47 - void qemu_balloon_inhibit(bool state) 48 - { 49 - if (state) { 50 - atomic_inc(&balloon_inhibit_count); 51 - } else { 52 - atomic_dec(&balloon_inhibit_count); 53 - } 54 - 55 - assert(atomic_read(&balloon_inhibit_count) >= 0); 56 - } 57 39 58 40 static bool have_balloon(Error **errp) 59 41 {
+8 -2
hw/virtio/virtio-balloon.c
··· 63 63 return pbp->base_gpa == base_gpa; 64 64 } 65 65 66 + static bool virtio_balloon_inhibited(void) 67 + { 68 + /* Postcopy cannot deal with concurrent discards, so it's special. */ 69 + return ram_block_discard_is_disabled() || migration_in_incoming_postcopy(); 70 + } 71 + 66 72 static void balloon_inflate_page(VirtIOBalloon *balloon, 67 73 MemoryRegion *mr, hwaddr mr_offset, 68 74 PartiallyBalloonedPage *pbp) ··· 336 342 * accessible by another device or process, or if the guest is 337 343 * expecting it to retain a non-zero value. 338 344 */ 339 - if (qemu_balloon_is_inhibited() || dev->poison_val) { 345 + if (virtio_balloon_inhibited() || dev->poison_val) { 340 346 goto skip_element; 341 347 } 342 348 ··· 421 427 422 428 trace_virtio_balloon_handle_output(memory_region_name(section.mr), 423 429 pa); 424 - if (!qemu_balloon_is_inhibited()) { 430 + if (!virtio_balloon_inhibited()) { 425 431 if (vq == s->ivq) { 426 432 balloon_inflate_page(s, section.mr, 427 433 section.offset_within_region, &pbp);
+2
include/migration/misc.h
··· 69 69 /* ...and after the device transmission */ 70 70 bool migration_in_postcopy_after_devices(MigrationState *); 71 71 void migration_global_dump(Monitor *mon); 72 + /* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */ 73 + bool migration_in_incoming_postcopy(void); 72 74 73 75 /* migration/block-dirty-bitmap.c */ 74 76 void dirty_bitmap_mig_init(void);
-2
include/sysemu/balloon.h
··· 23 23 int qemu_add_balloon_handler(QEMUBalloonEvent *event_func, 24 24 QEMUBalloonStatus *stat_func, void *opaque); 25 25 void qemu_remove_balloon_handler(void *opaque); 26 - bool qemu_balloon_is_inhibited(void); 27 - void qemu_balloon_inhibit(bool state); 28 26 29 27 #endif
+7
migration/migration.c
··· 1772 1772 return migration_in_postcopy() && s->postcopy_after_devices; 1773 1773 } 1774 1774 1775 + bool migration_in_incoming_postcopy(void) 1776 + { 1777 + PostcopyState ps = postcopy_state_get(); 1778 + 1779 + return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END; 1780 + } 1781 + 1775 1782 bool migration_is_idle(void) 1776 1783 { 1777 1784 MigrationState *s = current_migration;
-23
migration/postcopy-ram.c
··· 27 27 #include "qemu/notify.h" 28 28 #include "qemu/rcu.h" 29 29 #include "sysemu/sysemu.h" 30 - #include "sysemu/balloon.h" 31 30 #include "qemu/error-report.h" 32 31 #include "trace.h" 33 32 #include "hw/boards.h" ··· 521 520 } 522 521 523 522 /* 524 - * Manage a single vote to the QEMU balloon inhibitor for all postcopy usage, 525 - * last caller wins. 526 - */ 527 - static void postcopy_balloon_inhibit(bool state) 528 - { 529 - static bool cur_state = false; 530 - 531 - if (state != cur_state) { 532 - qemu_balloon_inhibit(state); 533 - cur_state = state; 534 - } 535 - } 536 - 537 - /* 538 523 * At the end of a migration where postcopy_ram_incoming_init was called. 539 524 */ 540 525 int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) ··· 564 549 close(mis->userfault_event_fd); 565 550 mis->have_fault_thread = false; 566 551 } 567 - 568 - postcopy_balloon_inhibit(false); 569 552 570 553 if (enable_mlock) { 571 554 if (os_mlock() < 0) { ··· 1159 1142 return -e; 1160 1143 } 1161 1144 memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size); 1162 - 1163 - /* 1164 - * Ballooning can mark pages as absent while we're postcopying 1165 - * that would cause false userfaults. 1166 - */ 1167 - postcopy_balloon_inhibit(true); 1168 1145 1169 1146 trace_postcopy_ram_enable_notify(); 1170 1147