qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

migration: consolidate VMStateField.start

The member VMStateField.start is used for two things: partial data
migration for VBUFFER data (basically providing migration for a
sub-buffer) and locating the next element in QTAILQ.

The implementation of the VBUFFER feature is broken when VMSTATE_ALLOC
is used. This, however, goes unnoticed because partial migration for
VBUFFER is not actually used at all.

Let's consolidate the usage of VMStateField.start by removing support
for partial migration for VBUFFER.

Signed-off-by: Halil Pasic <pasic@linux.vnet.ibm.com>

Message-Id: <20170203175217.45562-1-pasic@linux.vnet.ibm.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>

authored by

Halil Pasic and committed by
Dr. David Alan Gilbert
59046ec2 0827b9e9

+27 -34
+1 -1
hw/char/exynos4210_uart.c
··· 561 561 .fields = (VMStateField[]) { 562 562 VMSTATE_UINT32(sp, Exynos4210UartFIFO), 563 563 VMSTATE_UINT32(rp, Exynos4210UartFIFO), 564 - VMSTATE_VBUFFER_UINT32(data, Exynos4210UartFIFO, 1, NULL, 0, size), 564 + VMSTATE_VBUFFER_UINT32(data, Exynos4210UartFIFO, 1, NULL, size), 565 565 VMSTATE_END_OF_LIST() 566 566 } 567 567 };
+1 -1
hw/display/g364fb.c
··· 464 464 .minimum_version_id = 1, 465 465 .post_load = g364fb_post_load, 466 466 .fields = (VMStateField[]) { 467 - VMSTATE_VBUFFER_UINT32(vram, G364State, 1, NULL, 0, vram_size), 467 + VMSTATE_VBUFFER_UINT32(vram, G364State, 1, NULL, vram_size), 468 468 VMSTATE_BUFFER_UNSAFE(color_palette, G364State, 0, 256 * 3), 469 469 VMSTATE_BUFFER_UNSAFE(cursor_palette, G364State, 0, 9), 470 470 VMSTATE_UINT16_ARRAY(cursor, G364State, 512),
+4 -4
hw/dma/pl330.c
··· 173 173 .version_id = 1, 174 174 .minimum_version_id = 1, 175 175 .fields = (VMStateField[]) { 176 - VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, 0, buf_size), 177 - VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, 0, buf_size), 176 + VMSTATE_VBUFFER_UINT32(buf, PL330Fifo, 1, NULL, buf_size), 177 + VMSTATE_VBUFFER_UINT32(tag, PL330Fifo, 1, NULL, buf_size), 178 178 VMSTATE_UINT32(head, PL330Fifo), 179 179 VMSTATE_UINT32(num, PL330Fifo), 180 180 VMSTATE_UINT32(buf_size, PL330Fifo), ··· 282 282 VMSTATE_STRUCT(manager, PL330State, 0, vmstate_pl330_chan, PL330Chan), 283 283 VMSTATE_STRUCT_VARRAY_UINT32(chan, PL330State, num_chnls, 0, 284 284 vmstate_pl330_chan, PL330Chan), 285 - VMSTATE_VBUFFER_UINT32(lo_seqn, PL330State, 1, NULL, 0, num_chnls), 286 - VMSTATE_VBUFFER_UINT32(hi_seqn, PL330State, 1, NULL, 0, num_chnls), 285 + VMSTATE_VBUFFER_UINT32(lo_seqn, PL330State, 1, NULL, num_chnls), 286 + VMSTATE_VBUFFER_UINT32(hi_seqn, PL330State, 1, NULL, num_chnls), 287 287 VMSTATE_STRUCT(fifo, PL330State, 0, vmstate_pl330_fifo, PL330Fifo), 288 288 VMSTATE_STRUCT(read_queue, PL330State, 0, vmstate_pl330_queue, 289 289 PL330Queue),
+1 -1
hw/intc/exynos4210_gic.c
··· 393 393 .version_id = 2, 394 394 .minimum_version_id = 2, 395 395 .fields = (VMStateField[]) { 396 - VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, 0, n_in), 396 + VMSTATE_VBUFFER_UINT32(level, Exynos4210IRQGateState, 1, NULL, n_in), 397 397 VMSTATE_END_OF_LIST() 398 398 } 399 399 };
+2 -4
hw/ipmi/isa_ipmi_bt.c
··· 471 471 VMSTATE_BOOL(bt.use_irq, ISAIPMIBTDevice), 472 472 VMSTATE_BOOL(bt.irqs_enabled, ISAIPMIBTDevice), 473 473 VMSTATE_UINT32(bt.outpos, ISAIPMIBTDevice), 474 - VMSTATE_VBUFFER_UINT32(bt.outmsg, ISAIPMIBTDevice, 1, NULL, 0, 475 - bt.outlen), 476 - VMSTATE_VBUFFER_UINT32(bt.inmsg, ISAIPMIBTDevice, 1, NULL, 0, 477 - bt.inlen), 474 + VMSTATE_VBUFFER_UINT32(bt.outmsg, ISAIPMIBTDevice, 1, NULL, bt.outlen), 475 + VMSTATE_VBUFFER_UINT32(bt.inmsg, ISAIPMIBTDevice, 1, NULL, bt.inlen), 478 476 VMSTATE_UINT8(bt.control_reg, ISAIPMIBTDevice), 479 477 VMSTATE_UINT8(bt.mask_reg, ISAIPMIBTDevice), 480 478 VMSTATE_UINT8(bt.waiting_rsp, ISAIPMIBTDevice),
+1 -1
hw/net/vmxnet3.c
··· 2397 2397 .pre_load = vmxnet3_mcast_list_pre_load, 2398 2398 .needed = vmxnet3_mc_list_needed, 2399 2399 .fields = (VMStateField[]) { 2400 - VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, 0, 2400 + VMSTATE_VBUFFER_UINT32(mcast_list, VMXNET3State, 0, NULL, 2401 2401 mcast_list_buff_size), 2402 2402 VMSTATE_END_OF_LIST() 2403 2403 }
+1 -1
hw/nvram/mac_nvram.c
··· 82 82 .version_id = 1, 83 83 .minimum_version_id = 1, 84 84 .fields = (VMStateField[]) { 85 - VMSTATE_VBUFFER_UINT32(data, MacIONVRAMState, 0, NULL, 0, size), 85 + VMSTATE_VBUFFER_UINT32(data, MacIONVRAMState, 0, NULL, size), 86 86 VMSTATE_END_OF_LIST() 87 87 } 88 88 };
+1 -1
hw/nvram/spapr_nvram.c
··· 224 224 .post_load = spapr_nvram_post_load, 225 225 .fields = (VMStateField[]) { 226 226 VMSTATE_UINT32(size, sPAPRNVRAM), 227 - VMSTATE_VBUFFER_ALLOC_UINT32(buf, sPAPRNVRAM, 1, NULL, 0, size), 227 + VMSTATE_VBUFFER_ALLOC_UINT32(buf, sPAPRNVRAM, 1, NULL, size), 228 228 VMSTATE_END_OF_LIST() 229 229 }, 230 230 };
+1 -1
hw/sd/sdhci.c
··· 1253 1253 VMSTATE_UINT16(data_count, SDHCIState), 1254 1254 VMSTATE_UINT64(admasysaddr, SDHCIState), 1255 1255 VMSTATE_UINT8(stopped_state, SDHCIState), 1256 - VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, 0, buf_maxsz), 1256 + VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz), 1257 1257 VMSTATE_TIMER_PTR(insert_timer, SDHCIState), 1258 1258 VMSTATE_TIMER_PTR(transfer_timer, SDHCIState), 1259 1259 VMSTATE_END_OF_LIST()
+1 -1
hw/timer/m48t59.c
··· 563 563 .fields = (VMStateField[]) { 564 564 VMSTATE_UINT8(lock, M48t59State), 565 565 VMSTATE_UINT16(addr, M48t59State), 566 - VMSTATE_VBUFFER_UINT32(buffer, M48t59State, 0, NULL, 0, size), 566 + VMSTATE_VBUFFER_UINT32(buffer, M48t59State, 0, NULL, size), 567 567 VMSTATE_END_OF_LIST() 568 568 } 569 569 };
+8 -13
include/migration/vmstate.h
··· 587 587 .offset = vmstate_offset_buffer(_state, _field) + _start, \ 588 588 } 589 589 590 - #define VMSTATE_VBUFFER_MULTIPLY(_field, _state, _version, _test, _start, _field_size, _multiply) { \ 590 + #define VMSTATE_VBUFFER_MULTIPLY(_field, _state, _version, _test, \ 591 + _field_size, _multiply) { \ 591 592 .name = (stringify(_field)), \ 592 593 .version_id = (_version), \ 593 594 .field_exists = (_test), \ ··· 596 597 .info = &vmstate_info_buffer, \ 597 598 .flags = VMS_VBUFFER|VMS_POINTER|VMS_MULTIPLY, \ 598 599 .offset = offsetof(_state, _field), \ 599 - .start = (_start), \ 600 600 } 601 601 602 - #define VMSTATE_VBUFFER(_field, _state, _version, _test, _start, _field_size) { \ 602 + #define VMSTATE_VBUFFER(_field, _state, _version, _test, _field_size) { \ 603 603 .name = (stringify(_field)), \ 604 604 .version_id = (_version), \ 605 605 .field_exists = (_test), \ ··· 607 607 .info = &vmstate_info_buffer, \ 608 608 .flags = VMS_VBUFFER|VMS_POINTER, \ 609 609 .offset = offsetof(_state, _field), \ 610 - .start = (_start), \ 611 610 } 612 611 613 - #define VMSTATE_VBUFFER_UINT32(_field, _state, _version, _test, _start, _field_size) { \ 612 + #define VMSTATE_VBUFFER_UINT32(_field, _state, _version, _test, _field_size) { \ 614 613 .name = (stringify(_field)), \ 615 614 .version_id = (_version), \ 616 615 .field_exists = (_test), \ ··· 618 617 .info = &vmstate_info_buffer, \ 619 618 .flags = VMS_VBUFFER|VMS_POINTER, \ 620 619 .offset = offsetof(_state, _field), \ 621 - .start = (_start), \ 622 620 } 623 621 624 - #define VMSTATE_VBUFFER_ALLOC_UINT32(_field, _state, _version, _test, _start, _field_size) { \ 622 + #define VMSTATE_VBUFFER_ALLOC_UINT32(_field, _state, _version, \ 623 + _test, _field_size) { \ 625 624 .name = (stringify(_field)), \ 626 625 .version_id = (_version), \ 627 626 .field_exists = (_test), \ ··· 629 628 .info = &vmstate_info_buffer, \ 630 629 .flags = VMS_VBUFFER|VMS_POINTER|VMS_ALLOC, \ 631 630 .offset = offsetof(_state, _field), \ 632 - .start = (_start), \ 633 631 } 634 632 635 633 #define VMSTATE_BUFFER_UNSAFE_INFO_TEST(_field, _state, _test, _version, _info, _size) { \ ··· 948 946 VMSTATE_BUFFER_START_MIDDLE_V(_f, _s, _start, 0) 949 947 950 948 #define VMSTATE_PARTIAL_VBUFFER(_f, _s, _size) \ 951 - VMSTATE_VBUFFER(_f, _s, 0, NULL, 0, _size) 949 + VMSTATE_VBUFFER(_f, _s, 0, NULL, _size) 952 950 953 951 #define VMSTATE_PARTIAL_VBUFFER_UINT32(_f, _s, _size) \ 954 - VMSTATE_VBUFFER_UINT32(_f, _s, 0, NULL, 0, _size) 955 - 956 - #define VMSTATE_SUB_VBUFFER(_f, _s, _start, _size) \ 957 - VMSTATE_VBUFFER(_f, _s, 0, NULL, _start, _size) 952 - 958 953 959 954 #define VMSTATE_BUFFER_TEST(_f, _s, _test) \ 960 955 VMSTATE_STATIC_BUFFER(_f, _s, 0, _test, 0, sizeof(typeof_field(_s, _f)))
+1 -1
migration/savevm.c
··· 356 356 .pre_save = configuration_pre_save, 357 357 .fields = (VMStateField[]) { 358 358 VMSTATE_UINT32(len, SaveState), 359 - VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, 0, len), 359 + VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len), 360 360 VMSTATE_END_OF_LIST() 361 361 }, 362 362 .subsections = (const VMStateDescription*[]) {
+2 -2
migration/vmstate.c
··· 68 68 } 69 69 } 70 70 if (size) { 71 - *((void **)base_addr + field->start) = g_malloc(size); 71 + *(void **)base_addr = g_malloc(size); 72 72 } 73 73 } 74 - base_addr = *(void **)base_addr + field->start; 74 + base_addr = *(void **)base_addr; 75 75 } 76 76 77 77 return base_addr;
+1 -1
target/s390x/machine.c
··· 180 180 VMSTATE_UINT8(env.cpu_state, S390CPU), 181 181 VMSTATE_UINT8(env.sigp_order, S390CPU), 182 182 VMSTATE_UINT32_V(irqstate_saved_size, S390CPU, 4), 183 - VMSTATE_VBUFFER_UINT32(irqstate, S390CPU, 4, NULL, 0, 183 + VMSTATE_VBUFFER_UINT32(irqstate, S390CPU, 4, NULL, 184 184 irqstate_saved_size), 185 185 VMSTATE_END_OF_LIST() 186 186 },
+1 -1
util/fifo8.c
··· 118 118 .version_id = 1, 119 119 .minimum_version_id = 1, 120 120 .fields = (VMStateField[]) { 121 - VMSTATE_VBUFFER_UINT32(data, Fifo8, 1, NULL, 0, capacity), 121 + VMSTATE_VBUFFER_UINT32(data, Fifo8, 1, NULL, capacity), 122 122 VMSTATE_UINT32(head, Fifo8), 123 123 VMSTATE_UINT32(num, Fifo8), 124 124 VMSTATE_END_OF_LIST()