qemu with hax to log dma reads & writes jcs.org/2018/11/12/vfio

migration/block: rename BLOCK_SIZE macro

Both <linux/fs.h> and <sys/mount.h> define BLOCK_SIZE macros. Avoid
using that name in migration/block.c.

I noticed this when including <liburing.h> (Linux io_uring) from
"block/aio.h" and compilation failed. Although patches adding that
include haven't been sent yet, it makes sense to rename the macro now in
case someone else stumbles on it in the meantime.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>

authored by

Stefan Hajnoczi and committed by
Juan Quintela
a152bd00 26daeba4

+20 -19
+20 -19
migration/block.c
··· 27 27 #include "migration/vmstate.h" 28 28 #include "sysemu/block-backend.h" 29 29 30 - #define BLOCK_SIZE (1 << 20) 31 - #define BDRV_SECTORS_PER_DIRTY_CHUNK (BLOCK_SIZE >> BDRV_SECTOR_BITS) 30 + #define BLK_MIG_BLOCK_SIZE (1 << 20) 31 + #define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS) 32 32 33 33 #define BLK_MIG_FLAG_DEVICE_BLOCK 0x01 34 34 #define BLK_MIG_FLAG_EOS 0x02 ··· 133 133 uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK; 134 134 135 135 if (block_mig_state.zero_blocks && 136 - buffer_is_zero(blk->buf, BLOCK_SIZE)) { 136 + buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) { 137 137 flags |= BLK_MIG_FLAG_ZERO_BLOCK; 138 138 } 139 139 ··· 154 154 return; 155 155 } 156 156 157 - qemu_put_buffer(f, blk->buf, BLOCK_SIZE); 157 + qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE); 158 158 } 159 159 160 160 int blk_mig_active(void) ··· 309 309 } 310 310 311 311 blk = g_new(BlkMigBlock, 1); 312 - blk->buf = g_malloc(BLOCK_SIZE); 312 + blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE); 313 313 blk->bmds = bmds; 314 314 blk->sector = cur_sector; 315 315 blk->nr_sectors = nr_sectors; ··· 350 350 351 351 QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { 352 352 bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk), 353 - BLOCK_SIZE, NULL, NULL); 353 + BLK_MIG_BLOCK_SIZE, 354 + NULL, NULL); 354 355 if (!bmds->dirty_bitmap) { 355 356 ret = -errno; 356 357 goto fail; ··· 548 549 bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap); 549 550 550 551 blk = g_new(BlkMigBlock, 1); 551 - blk->buf = g_malloc(BLOCK_SIZE); 552 + blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE); 552 553 blk->bmds = bmds; 553 554 blk->sector = sector; 554 555 blk->nr_sectors = nr_sectors; ··· 770 771 771 772 /* control the rate of transfer */ 772 773 blk_mig_lock(); 773 - while (block_mig_state.read_done * BLOCK_SIZE < 774 + while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE < 774 775 qemu_file_get_rate_limit(f) && 775 776 block_mig_state.submitted < MAX_PARALLEL_IO && 776 777 
(block_mig_state.submitted + block_mig_state.read_done) < ··· 874 875 qemu_mutex_unlock_iothread(); 875 876 876 877 blk_mig_lock(); 877 - pending += block_mig_state.submitted * BLOCK_SIZE + 878 - block_mig_state.read_done * BLOCK_SIZE; 878 + pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE + 879 + block_mig_state.read_done * BLK_MIG_BLOCK_SIZE; 879 880 blk_mig_unlock(); 880 881 881 882 /* Report at least one block pending during bulk phase */ 882 883 if (pending <= max_size && !block_mig_state.bulk_completed) { 883 - pending = max_size + BLOCK_SIZE; 884 + pending = max_size + BLK_MIG_BLOCK_SIZE; 884 885 } 885 886 886 887 DPRINTF("Enter save live pending %" PRIu64 "\n", pending); ··· 901 902 int nr_sectors; 902 903 int ret; 903 904 BlockDriverInfo bdi; 904 - int cluster_size = BLOCK_SIZE; 905 + int cluster_size = BLK_MIG_BLOCK_SIZE; 905 906 906 907 do { 907 908 addr = qemu_get_be64(f); ··· 939 940 940 941 ret = bdrv_get_info(blk_bs(blk), &bdi); 941 942 if (ret == 0 && bdi.cluster_size > 0 && 942 - bdi.cluster_size <= BLOCK_SIZE && 943 - BLOCK_SIZE % bdi.cluster_size == 0) { 943 + bdi.cluster_size <= BLK_MIG_BLOCK_SIZE && 944 + BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) { 944 945 cluster_size = bdi.cluster_size; 945 946 } else { 946 - cluster_size = BLOCK_SIZE; 947 + cluster_size = BLK_MIG_BLOCK_SIZE; 947 948 } 948 949 } 949 950 ··· 962 963 int64_t cur_addr; 963 964 uint8_t *cur_buf; 964 965 965 - buf = g_malloc(BLOCK_SIZE); 966 - qemu_get_buffer(f, buf, BLOCK_SIZE); 967 - for (i = 0; i < BLOCK_SIZE / cluster_size; i++) { 966 + buf = g_malloc(BLK_MIG_BLOCK_SIZE); 967 + qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE); 968 + for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) { 968 969 cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size; 969 970 cur_buf = buf + i * cluster_size; 970 971 971 972 if ((!block_mig_state.zero_blocks || 972 - cluster_size < BLOCK_SIZE) && 973 + cluster_size < BLK_MIG_BLOCK_SIZE) && 973 974 buffer_is_zero(cur_buf, cluster_size)) { 
974 975 ret = blk_pwrite_zeroes(blk, cur_addr, 975 976 cluster_size,