
Merge remote-tracking branch 'remotes/cody/tags/block-pull-request' into staging

# gpg: Signature made Mon 18 Dec 2017 21:05:53 GMT
# gpg: using RSA key 0xBDBE7B27C0DE3057
# gpg: Good signature from "Jeffrey Cody <jcody@redhat.com>"
# gpg: aka "Jeffrey Cody <jeff@codyprime.org>"
# gpg: aka "Jeffrey Cody <codyprime@gmail.com>"
# Primary key fingerprint: 9957 4B4D 3474 90E7 9D98 D624 BDBE 7B27 C0DE 3057

* remotes/cody/tags/block-pull-request:
block/curl: fix minor memory leaks
block/curl: check error return of curl_global_init()
block/sheepdog: code beautification
block/sheepdog: remove spurious NULL check
blockjob: kick jobs on set-speed
backup: use copy_bitmap in incremental backup
backup: simplify non-dirty bits progress processing
backup: init copy_bitmap from sync_bitmap for incremental
backup: move from done_bitmap to copy_bitmap
hbitmap: add next_zero function

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+309 -141
+65 -51
block/backup.c
···
     BlockdevOnError on_target_error;
     CoRwlock flush_rwlock;
     uint64_t bytes_read;
-    unsigned long *done_bitmap;
     int64_t cluster_size;
     bool compress;
     NotifierWithReturn before_write;
     QLIST_HEAD(, CowRequest) inflight_reqs;
+
+    HBitmap *copy_bitmap;
 } BackupBlockJob;

 /* See if in-flight requests overlap and wait for them to complete */
···
     cow_request_begin(&cow_request, job, start, end);

     for (; start < end; start += job->cluster_size) {
-        if (test_bit(start / job->cluster_size, job->done_bitmap)) {
+        if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
             trace_backup_do_cow_skip(job, start);
             continue; /* already copied */
         }
+        hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);

         trace_backup_do_cow_process(job, start);

···
             if (error_is_read) {
                 *error_is_read = true;
             }
+            hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
             goto out;
         }

···
             if (error_is_read) {
                 *error_is_read = false;
             }
+            hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
             goto out;
         }

-        set_bit(start / job->cluster_size, job->done_bitmap);
-
         /* Publish progress, guest I/O counts as progress too. Note that the
          * offset field is an opaque progress value, it is not a disk offset.
          */
···
     }

     len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
-    bitmap_zero(backup_job->done_bitmap, len);
+    hbitmap_set(backup_job->copy_bitmap, 0, len);
 }

 void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
···

 static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
+    int ret;
     bool error_is_read;
-    int ret = 0;
-    int clusters_per_iter;
-    uint32_t granularity;
-    int64_t offset;
     int64_t cluster;
-    int64_t end;
-    int64_t last_cluster = -1;
+    HBitmapIter hbi;
+
+    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
+    while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+        do {
+            if (yield_and_check(job)) {
+                return 0;
+            }
+            ret = backup_do_cow(job, cluster * job->cluster_size,
+                                job->cluster_size, &error_is_read, false);
+            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
+                BLOCK_ERROR_ACTION_REPORT)
+            {
+                return ret;
+            }
+        } while (ret < 0);
+    }
+
+    return 0;
+}
+
+/* init copy_bitmap from sync_bitmap */
+static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
+{
     BdrvDirtyBitmapIter *dbi;
+    int64_t offset;
+    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
+                               job->cluster_size);

-    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
-    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
     dbi = bdrv_dirty_iter_new(job->sync_bitmap);
-
-    /* Find the next dirty sector(s) */
-    while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) {
-        cluster = offset / job->cluster_size;
+    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
+        int64_t cluster = offset / job->cluster_size;
+        int64_t next_cluster;

-        /* Fake progress updates for any clusters we skipped */
-        if (cluster != last_cluster + 1) {
-            job->common.offset += ((cluster - last_cluster - 1) *
-                                   job->cluster_size);
+        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
+        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
         }

-        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
-            do {
-                if (yield_and_check(job)) {
-                    goto out;
-                }
-                ret = backup_do_cow(job, cluster * job->cluster_size,
-                                    job->cluster_size, &error_is_read,
-                                    false);
-                if ((ret < 0) &&
-                    backup_error_action(job, error_is_read, -ret) ==
-                    BLOCK_ERROR_ACTION_REPORT) {
-                    goto out;
-                }
-            } while (ret < 0);
+        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
+        if (offset == -1) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
         }

-        /* If the bitmap granularity is smaller than the backup granularity,
-         * we need to advance the iterator pointer to the next cluster. */
-        if (granularity < job->cluster_size) {
-            bdrv_set_dirty_iter(dbi, cluster * job->cluster_size);
+        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
+        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
+        if (next_cluster >= end) {
+            break;
         }

-        last_cluster = cluster - 1;
+        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
     }

-    /* Play some final catchup with the progress meter */
-    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
-    if (last_cluster + 1 < end) {
-        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
-    }
+    job->common.offset = job->common.len -
+                         hbitmap_count(job->copy_bitmap) * job->cluster_size;

-out:
     bdrv_dirty_iter_free(dbi);
-    return ret;
 }

 static void coroutine_fn backup_run(void *opaque)
···
     BackupBlockJob *job = opaque;
     BackupCompleteData *data;
     BlockDriverState *bs = blk_bs(job->common.blk);
-    int64_t offset;
+    int64_t offset, nb_clusters;
     int ret = 0;

     QLIST_INIT(&job->inflight_reqs);
     qemu_co_rwlock_init(&job->flush_rwlock);

-    job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len,
-                                               job->cluster_size));
+    nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size);
+    job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
+    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+        backup_incremental_init_copy_bitmap(job);
+    } else {
+        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+    }
+

     job->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &job->before_write);

     if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
+        /* All bits are set in copy_bitmap to allow any cluster to be copied.
+         * This does not actually require them to be copied. */
         while (!block_job_is_cancelled(&job->common)) {
             /* Yield until the job is cancelled. We just let our before_write
              * notify callback service CoW requests. */
···
     /* wait until pending backup_do_cow() calls have completed */
     qemu_co_rwlock_wrlock(&job->flush_rwlock);
     qemu_co_rwlock_unlock(&job->flush_rwlock);
-    g_free(job->done_bitmap);
+    hbitmap_free(job->copy_bitmap);

     data = g_malloc(sizeof(*data));
     data->ret = ret;
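The backup.c change inverts the bitmap's meaning: done_bitmap marked clusters that were already copied, while copy_bitmap marks clusters that still need copying, is cleared just before a cluster is copied, and is set again if the copy fails so the cluster can be retried. Below is a minimal standalone model of that bookkeeping; it uses a plain C array rather than QEMU's HBitmap, and the cluster count and failing cluster are made up purely for illustration.

/* Model of copy_bitmap semantics: set = still to copy, cleared = copied,
 * re-set on failure so a later pass retries the cluster. */
#include <stdbool.h>
#include <stdio.h>

#define NB_CLUSTERS 8

static bool copy_bitmap[NB_CLUSTERS];

/* Pretend copy; fails once for cluster 5 to exercise the retry path. */
static int copy_cluster(int cluster)
{
    static bool failed_once;
    if (cluster == 5 && !failed_once) {
        failed_once = true;
        return -1;
    }
    printf("copied cluster %d\n", cluster);
    return 0;
}

int main(void)
{
    /* Full backup: every cluster starts out "to be copied". */
    for (int i = 0; i < NB_CLUSTERS; i++) {
        copy_bitmap[i] = true;
    }

    bool work_left = true;
    while (work_left) {
        work_left = false;
        for (int i = 0; i < NB_CLUSTERS; i++) {
            if (!copy_bitmap[i]) {
                continue;              /* already copied */
            }
            copy_bitmap[i] = false;    /* claim the cluster */
            if (copy_cluster(i) < 0) {
                copy_bitmap[i] = true; /* put it back for a retry */
                work_left = true;
            }
        }
    }
    return 0;
}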
+18 -6
block/curl.c
···

 struct BDRVCURLState;

+static bool libcurl_initialized;
+
 typedef struct CURLAIOCB {
     Coroutine *co;
     QEMUIOVector *qiov;
···
     double d;
     const char *secretid;
     const char *protocol_delimiter;
+    int ret;

-    static int inited = 0;

     if (flags & BDRV_O_RDWR) {
         error_setg(errp, "curl block device does not support writes");
         return -EROFS;
+    }
+
+    if (!libcurl_initialized) {
+        ret = curl_global_init(CURL_GLOBAL_ALL);
+        if (ret) {
+            error_setg(errp, "libcurl initialization failed with %d", ret);
+            return -EIO;
+        }
+        libcurl_initialized = true;
     }

     qemu_mutex_init(&s->mutex);
···
         }
     }

-    if (!inited) {
-        curl_global_init(CURL_GLOBAL_ALL);
-        inited = 1;
-    }
-
     DPRINTF("CURL: Opening %s\n", file);
     QSIMPLEQ_INIT(&s->free_state_waitq);
     s->aio_context = bdrv_get_aio_context(bs);
···
     qemu_mutex_destroy(&s->mutex);
     g_free(s->cookie);
     g_free(s->url);
+    g_free(s->username);
+    g_free(s->proxyusername);
+    g_free(s->proxypassword);
     qemu_opts_del(opts);
     return -EINVAL;
 }
···

     g_free(s->cookie);
     g_free(s->url);
+    g_free(s->username);
+    g_free(s->proxyusername);
+    g_free(s->proxypassword);
 }

 static int64_t curl_getlength(BlockDriverState *bs)
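The curl.c fix replaces the old unchecked, late curl_global_init() call with a one-time, error-checked initialization done up front in open. Here is a small standalone sketch of the same pattern against the real libcurl API; the function name and error-message wording are illustrative, not QEMU's.

/* One-time, error-checked libcurl global init before any per-instance setup. */
#include <curl/curl.h>
#include <stdbool.h>
#include <stdio.h>

static bool libcurl_initialized;

static int curl_open_like(void)
{
    if (!libcurl_initialized) {
        CURLcode ret = curl_global_init(CURL_GLOBAL_ALL);
        if (ret != CURLE_OK) {
            fprintf(stderr, "libcurl initialization failed with %d\n", (int)ret);
            return -1;
        }
        libcurl_initialized = true;
    }
    /* ... per-connection setup would follow here ... */
    return 0;
}

int main(void)
{
    int ret = curl_open_like();

    if (libcurl_initialized) {
        curl_global_cleanup();
    }
    return ret ? 1 : 0;
}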
+5
block/dirty-bitmap.c
···
 {
     return hbitmap_sha256(bitmap->bitmap, errp);
 }
+
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset)
+{
+    return hbitmap_next_zero(bitmap->bitmap, offset);
+}
+83 -83
block/sheepdog.c
···
     int cache_flags;
 } BDRVSheepdogReopenState;

-static const char * sd_strerror(int err)
+static const char *sd_strerror(int err)
 {
     int i;

···
     if (!tag) {
         tag = "";
     }
-    if (tag && strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
+    if (strlen(tag) >= SD_MAX_VDI_TAG_LEN) {
         error_setg(errp, "value of parameter 'tag' is too long");
         ret = -EINVAL;
         goto err_no_fd;
···
 };

 static BlockDriver bdrv_sheepdog = {
-    .format_name    = "sheepdog",
-    .protocol_name  = "sheepdog",
-    .instance_size  = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename    = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare    = sd_reopen_prepare,
-    .bdrv_reopen_commit     = sd_reopen_commit,
-    .bdrv_reopen_abort      = sd_reopen_abort,
-    .bdrv_close     = sd_close,
-    .bdrv_create    = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate  = sd_truncate,
+    .bdrv_truncate                = sd_truncate,

-    .bdrv_co_readv  = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,

-    .bdrv_snapshot_create   = sd_snapshot_create,
-    .bdrv_snapshot_goto     = sd_snapshot_goto,
-    .bdrv_snapshot_delete   = sd_snapshot_delete,
-    .bdrv_snapshot_list     = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,

-    .bdrv_save_vmstate  = sd_save_vmstate,
-    .bdrv_load_vmstate  = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,

-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,

-    .create_opts    = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };

 static BlockDriver bdrv_sheepdog_tcp = {
-    .format_name    = "sheepdog",
-    .protocol_name  = "sheepdog+tcp",
-    .instance_size  = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename    = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare    = sd_reopen_prepare,
-    .bdrv_reopen_commit     = sd_reopen_commit,
-    .bdrv_reopen_abort      = sd_reopen_abort,
-    .bdrv_close     = sd_close,
-    .bdrv_create    = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog+tcp",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate  = sd_truncate,
+    .bdrv_truncate                = sd_truncate,

-    .bdrv_co_readv  = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,

-    .bdrv_snapshot_create   = sd_snapshot_create,
-    .bdrv_snapshot_goto     = sd_snapshot_goto,
-    .bdrv_snapshot_delete   = sd_snapshot_delete,
-    .bdrv_snapshot_list     = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,

-    .bdrv_save_vmstate  = sd_save_vmstate,
-    .bdrv_load_vmstate  = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,

-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,

-    .create_opts    = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };

 static BlockDriver bdrv_sheepdog_unix = {
-    .format_name    = "sheepdog",
-    .protocol_name  = "sheepdog+unix",
-    .instance_size  = sizeof(BDRVSheepdogState),
-    .bdrv_parse_filename    = sd_parse_filename,
-    .bdrv_file_open = sd_open,
-    .bdrv_reopen_prepare    = sd_reopen_prepare,
-    .bdrv_reopen_commit     = sd_reopen_commit,
-    .bdrv_reopen_abort      = sd_reopen_abort,
-    .bdrv_close     = sd_close,
-    .bdrv_create    = sd_create,
-    .bdrv_has_zero_init = bdrv_has_zero_init_1,
-    .bdrv_getlength = sd_getlength,
+    .format_name                  = "sheepdog",
+    .protocol_name                = "sheepdog+unix",
+    .instance_size                = sizeof(BDRVSheepdogState),
+    .bdrv_parse_filename          = sd_parse_filename,
+    .bdrv_file_open               = sd_open,
+    .bdrv_reopen_prepare          = sd_reopen_prepare,
+    .bdrv_reopen_commit           = sd_reopen_commit,
+    .bdrv_reopen_abort            = sd_reopen_abort,
+    .bdrv_close                   = sd_close,
+    .bdrv_create                  = sd_create,
+    .bdrv_has_zero_init           = bdrv_has_zero_init_1,
+    .bdrv_getlength               = sd_getlength,
     .bdrv_get_allocated_file_size = sd_get_allocated_file_size,
-    .bdrv_truncate  = sd_truncate,
+    .bdrv_truncate                = sd_truncate,

-    .bdrv_co_readv  = sd_co_readv,
-    .bdrv_co_writev = sd_co_writev,
-    .bdrv_co_flush_to_disk  = sd_co_flush_to_disk,
-    .bdrv_co_pdiscard = sd_co_pdiscard,
-    .bdrv_co_get_block_status = sd_co_get_block_status,
+    .bdrv_co_readv                = sd_co_readv,
+    .bdrv_co_writev               = sd_co_writev,
+    .bdrv_co_flush_to_disk        = sd_co_flush_to_disk,
+    .bdrv_co_pdiscard             = sd_co_pdiscard,
+    .bdrv_co_get_block_status     = sd_co_get_block_status,

-    .bdrv_snapshot_create   = sd_snapshot_create,
-    .bdrv_snapshot_goto     = sd_snapshot_goto,
-    .bdrv_snapshot_delete   = sd_snapshot_delete,
-    .bdrv_snapshot_list     = sd_snapshot_list,
+    .bdrv_snapshot_create         = sd_snapshot_create,
+    .bdrv_snapshot_goto           = sd_snapshot_goto,
+    .bdrv_snapshot_delete         = sd_snapshot_delete,
+    .bdrv_snapshot_list           = sd_snapshot_list,

-    .bdrv_save_vmstate  = sd_save_vmstate,
-    .bdrv_load_vmstate  = sd_load_vmstate,
+    .bdrv_save_vmstate            = sd_save_vmstate,
+    .bdrv_load_vmstate            = sd_load_vmstate,

-    .bdrv_detach_aio_context = sd_detach_aio_context,
-    .bdrv_attach_aio_context = sd_attach_aio_context,
+    .bdrv_detach_aio_context      = sd_detach_aio_context,
+    .bdrv_attach_aio_context      = sd_attach_aio_context,

-    .create_opts    = &sd_create_opts,
+    .create_opts                  = &sd_create_opts,
 };

 static void bdrv_sheepdog_init(void)
+29 -1
blockjob.c
···

 static void block_job_event_cancelled(BlockJob *job);
 static void block_job_event_completed(BlockJob *job, const char *msg);
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job));

 /* Transactional group of block jobs */
 struct BlockJobTxn {
···
     }
 }

+/* Assumes the block_job_mutex is held */
+static bool block_job_timer_pending(BlockJob *job)
+{
+    return timer_pending(&job->sleep_timer);
+}
+
 void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
 {
     Error *local_err = NULL;
+    int64_t old_speed = job->speed;

     if (!job->driver->set_speed) {
         error_setg(errp, QERR_UNSUPPORTED);
···
     }

     job->speed = speed;
+    if (speed <= old_speed) {
+        return;
+    }
+
+    /* kick only if a timer is pending */
+    block_job_enter_cond(job, block_job_timer_pending);
 }

 void block_job_complete(BlockJob *job, Error **errp)
···
     }
 }

-void block_job_enter(BlockJob *job)
+/*
+ * Conditionally enter a block_job pending a call to fn() while
+ * under the block_job_lock critical section.
+ */
+static void block_job_enter_cond(BlockJob *job, bool(*fn)(BlockJob *job))
 {
     if (!block_job_started(job)) {
         return;
···
         return;
     }

+    if (fn && !fn(job)) {
+        block_job_unlock();
+        return;
+    }
+
     assert(!job->deferred_to_main_loop);
     timer_del(&job->sleep_timer);
     job->busy = true;
     block_job_unlock();
     aio_co_wake(job->co);
+}
+
+void block_job_enter(BlockJob *job)
+{
+    block_job_enter_cond(job, NULL);
 }

 bool block_job_is_cancelled(BlockJob *job)
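The blockjob.c change routes wake-ups through block_job_enter_cond() with a predicate, so set-speed only kicks a job that is actually sleeping on its timer, and only when the speed was raised. The following is a standalone model of that control flow using plain stand-in types (no coroutines, locks, or QEMU timers); names and output are illustrative only.

/* Model of conditional job wake-up: kick only on a speed increase and only
 * when the job is sleeping on a timer. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Job {
    int64_t speed;
    bool sleeping_on_timer;
} Job;

static bool job_timer_pending(Job *job)
{
    return job->sleeping_on_timer;
}

static void job_enter_cond(Job *job, bool (*fn)(Job *job))
{
    if (fn && !fn(job)) {
        return;             /* nothing to kick */
    }
    job->sleeping_on_timer = false;
    printf("job woken up\n");
}

static void job_set_speed(Job *job, int64_t speed)
{
    int64_t old_speed = job->speed;

    job->speed = speed;
    if (speed <= old_speed) {
        return;             /* throttling down never needs a kick */
    }
    job_enter_cond(job, job_timer_pending);
}

int main(void)
{
    Job job = { .speed = 1024, .sleeping_on_timer = true };

    job_set_speed(&job, 512);    /* slower: no wake-up */
    job_set_speed(&job, 4096);   /* faster while sleeping: wakes the job */
    return 0;
}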
+1
include/block/dirty-bitmap.h
···
 BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
                                         BdrvDirtyBitmap *bitmap);
 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
+int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t start);

 #endif
+8
include/qemu/hbitmap.h
···
  */
 unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi);

+/* hbitmap_next_zero:
+ * @hb: The HBitmap to operate on
+ * @start: The bit to start from.
+ *
+ * Find next not dirty bit.
+ */
+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start);
+
 /* hbitmap_create_meta:
  * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap.
  * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to
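hbitmap_next_zero() returns the first clear bit at or after start, or -1 if every remaining bit is set; the new test below checks it against exactly this brute-force definition. Here is a tiny reference model of those semantics over a plain array, independent of the multi-level HBitmap implementation (array contents chosen only for the example).

/* Reference semantics of next_zero: first clear bit at or after 'start',
 * or -1 if all remaining bits are set. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int64_t next_zero(const bool *bits, uint64_t size, uint64_t start)
{
    for (uint64_t i = start; i < size; i++) {
        if (!bits[i]) {
            return (int64_t)i;
        }
    }
    return -1;
}

int main(void)
{
    bool bits[8] = { 1, 1, 0, 1, 1, 1, 1, 1 };

    printf("%ld\n", (long)next_zero(bits, 8, 0)); /* 2: first clear bit */
    printf("%ld\n", (long)next_zero(bits, 8, 3)); /* -1: all set from 3 on */
    return 0;
}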
+61
tests/test-hbitmap.c
···
     hbitmap_iter_next(&hbi);
 }

+static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
+{
+    int64_t ret1 = hbitmap_next_zero(data->hb, start);
+    int64_t ret2 = start;
+    for ( ; ret2 < data->size && hbitmap_get(data->hb, ret2); ret2++) {
+        ;
+    }
+    if (ret2 == data->size) {
+        ret2 = -1;
+    }
+
+    g_assert_cmpint(ret1, ==, ret2);
+}
+
+static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity)
+{
+    hbitmap_test_init(data, L3, granularity);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, L2, 1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 - 1);
+    test_hbitmap_next_zero_check(data, L2);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+
+    hbitmap_set(data->hb, L2 + 5, L1);
+    test_hbitmap_next_zero_check(data, 0);
+    test_hbitmap_next_zero_check(data, L2 + 1);
+    test_hbitmap_next_zero_check(data, L2 + 2);
+    test_hbitmap_next_zero_check(data, L2 + 5);
+    test_hbitmap_next_zero_check(data, L2 + L1 - 1);
+    test_hbitmap_next_zero_check(data, L2 + L1);
+
+    hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - L1);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 2);
+    test_hbitmap_next_zero_check(data, L2 * 2 - 1);
+    test_hbitmap_next_zero_check(data, L2 * 2);
+    test_hbitmap_next_zero_check(data, L3 - 1);
+
+    hbitmap_set(data->hb, 0, L3);
+    test_hbitmap_next_zero_check(data, 0);
+}
+
+static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 0);
+}
+
+static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused)
+{
+    test_hbitmap_next_zero_do(data, 4);
+}
+
 int main(int argc, char **argv)
 {
     g_test_init(&argc, &argv, NULL);
···

     hbitmap_test_add("/hbitmap/iter/iter_and_reset",
                      test_hbitmap_iter_and_reset);
+
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_0",
+                     test_hbitmap_next_zero_0);
+    hbitmap_test_add("/hbitmap/next_zero/next_zero_4",
+                     test_hbitmap_next_zero_4);
+
     g_test_run();

     return 0;
+39
util/hbitmap.c
···
     }
 }

+int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start)
+{
+    size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL;
+    unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1];
+    uint64_t sz = hb->sizes[HBITMAP_LEVELS - 1];
+    unsigned long cur = last_lev[pos];
+    unsigned start_bit_offset =
+        (start >> hb->granularity) & (BITS_PER_LONG - 1);
+    int64_t res;
+
+    cur |= (1UL << start_bit_offset) - 1;
+    assert((start >> hb->granularity) < hb->size);
+
+    if (cur == (unsigned long)-1) {
+        do {
+            pos++;
+        } while (pos < sz && last_lev[pos] == (unsigned long)-1);
+
+        if (pos >= sz) {
+            return -1;
+        }
+
+        cur = last_lev[pos];
+    }
+
+    res = (pos << BITS_PER_LEVEL) + ctol(cur);
+    if (res >= hb->size) {
+        return -1;
+    }
+
+    res = res << hb->granularity;
+    if (res < start) {
+        assert(((start - res) >> hb->granularity) == 0);
+        return start;
+    }
+
+    return res;
+}
+
 bool hbitmap_empty(const HBitmap *hb)
 {
     return hb->count == 0;