QEMU with hacks to log DMA reads & writes: https://jcs.org/2018/11/12/vfio

Merge remote-tracking branch 'remotes/jnsnow/tags/bitmaps-pull-request' into staging

Pull request

# gpg: Signature made Wed 18 Mar 2020 20:23:28 GMT
# gpg: using RSA key F9B7ABDBBCACDF95BE76CBD07DEF8106AAFC390E
# gpg: Good signature from "John Snow (John Huston) <jsnow@redhat.com>" [full]
# Primary key fingerprint: FAEB 9711 A12C F475 812F 18F2 88A9 064D 1835 61EB
# Subkey fingerprint: F9B7 ABDB BCAC DF95 BE76 CBD0 7DEF 8106 AAFC 390E

* remotes/jnsnow/tags/bitmaps-pull-request:
block/qcow2-bitmap: use bdrv_dirty_bitmap_next_dirty
nbd/server: use bdrv_dirty_bitmap_next_dirty_area
nbd/server: introduce NBDExtentArray
block/dirty-bitmap: improve _next_dirty_area API
block/dirty-bitmap: add _next_dirty API
block/dirty-bitmap: switch _next_dirty_area and _next_zero to int64_t
hbitmap: drop meta bitmaps as they are unused
hbitmap: unpublish hbitmap_iter_skip_words
hbitmap: move hbitmap_iter_next_word to hbitmap.c
hbitmap: assert that we don't create bitmap larger than INT64_MAX
build: Silence clang warning on older glib autoptr usage

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

+393 -459
+12 -4
block/dirty-bitmap.c
··· 860 860 return hbitmap_sha256(bitmap->bitmap, errp); 861 861 } 862 862 863 - int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset, 864 - uint64_t bytes) 863 + int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset, 864 + int64_t bytes) 865 + { 866 + return hbitmap_next_dirty(bitmap->bitmap, offset, bytes); 867 + } 868 + 869 + int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, 870 + int64_t bytes) 865 871 { 866 872 return hbitmap_next_zero(bitmap->bitmap, offset, bytes); 867 873 } 868 874 869 875 bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, 870 - uint64_t *offset, uint64_t *bytes) 876 + int64_t start, int64_t end, int64_t max_dirty_count, 877 + int64_t *dirty_start, int64_t *dirty_count) 871 878 { 872 - return hbitmap_next_dirty_area(bitmap->bitmap, offset, bytes); 879 + return hbitmap_next_dirty_area(bitmap->bitmap, start, end, max_dirty_count, 880 + dirty_start, dirty_count); 873 881 } 874 882 875 883 /**
+5 -10
block/qcow2-bitmap.c
··· 1288 1288 uint64_t bm_size = bdrv_dirty_bitmap_size(bitmap); 1289 1289 const char *bm_name = bdrv_dirty_bitmap_name(bitmap); 1290 1290 uint8_t *buf = NULL; 1291 - BdrvDirtyBitmapIter *dbi; 1292 1291 uint64_t *tb; 1293 1292 uint64_t tb_size = 1294 1293 size_to_clusters(s, ··· 1307 1306 return NULL; 1308 1307 } 1309 1308 1310 - dbi = bdrv_dirty_iter_new(bitmap); 1311 1309 buf = g_malloc(s->cluster_size); 1312 1310 limit = bytes_covered_by_bitmap_cluster(s, bitmap); 1313 1311 assert(DIV_ROUND_UP(bm_size, limit) == tb_size); 1314 1312 1315 - while ((offset = bdrv_dirty_iter_next(dbi)) >= 0) { 1313 + offset = 0; 1314 + while ((offset = bdrv_dirty_bitmap_next_dirty(bitmap, offset, INT64_MAX)) 1315 + >= 0) 1316 + { 1316 1317 uint64_t cluster = offset / limit; 1317 1318 uint64_t end, write_size; 1318 1319 int64_t off; ··· 1355 1356 goto fail; 1356 1357 } 1357 1358 1358 - if (end >= bm_size) { 1359 - break; 1360 - } 1361 - 1362 - bdrv_set_dirty_iter(dbi, end); 1359 + offset = end; 1363 1360 } 1364 1361 1365 1362 *bitmap_table_size = tb_size; 1366 1363 g_free(buf); 1367 - bdrv_dirty_iter_free(dbi); 1368 1364 1369 1365 return tb; 1370 1366 1371 1367 fail: 1372 1368 clear_bitmap_table(bs, tb, tb_size); 1373 1369 g_free(buf); 1374 - bdrv_dirty_iter_free(dbi); 1375 1370 g_free(tb); 1376 1371 1377 1372 return NULL;
+20
configure
··· 3855 3855 fi 3856 3856 fi 3857 3857 3858 + # Silence clang warnings triggered by glib < 2.57.2 3859 + cat > $TMPC << EOF 3860 + #include <glib.h> 3861 + typedef struct Foo { 3862 + int i; 3863 + } Foo; 3864 + static void foo_free(Foo *f) 3865 + { 3866 + g_free(f); 3867 + } 3868 + G_DEFINE_AUTOPTR_CLEANUP_FUNC(Foo, foo_free); 3869 + int main(void) { return 0; } 3870 + EOF 3871 + if ! compile_prog "$glib_cflags -Werror" "$glib_libs" ; then 3872 + if cc_has_warning_flag "-Wno-unused-function"; then 3873 + glib_cflags="$glib_cflags -Wno-unused-function" 3874 + CFLAGS="$CFLAGS -Wno-unused-function" 3875 + fi 3876 + fi 3877 + 3858 3878 ######################################### 3859 3879 # zlib check 3860 3880
+6 -3
include/block/dirty-bitmap.h
··· 105 105 bitmap = bdrv_dirty_bitmap_next(bitmap)) 106 106 107 107 char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp); 108 - int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset, 109 - uint64_t bytes); 108 + int64_t bdrv_dirty_bitmap_next_dirty(BdrvDirtyBitmap *bitmap, int64_t offset, 109 + int64_t bytes); 110 + int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, int64_t offset, 111 + int64_t bytes); 110 112 bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap, 111 - uint64_t *offset, uint64_t *bytes); 113 + int64_t start, int64_t end, int64_t max_dirty_count, 114 + int64_t *dirty_start, int64_t *dirty_count); 112 115 BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap, 113 116 Error **errp); 114 117
+25 -68
include/qemu/hbitmap.h
··· 297 297 */ 298 298 void hbitmap_iter_init(HBitmapIter *hbi, const HBitmap *hb, uint64_t first); 299 299 300 - /* hbitmap_iter_skip_words: 301 - * @hbi: HBitmapIter to operate on. 300 + /* 301 + * hbitmap_next_dirty: 302 302 * 303 - * Internal function used by hbitmap_iter_next and hbitmap_iter_next_word. 303 + * Find next dirty bit within selected range. If not found, return -1. 304 + * 305 + * @hb: The HBitmap to operate on 306 + * @start: The bit to start from. 307 + * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole 308 + * bitmap is looked through. You can use INT64_MAX as @count to search up to 309 + * the bitmap end. 304 310 */ 305 - unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi); 311 + int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count); 306 312 307 313 /* hbitmap_next_zero: 308 314 * ··· 311 317 * @hb: The HBitmap to operate on 312 318 * @start: The bit to start from. 313 319 * @count: Number of bits to proceed. If @start+@count > bitmap size, the whole 314 - * bitmap is looked through. You can use UINT64_MAX as @count to search up to 320 + * bitmap is looked through. You can use INT64_MAX as @count to search up to 315 321 * the bitmap end. 316 322 */ 317 - int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count); 323 + int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count); 318 324 319 325 /* hbitmap_next_dirty_area: 320 326 * @hb: The HBitmap to operate on 321 - * @start: in-out parameter. 322 - * in: the offset to start from 323 - * out: (if area found) start of found area 324 - * @count: in-out parameter. 
325 - * in: length of requested region 326 - * out: length of found area 327 + * @start: the offset to start from 328 + * @end: end of requested area 329 + * @max_dirty_count: limit for out parameter dirty_count 330 + * @dirty_start: on success: start of found area 331 + * @dirty_count: on success: length of found area 327 332 * 328 - * If dirty area found within [@start, @start + @count), returns true and sets 329 - * @offset and @bytes appropriately. Otherwise returns false and leaves @offset 330 - * and @bytes unchanged. 333 + * If dirty area found within [@start, @end), returns true and sets 334 + * @dirty_start and @dirty_count appropriately. @dirty_count will not exceed 335 + * @max_dirty_count. 336 + * If dirty area was not found, returns false and leaves @dirty_start and 337 + * @dirty_count unchanged. 331 338 */ 332 - bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start, 333 - uint64_t *count); 334 - 335 - /* hbitmap_create_meta: 336 - * Create a "meta" hbitmap to track dirtiness of the bits in this HBitmap. 337 - * The caller owns the created bitmap and must call hbitmap_free_meta(hb) to 338 - * free it. 339 - * 340 - * Currently, we only guarantee that if a bit in the hbitmap is changed it 341 - * will be reflected in the meta bitmap, but we do not yet guarantee the 342 - * opposite. 343 - * 344 - * @hb: The HBitmap to operate on. 345 - * @chunk_size: How many bits in @hb does one bit in the meta track. 346 - */ 347 - HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size); 348 - 349 - /* hbitmap_free_meta: 350 - * Free the meta bitmap of @hb. 351 - * 352 - * @hb: The HBitmap whose meta bitmap should be freed. 353 - */ 354 - void hbitmap_free_meta(HBitmap *hb); 339 + bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end, 340 + int64_t max_dirty_count, 341 + int64_t *dirty_start, int64_t *dirty_count); 355 342 356 343 /** 357 344 * hbitmap_iter_next: ··· 361 348 * or -1 if all remaining bits are zero. 
362 349 */ 363 350 int64_t hbitmap_iter_next(HBitmapIter *hbi); 364 - 365 - /** 366 - * hbitmap_iter_next_word: 367 - * @hbi: HBitmapIter to operate on. 368 - * @p_cur: Location where to store the next non-zero word. 369 - * 370 - * Return the index of the next nonzero word that is set in @hbi's 371 - * associated HBitmap, and set *p_cur to the content of that word 372 - * (bits before the index that was passed to hbitmap_iter_init are 373 - * trimmed on the first call). Return -1, and set *p_cur to zero, 374 - * if all remaining words are zero. 375 - */ 376 - static inline size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur) 377 - { 378 - unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1]; 379 - 380 - if (cur == 0) { 381 - cur = hbitmap_iter_skip_words(hbi); 382 - if (cur == 0) { 383 - *p_cur = 0; 384 - return -1; 385 - } 386 - } 387 - 388 - /* The next call will resume work from the next word. */ 389 - hbi->cur[HBITMAP_LEVELS - 1] = 0; 390 - *p_cur = cur; 391 - return hbi->pos; 392 - } 393 - 394 351 395 352 #endif
+128 -123
nbd/server.c
··· 1909 1909 return ret; 1910 1910 } 1911 1911 1912 + typedef struct NBDExtentArray { 1913 + NBDExtent *extents; 1914 + unsigned int nb_alloc; 1915 + unsigned int count; 1916 + uint64_t total_length; 1917 + bool can_add; 1918 + bool converted_to_be; 1919 + } NBDExtentArray; 1920 + 1921 + static NBDExtentArray *nbd_extent_array_new(unsigned int nb_alloc) 1922 + { 1923 + NBDExtentArray *ea = g_new0(NBDExtentArray, 1); 1924 + 1925 + ea->nb_alloc = nb_alloc; 1926 + ea->extents = g_new(NBDExtent, nb_alloc); 1927 + ea->can_add = true; 1928 + 1929 + return ea; 1930 + } 1931 + 1932 + static void nbd_extent_array_free(NBDExtentArray *ea) 1933 + { 1934 + g_free(ea->extents); 1935 + g_free(ea); 1936 + } 1937 + G_DEFINE_AUTOPTR_CLEANUP_FUNC(NBDExtentArray, nbd_extent_array_free); 1938 + 1939 + /* Further modifications of the array after conversion are abandoned */ 1940 + static void nbd_extent_array_convert_to_be(NBDExtentArray *ea) 1941 + { 1942 + int i; 1943 + 1944 + assert(!ea->converted_to_be); 1945 + ea->can_add = false; 1946 + ea->converted_to_be = true; 1947 + 1948 + for (i = 0; i < ea->count; i++) { 1949 + ea->extents[i].flags = cpu_to_be32(ea->extents[i].flags); 1950 + ea->extents[i].length = cpu_to_be32(ea->extents[i].length); 1951 + } 1952 + } 1953 + 1912 1954 /* 1913 - * Populate @extents from block status. Update @bytes to be the actual 1914 - * length encoded (which may be smaller than the original), and update 1915 - * @nb_extents to the number of extents used. 1916 - * 1917 - * Returns zero on success and -errno on bdrv_block_status_above failure. 1955 + * Add extent to NBDExtentArray. If extent can't be added (no available space), 1956 + * return -1. 1957 + * For safety, when returning -1 for the first time, .can_add is set to false, 1958 + * further call to nbd_extent_array_add() will crash. 
1959 + * (to avoid the situation, when after failing to add an extent (returned -1), 1960 + * user miss this failure and add another extent, which is successfully added 1961 + * (array is full, but new extent may be squashed into the last one), then we 1962 + * have invalid array with skipped extent) 1918 1963 */ 1919 - static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset, 1920 - uint64_t *bytes, NBDExtent *extents, 1921 - unsigned int *nb_extents) 1964 + static int nbd_extent_array_add(NBDExtentArray *ea, 1965 + uint32_t length, uint32_t flags) 1922 1966 { 1923 - uint64_t remaining_bytes = *bytes; 1924 - NBDExtent *extent = extents, *extents_end = extents + *nb_extents; 1925 - bool first_extent = true; 1967 + assert(ea->can_add); 1926 1968 1927 - assert(*nb_extents); 1928 - while (remaining_bytes) { 1969 + if (!length) { 1970 + return 0; 1971 + } 1972 + 1973 + /* Extend previous extent if flags are the same */ 1974 + if (ea->count > 0 && flags == ea->extents[ea->count - 1].flags) { 1975 + uint64_t sum = (uint64_t)length + ea->extents[ea->count - 1].length; 1976 + 1977 + if (sum <= UINT32_MAX) { 1978 + ea->extents[ea->count - 1].length = sum; 1979 + ea->total_length += length; 1980 + return 0; 1981 + } 1982 + } 1983 + 1984 + if (ea->count >= ea->nb_alloc) { 1985 + ea->can_add = false; 1986 + return -1; 1987 + } 1988 + 1989 + ea->total_length += length; 1990 + ea->extents[ea->count] = (NBDExtent) {.length = length, .flags = flags}; 1991 + ea->count++; 1992 + 1993 + return 0; 1994 + } 1995 + 1996 + static int blockstatus_to_extents(BlockDriverState *bs, uint64_t offset, 1997 + uint64_t bytes, NBDExtentArray *ea) 1998 + { 1999 + while (bytes) { 1929 2000 uint32_t flags; 1930 2001 int64_t num; 1931 - int ret = bdrv_block_status_above(bs, NULL, offset, remaining_bytes, 1932 - &num, NULL, NULL); 2002 + int ret = bdrv_block_status_above(bs, NULL, offset, bytes, &num, 2003 + NULL, NULL); 1933 2004 1934 2005 if (ret < 0) { 1935 2006 return ret; ··· 1938 
2009 flags = (ret & BDRV_BLOCK_ALLOCATED ? 0 : NBD_STATE_HOLE) | 1939 2010 (ret & BDRV_BLOCK_ZERO ? NBD_STATE_ZERO : 0); 1940 2011 1941 - if (first_extent) { 1942 - extent->flags = flags; 1943 - extent->length = num; 1944 - first_extent = false; 1945 - } else if (flags == extent->flags) { 1946 - /* extend current extent */ 1947 - extent->length += num; 1948 - } else { 1949 - if (extent + 1 == extents_end) { 1950 - break; 1951 - } 2012 + if (nbd_extent_array_add(ea, num, flags) < 0) { 2013 + return 0; 2014 + } 1952 2015 1953 - /* start new extent */ 1954 - extent++; 1955 - extent->flags = flags; 1956 - extent->length = num; 1957 - } 1958 2016 offset += num; 1959 - remaining_bytes -= num; 2017 + bytes -= num; 1960 2018 } 1961 2019 1962 - extents_end = extent + 1; 1963 - 1964 - for (extent = extents; extent < extents_end; extent++) { 1965 - extent->flags = cpu_to_be32(extent->flags); 1966 - extent->length = cpu_to_be32(extent->length); 1967 - } 1968 - 1969 - *bytes -= remaining_bytes; 1970 - *nb_extents = extents_end - extents; 1971 - 1972 2020 return 0; 1973 2021 } 1974 2022 1975 - /* nbd_co_send_extents 2023 + /* 2024 + * nbd_co_send_extents 1976 2025 * 1977 - * @length is only for tracing purposes (and may be smaller or larger 1978 - * than the client's original request). @last controls whether 1979 - * NBD_REPLY_FLAG_DONE is sent. @extents should already be in 1980 - * big-endian format. 2026 + * @ea is converted to BE by the function 2027 + * @last controls whether NBD_REPLY_FLAG_DONE is sent. 
1981 2028 */ 1982 2029 static int nbd_co_send_extents(NBDClient *client, uint64_t handle, 1983 - NBDExtent *extents, unsigned int nb_extents, 1984 - uint64_t length, bool last, 1985 - uint32_t context_id, Error **errp) 2030 + NBDExtentArray *ea, 2031 + bool last, uint32_t context_id, Error **errp) 1986 2032 { 1987 2033 NBDStructuredMeta chunk; 1988 - 1989 2034 struct iovec iov[] = { 1990 2035 {.iov_base = &chunk, .iov_len = sizeof(chunk)}, 1991 - {.iov_base = extents, .iov_len = nb_extents * sizeof(extents[0])} 2036 + {.iov_base = ea->extents, .iov_len = ea->count * sizeof(ea->extents[0])} 1992 2037 }; 1993 2038 1994 - trace_nbd_co_send_extents(handle, nb_extents, context_id, length, last); 2039 + nbd_extent_array_convert_to_be(ea); 2040 + 2041 + trace_nbd_co_send_extents(handle, ea->count, context_id, ea->total_length, 2042 + last); 1995 2043 set_be_chunk(&chunk.h, last ? NBD_REPLY_FLAG_DONE : 0, 1996 2044 NBD_REPLY_TYPE_BLOCK_STATUS, 1997 2045 handle, sizeof(chunk) - sizeof(chunk.h) + iov[1].iov_len); ··· 2009 2057 { 2010 2058 int ret; 2011 2059 unsigned int nb_extents = dont_fragment ? 1 : NBD_MAX_BLOCK_STATUS_EXTENTS; 2012 - NBDExtent *extents = g_new(NBDExtent, nb_extents); 2013 - uint64_t final_length = length; 2060 + g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents); 2014 2061 2015 - ret = blockstatus_to_extents(bs, offset, &final_length, extents, 2016 - &nb_extents); 2062 + ret = blockstatus_to_extents(bs, offset, length, ea); 2017 2063 if (ret < 0) { 2018 - g_free(extents); 2019 2064 return nbd_co_send_structured_error( 2020 2065 client, handle, -ret, "can't get block status", errp); 2021 2066 } 2022 2067 2023 - ret = nbd_co_send_extents(client, handle, extents, nb_extents, 2024 - final_length, last, context_id, errp); 2025 - 2026 - g_free(extents); 2027 - 2028 - return ret; 2068 + return nbd_co_send_extents(client, handle, ea, last, context_id, errp); 2029 2069 } 2030 2070 2031 - /* 2032 - * Populate @extents from a dirty bitmap. 
Unless @dont_fragment, the 2033 - * final extent may exceed the original @length. Store in @length the 2034 - * byte length encoded (which may be smaller or larger than the 2035 - * original), and return the number of extents used. 2036 - */ 2037 - static unsigned int bitmap_to_extents(BdrvDirtyBitmap *bitmap, uint64_t offset, 2038 - uint64_t *length, NBDExtent *extents, 2039 - unsigned int nb_extents, 2040 - bool dont_fragment) 2071 + /* Populate @ea from a dirty bitmap. */ 2072 + static void bitmap_to_extents(BdrvDirtyBitmap *bitmap, 2073 + uint64_t offset, uint64_t length, 2074 + NBDExtentArray *es) 2041 2075 { 2042 - uint64_t begin = offset, end = offset; 2043 - uint64_t overall_end = offset + *length; 2044 - unsigned int i = 0; 2045 - BdrvDirtyBitmapIter *it; 2046 - bool dirty; 2076 + int64_t start, dirty_start, dirty_count; 2077 + int64_t end = offset + length; 2078 + bool full = false; 2047 2079 2048 2080 bdrv_dirty_bitmap_lock(bitmap); 2049 2081 2050 - it = bdrv_dirty_iter_new(bitmap); 2051 - dirty = bdrv_dirty_bitmap_get_locked(bitmap, offset); 2052 - 2053 - assert(begin < overall_end && nb_extents); 2054 - while (begin < overall_end && i < nb_extents) { 2055 - bool next_dirty = !dirty; 2056 - 2057 - if (dirty) { 2058 - end = bdrv_dirty_bitmap_next_zero(bitmap, begin, UINT64_MAX); 2059 - } else { 2060 - bdrv_set_dirty_iter(it, begin); 2061 - end = bdrv_dirty_iter_next(it); 2062 - } 2063 - if (end == -1 || end - begin > UINT32_MAX) { 2064 - /* Cap to an aligned value < 4G beyond begin. 
*/ 2065 - end = MIN(bdrv_dirty_bitmap_size(bitmap), 2066 - begin + UINT32_MAX + 1 - 2067 - bdrv_dirty_bitmap_granularity(bitmap)); 2068 - next_dirty = dirty; 2069 - } 2070 - if (dont_fragment && end > overall_end) { 2071 - end = overall_end; 2082 + for (start = offset; 2083 + bdrv_dirty_bitmap_next_dirty_area(bitmap, start, end, INT32_MAX, 2084 + &dirty_start, &dirty_count); 2085 + start = dirty_start + dirty_count) 2086 + { 2087 + if ((nbd_extent_array_add(es, dirty_start - start, 0) < 0) || 2088 + (nbd_extent_array_add(es, dirty_count, NBD_STATE_DIRTY) < 0)) 2089 + { 2090 + full = true; 2091 + break; 2072 2092 } 2073 - 2074 - extents[i].length = cpu_to_be32(end - begin); 2075 - extents[i].flags = cpu_to_be32(dirty ? NBD_STATE_DIRTY : 0); 2076 - i++; 2077 - begin = end; 2078 - dirty = next_dirty; 2079 2093 } 2080 2094 2081 - bdrv_dirty_iter_free(it); 2095 + if (!full) { 2096 + /* last non dirty extent */ 2097 + nbd_extent_array_add(es, end - start, 0); 2098 + } 2082 2099 2083 2100 bdrv_dirty_bitmap_unlock(bitmap); 2084 - 2085 - assert(offset < end); 2086 - *length = end - offset; 2087 - return i; 2088 2101 } 2089 2102 2090 2103 static int nbd_co_send_bitmap(NBDClient *client, uint64_t handle, ··· 2092 2105 uint32_t length, bool dont_fragment, bool last, 2093 2106 uint32_t context_id, Error **errp) 2094 2107 { 2095 - int ret; 2096 2108 unsigned int nb_extents = dont_fragment ? 
1 : NBD_MAX_BLOCK_STATUS_EXTENTS; 2097 - NBDExtent *extents = g_new(NBDExtent, nb_extents); 2098 - uint64_t final_length = length; 2109 + g_autoptr(NBDExtentArray) ea = nbd_extent_array_new(nb_extents); 2099 2110 2100 - nb_extents = bitmap_to_extents(bitmap, offset, &final_length, extents, 2101 - nb_extents, dont_fragment); 2111 + bitmap_to_extents(bitmap, offset, length, ea); 2102 2112 2103 - ret = nbd_co_send_extents(client, handle, extents, nb_extents, 2104 - final_length, last, context_id, errp); 2105 - 2106 - g_free(extents); 2107 - 2108 - return ret; 2113 + return nbd_co_send_extents(client, handle, ea, last, context_id, errp); 2109 2114 } 2110 2115 2111 2116 /* nbd_co_receive_request
+114 -200
tests/test-hbitmap.c
··· 22 22 23 23 typedef struct TestHBitmapData { 24 24 HBitmap *hb; 25 - HBitmap *meta; 26 25 unsigned long *bits; 27 26 size_t size; 28 27 size_t old_size; ··· 94 93 } 95 94 } 96 95 97 - static void hbitmap_test_init_meta(TestHBitmapData *data, 98 - uint64_t size, int granularity, 99 - int meta_chunk) 100 - { 101 - hbitmap_test_init(data, size, granularity); 102 - data->meta = hbitmap_create_meta(data->hb, meta_chunk); 103 - } 104 - 105 96 static inline size_t hbitmap_test_array_size(size_t bits) 106 97 { 107 98 size_t n = DIV_ROUND_UP(bits, BITS_PER_LONG); ··· 144 135 const void *unused) 145 136 { 146 137 if (data->hb) { 147 - if (data->meta) { 148 - hbitmap_free_meta(data->hb); 149 - } 150 138 hbitmap_free(data->hb); 151 139 data->hb = NULL; 152 140 } ··· 648 636 hbitmap_test_truncate(data, size, -diff, 0); 649 637 } 650 638 651 - static void hbitmap_check_meta(TestHBitmapData *data, 652 - int64_t start, int count) 653 - { 654 - int64_t i; 655 - 656 - for (i = 0; i < data->size; i++) { 657 - if (i >= start && i < start + count) { 658 - g_assert(hbitmap_get(data->meta, i)); 659 - } else { 660 - g_assert(!hbitmap_get(data->meta, i)); 661 - } 662 - } 663 - } 664 - 665 - static void hbitmap_test_meta(TestHBitmapData *data, 666 - int64_t start, int count, 667 - int64_t check_start, int check_count) 668 - { 669 - hbitmap_reset_all(data->hb); 670 - hbitmap_reset_all(data->meta); 671 - 672 - /* Test "unset" -> "unset" will not update meta. 
*/ 673 - hbitmap_reset(data->hb, start, count); 674 - hbitmap_check_meta(data, 0, 0); 675 - 676 - /* Test "unset" -> "set" will update meta */ 677 - hbitmap_set(data->hb, start, count); 678 - hbitmap_check_meta(data, check_start, check_count); 679 - 680 - /* Test "set" -> "set" will not update meta */ 681 - hbitmap_reset_all(data->meta); 682 - hbitmap_set(data->hb, start, count); 683 - hbitmap_check_meta(data, 0, 0); 684 - 685 - /* Test "set" -> "unset" will update meta */ 686 - hbitmap_reset_all(data->meta); 687 - hbitmap_reset(data->hb, start, count); 688 - hbitmap_check_meta(data, check_start, check_count); 689 - } 690 - 691 - static void hbitmap_test_meta_do(TestHBitmapData *data, int chunk_size) 692 - { 693 - uint64_t size = chunk_size * 100; 694 - hbitmap_test_init_meta(data, size, 0, chunk_size); 695 - 696 - hbitmap_test_meta(data, 0, 1, 0, chunk_size); 697 - hbitmap_test_meta(data, 0, chunk_size, 0, chunk_size); 698 - hbitmap_test_meta(data, chunk_size - 1, 1, 0, chunk_size); 699 - hbitmap_test_meta(data, chunk_size - 1, 2, 0, chunk_size * 2); 700 - hbitmap_test_meta(data, chunk_size - 1, chunk_size + 1, 0, chunk_size * 2); 701 - hbitmap_test_meta(data, chunk_size - 1, chunk_size + 2, 0, chunk_size * 3); 702 - hbitmap_test_meta(data, 7 * chunk_size - 1, chunk_size + 2, 703 - 6 * chunk_size, chunk_size * 3); 704 - hbitmap_test_meta(data, size - 1, 1, size - chunk_size, chunk_size); 705 - hbitmap_test_meta(data, 0, size, 0, size); 706 - } 707 - 708 - static void test_hbitmap_meta_byte(TestHBitmapData *data, const void *unused) 709 - { 710 - hbitmap_test_meta_do(data, BITS_PER_BYTE); 711 - } 712 - 713 - static void test_hbitmap_meta_word(TestHBitmapData *data, const void *unused) 714 - { 715 - hbitmap_test_meta_do(data, BITS_PER_LONG); 716 - } 717 - 718 - static void test_hbitmap_meta_sector(TestHBitmapData *data, const void *unused) 719 - { 720 - hbitmap_test_meta_do(data, BDRV_SECTOR_SIZE * BITS_PER_BYTE); 721 - } 722 - 723 - /** 724 - * Create an HBitmap 
and test set/unset. 725 - */ 726 - static void test_hbitmap_meta_one(TestHBitmapData *data, const void *unused) 727 - { 728 - int i; 729 - int64_t offsets[] = { 730 - 0, 1, L1 - 1, L1, L1 + 1, L2 - 1, L2, L2 + 1, L3 - 1, L3, L3 + 1 731 - }; 732 - 733 - hbitmap_test_init_meta(data, L3 * 2, 0, 1); 734 - for (i = 0; i < ARRAY_SIZE(offsets); i++) { 735 - hbitmap_test_meta(data, offsets[i], 1, offsets[i], 1); 736 - hbitmap_test_meta(data, offsets[i], L1, offsets[i], L1); 737 - hbitmap_test_meta(data, offsets[i], L2, offsets[i], L2); 738 - } 739 - } 740 - 741 639 static void test_hbitmap_serialize_align(TestHBitmapData *data, 742 640 const void *unused) 743 641 { ··· 748 646 749 647 r = hbitmap_serialization_align(data->hb); 750 648 g_assert_cmpint(r, ==, 64 << 3); 751 - } 752 - 753 - static void test_hbitmap_meta_zero(TestHBitmapData *data, const void *unused) 754 - { 755 - hbitmap_test_init_meta(data, 0, 0, 1); 756 - 757 - hbitmap_check_meta(data, 0, 0); 758 649 } 759 650 760 651 static void hbitmap_test_serialize_range(TestHBitmapData *data, ··· 925 816 hbitmap_iter_next(&hbi); 926 817 } 927 818 928 - static void test_hbitmap_next_zero_check_range(TestHBitmapData *data, 929 - uint64_t start, 930 - uint64_t count) 819 + static void test_hbitmap_next_x_check_range(TestHBitmapData *data, 820 + int64_t start, 821 + int64_t count) 931 822 { 932 - int64_t ret1 = hbitmap_next_zero(data->hb, start, count); 933 - int64_t ret2 = start; 823 + int64_t next_zero = hbitmap_next_zero(data->hb, start, count); 824 + int64_t next_dirty = hbitmap_next_dirty(data->hb, start, count); 825 + int64_t next; 934 826 int64_t end = start >= data->size || data->size - start < count ? 
935 827 data->size : start + count; 828 + bool first_bit = hbitmap_get(data->hb, start); 936 829 937 - for ( ; ret2 < end && hbitmap_get(data->hb, ret2); ret2++) { 830 + for (next = start; 831 + next < end && hbitmap_get(data->hb, next) == first_bit; 832 + next++) 833 + { 938 834 ; 939 835 } 940 - if (ret2 == end) { 941 - ret2 = -1; 836 + 837 + if (next == end) { 838 + next = -1; 942 839 } 943 840 944 - g_assert_cmpint(ret1, ==, ret2); 841 + g_assert_cmpint(next_dirty, ==, first_bit ? start : next); 842 + g_assert_cmpint(next_zero, ==, first_bit ? next : start); 945 843 } 946 844 947 - static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start) 845 + static void test_hbitmap_next_x_check(TestHBitmapData *data, int64_t start) 948 846 { 949 - test_hbitmap_next_zero_check_range(data, start, UINT64_MAX); 847 + test_hbitmap_next_x_check_range(data, start, INT64_MAX); 950 848 } 951 849 952 - static void test_hbitmap_next_zero_do(TestHBitmapData *data, int granularity) 850 + static void test_hbitmap_next_x_do(TestHBitmapData *data, int granularity) 953 851 { 954 852 hbitmap_test_init(data, L3, granularity); 955 - test_hbitmap_next_zero_check(data, 0); 956 - test_hbitmap_next_zero_check(data, L3 - 1); 957 - test_hbitmap_next_zero_check_range(data, 0, 1); 958 - test_hbitmap_next_zero_check_range(data, L3 - 1, 1); 853 + test_hbitmap_next_x_check(data, 0); 854 + test_hbitmap_next_x_check(data, L3 - 1); 855 + test_hbitmap_next_x_check_range(data, 0, 1); 856 + test_hbitmap_next_x_check_range(data, L3 - 1, 1); 959 857 960 858 hbitmap_set(data->hb, L2, 1); 961 - test_hbitmap_next_zero_check(data, 0); 962 - test_hbitmap_next_zero_check(data, L2 - 1); 963 - test_hbitmap_next_zero_check(data, L2); 964 - test_hbitmap_next_zero_check(data, L2 + 1); 965 - test_hbitmap_next_zero_check_range(data, 0, 1); 966 - test_hbitmap_next_zero_check_range(data, 0, L2); 967 - test_hbitmap_next_zero_check_range(data, L2 - 1, 1); 968 - test_hbitmap_next_zero_check_range(data, L2 - 
1, 2); 969 - test_hbitmap_next_zero_check_range(data, L2, 1); 970 - test_hbitmap_next_zero_check_range(data, L2 + 1, 1); 859 + test_hbitmap_next_x_check(data, 0); 860 + test_hbitmap_next_x_check(data, L2 - 1); 861 + test_hbitmap_next_x_check(data, L2); 862 + test_hbitmap_next_x_check(data, L2 + 1); 863 + test_hbitmap_next_x_check_range(data, 0, 1); 864 + test_hbitmap_next_x_check_range(data, 0, L2); 865 + test_hbitmap_next_x_check_range(data, L2 - 1, 1); 866 + test_hbitmap_next_x_check_range(data, L2 - 1, 2); 867 + test_hbitmap_next_x_check_range(data, L2, 1); 868 + test_hbitmap_next_x_check_range(data, L2 + 1, 1); 971 869 972 870 hbitmap_set(data->hb, L2 + 5, L1); 973 - test_hbitmap_next_zero_check(data, 0); 974 - test_hbitmap_next_zero_check(data, L2 + 1); 975 - test_hbitmap_next_zero_check(data, L2 + 2); 976 - test_hbitmap_next_zero_check(data, L2 + 5); 977 - test_hbitmap_next_zero_check(data, L2 + L1 - 1); 978 - test_hbitmap_next_zero_check(data, L2 + L1); 979 - test_hbitmap_next_zero_check_range(data, L2, 6); 980 - test_hbitmap_next_zero_check_range(data, L2 + 1, 3); 981 - test_hbitmap_next_zero_check_range(data, L2 + 4, L1); 982 - test_hbitmap_next_zero_check_range(data, L2 + 5, L1); 871 + test_hbitmap_next_x_check(data, 0); 872 + test_hbitmap_next_x_check(data, L2 - L1); 873 + test_hbitmap_next_x_check(data, L2 + 1); 874 + test_hbitmap_next_x_check(data, L2 + 2); 875 + test_hbitmap_next_x_check(data, L2 + 5); 876 + test_hbitmap_next_x_check(data, L2 + L1 - 1); 877 + test_hbitmap_next_x_check(data, L2 + L1); 878 + test_hbitmap_next_x_check(data, L2 + L1 + 1); 879 + test_hbitmap_next_x_check_range(data, L2 - 2, L1); 880 + test_hbitmap_next_x_check_range(data, L2, 4); 881 + test_hbitmap_next_x_check_range(data, L2, 6); 882 + test_hbitmap_next_x_check_range(data, L2 + 1, 3); 883 + test_hbitmap_next_x_check_range(data, L2 + 4, L1); 884 + test_hbitmap_next_x_check_range(data, L2 + 5, L1); 885 + test_hbitmap_next_x_check_range(data, L2 + 5 + L1 - 1, 1); 886 + 
test_hbitmap_next_x_check_range(data, L2 + 5 + L1, 1); 887 + test_hbitmap_next_x_check_range(data, L2 + 5 + L1 + 1, 1); 983 888 984 889 hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2); 985 - test_hbitmap_next_zero_check(data, L2 * 2 - L1); 986 - test_hbitmap_next_zero_check(data, L2 * 2 - 2); 987 - test_hbitmap_next_zero_check(data, L2 * 2 - 1); 988 - test_hbitmap_next_zero_check(data, L2 * 2); 989 - test_hbitmap_next_zero_check(data, L3 - 1); 990 - test_hbitmap_next_zero_check_range(data, L2 * 2 - L1, L1 + 1); 991 - test_hbitmap_next_zero_check_range(data, L2 * 2, L2); 890 + test_hbitmap_next_x_check(data, L2 * 2 - L1); 891 + test_hbitmap_next_x_check(data, L2 * 2 - 2); 892 + test_hbitmap_next_x_check(data, L2 * 2 - 1); 893 + test_hbitmap_next_x_check(data, L2 * 2); 894 + test_hbitmap_next_x_check(data, L2 * 2 + 1); 895 + test_hbitmap_next_x_check(data, L2 * 2 + L1); 896 + test_hbitmap_next_x_check(data, L3 - 1); 897 + test_hbitmap_next_x_check_range(data, L2 * 2 - L1, L1 + 1); 898 + test_hbitmap_next_x_check_range(data, L2 * 2, L2); 992 899 993 900 hbitmap_set(data->hb, 0, L3); 994 - test_hbitmap_next_zero_check(data, 0); 901 + test_hbitmap_next_x_check(data, 0); 995 902 } 996 903 997 - static void test_hbitmap_next_zero_0(TestHBitmapData *data, const void *unused) 904 + static void test_hbitmap_next_x_0(TestHBitmapData *data, const void *unused) 998 905 { 999 - test_hbitmap_next_zero_do(data, 0); 906 + test_hbitmap_next_x_do(data, 0); 1000 907 } 1001 908 1002 - static void test_hbitmap_next_zero_4(TestHBitmapData *data, const void *unused) 909 + static void test_hbitmap_next_x_4(TestHBitmapData *data, const void *unused) 1003 910 { 1004 - test_hbitmap_next_zero_do(data, 4); 911 + test_hbitmap_next_x_do(data, 4); 1005 912 } 1006 913 1007 - static void test_hbitmap_next_zero_after_truncate(TestHBitmapData *data, 1008 - const void *unused) 914 + static void test_hbitmap_next_x_after_truncate(TestHBitmapData *data, 915 + const void *unused) 1009 916 { 1010 917 
hbitmap_test_init(data, L1, 0); 1011 918 hbitmap_test_truncate_impl(data, L1 * 2); 1012 919 hbitmap_set(data->hb, 0, L1); 1013 - test_hbitmap_next_zero_check(data, 0); 920 + test_hbitmap_next_x_check(data, 0); 1014 921 } 1015 922 1016 - static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data, 1017 - uint64_t offset, 1018 - uint64_t count) 923 + static void test_hbitmap_next_dirty_area_check_limited(TestHBitmapData *data, 924 + int64_t offset, 925 + int64_t count, 926 + int64_t max_dirty) 1019 927 { 1020 - uint64_t off1, off2; 1021 - uint64_t len1 = 0, len2; 928 + int64_t off1, off2; 929 + int64_t len1 = 0, len2; 1022 930 bool ret1, ret2; 1023 931 int64_t end; 1024 932 1025 - off1 = offset; 1026 - len1 = count; 1027 - ret1 = hbitmap_next_dirty_area(data->hb, &off1, &len1); 933 + ret1 = hbitmap_next_dirty_area(data->hb, 934 + offset, count == INT64_MAX ? INT64_MAX : offset + count, max_dirty, 935 + &off1, &len1); 1028 936 1029 937 end = offset > data->size || data->size - offset < count ? 
data->size : 1030 938 offset + count; ··· 1033 941 ; 1034 942 } 1035 943 1036 - for (len2 = 1; off2 + len2 < end && hbitmap_get(data->hb, off2 + len2); 1037 - len2++) { 944 + for (len2 = 1; (off2 + len2 < end && len2 < max_dirty && 945 + hbitmap_get(data->hb, off2 + len2)); len2++) 946 + { 1038 947 ; 1039 948 } 1040 949 1041 950 ret2 = off2 < end; 1042 - if (!ret2) { 1043 - /* leave unchanged */ 1044 - off2 = offset; 1045 - len2 = count; 951 + g_assert_cmpint(ret1, ==, ret2); 952 + 953 + if (ret2) { 954 + g_assert_cmpint(off1, ==, off2); 955 + g_assert_cmpint(len1, ==, len2); 1046 956 } 957 + } 1047 958 1048 - g_assert_cmpint(ret1, ==, ret2); 1049 - g_assert_cmpint(off1, ==, off2); 1050 - g_assert_cmpint(len1, ==, len2); 959 + static void test_hbitmap_next_dirty_area_check(TestHBitmapData *data, 960 + int64_t offset, int64_t count) 961 + { 962 + test_hbitmap_next_dirty_area_check_limited(data, offset, count, INT64_MAX); 1051 963 } 1052 964 1053 965 static void test_hbitmap_next_dirty_area_do(TestHBitmapData *data, 1054 966 int granularity) 1055 967 { 1056 968 hbitmap_test_init(data, L3, granularity); 1057 - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); 969 + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); 1058 970 test_hbitmap_next_dirty_area_check(data, 0, 1); 1059 971 test_hbitmap_next_dirty_area_check(data, L3 - 1, 1); 972 + test_hbitmap_next_dirty_area_check_limited(data, 0, INT64_MAX, 1); 1060 973 1061 974 hbitmap_set(data->hb, L2, 1); 1062 975 test_hbitmap_next_dirty_area_check(data, 0, 1); 1063 976 test_hbitmap_next_dirty_area_check(data, 0, L2); 1064 - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); 1065 - test_hbitmap_next_dirty_area_check(data, L2 - 1, UINT64_MAX); 977 + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); 978 + test_hbitmap_next_dirty_area_check(data, L2 - 1, INT64_MAX); 1066 979 test_hbitmap_next_dirty_area_check(data, L2 - 1, 1); 1067 980 test_hbitmap_next_dirty_area_check(data, L2 - 1, 2); 1068 981 
test_hbitmap_next_dirty_area_check(data, L2 - 1, 3); 1069 - test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX); 982 + test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX); 1070 983 test_hbitmap_next_dirty_area_check(data, L2, 1); 1071 984 test_hbitmap_next_dirty_area_check(data, L2 + 1, 1); 985 + test_hbitmap_next_dirty_area_check_limited(data, 0, INT64_MAX, 1); 986 + test_hbitmap_next_dirty_area_check_limited(data, L2 - 1, 2, 1); 1072 987 1073 988 hbitmap_set(data->hb, L2 + 5, L1); 1074 - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); 989 + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); 1075 990 test_hbitmap_next_dirty_area_check(data, L2 - 2, 8); 1076 991 test_hbitmap_next_dirty_area_check(data, L2 + 1, 5); 1077 992 test_hbitmap_next_dirty_area_check(data, L2 + 1, 3); ··· 1081 996 test_hbitmap_next_dirty_area_check(data, L2 + L1, L1); 1082 997 test_hbitmap_next_dirty_area_check(data, L2, 0); 1083 998 test_hbitmap_next_dirty_area_check(data, L2 + 1, 0); 999 + test_hbitmap_next_dirty_area_check_limited(data, L2 + 3, INT64_MAX, 3); 1000 + test_hbitmap_next_dirty_area_check_limited(data, L2 + 3, 7, 10); 1084 1001 1085 1002 hbitmap_set(data->hb, L2 * 2, L3 - L2 * 2); 1086 - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); 1087 - test_hbitmap_next_dirty_area_check(data, L2, UINT64_MAX); 1088 - test_hbitmap_next_dirty_area_check(data, L2 + 1, UINT64_MAX); 1089 - test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, UINT64_MAX); 1003 + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); 1004 + test_hbitmap_next_dirty_area_check(data, L2, INT64_MAX); 1005 + test_hbitmap_next_dirty_area_check(data, L2 + 1, INT64_MAX); 1006 + test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1 - 1, INT64_MAX); 1090 1007 test_hbitmap_next_dirty_area_check(data, L2 + 5 + L1, 5); 1091 1008 test_hbitmap_next_dirty_area_check(data, L2 * 2 - L1, L1 + 1); 1092 1009 test_hbitmap_next_dirty_area_check(data, L2 * 2, L2); 1010 + 
test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, INT64_MAX, 5); 1011 + test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, 10, 5); 1012 + test_hbitmap_next_dirty_area_check_limited(data, L2 * 2 + 1, 2, 5); 1093 1013 1094 1014 hbitmap_set(data->hb, 0, L3); 1095 - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); 1015 + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); 1096 1016 } 1097 1017 1098 1018 static void test_hbitmap_next_dirty_area_0(TestHBitmapData *data, ··· 1119 1039 hbitmap_test_init(data, L1, 0); 1120 1040 hbitmap_test_truncate_impl(data, L1 * 2); 1121 1041 hbitmap_set(data->hb, L1 + 1, 1); 1122 - test_hbitmap_next_dirty_area_check(data, 0, UINT64_MAX); 1042 + test_hbitmap_next_dirty_area_check(data, 0, INT64_MAX); 1123 1043 } 1124 1044 1125 1045 int main(int argc, char **argv) ··· 1165 1085 hbitmap_test_add("/hbitmap/truncate/shrink/large", 1166 1086 test_hbitmap_truncate_shrink_large); 1167 1087 1168 - hbitmap_test_add("/hbitmap/meta/zero", test_hbitmap_meta_zero); 1169 - hbitmap_test_add("/hbitmap/meta/one", test_hbitmap_meta_one); 1170 - hbitmap_test_add("/hbitmap/meta/byte", test_hbitmap_meta_byte); 1171 - hbitmap_test_add("/hbitmap/meta/word", test_hbitmap_meta_word); 1172 - hbitmap_test_add("/hbitmap/meta/sector", test_hbitmap_meta_sector); 1173 - 1174 1088 hbitmap_test_add("/hbitmap/serialize/align", 1175 1089 test_hbitmap_serialize_align); 1176 1090 hbitmap_test_add("/hbitmap/serialize/basic", ··· 1183 1097 hbitmap_test_add("/hbitmap/iter/iter_and_reset", 1184 1098 test_hbitmap_iter_and_reset); 1185 1099 1186 - hbitmap_test_add("/hbitmap/next_zero/next_zero_0", 1187 - test_hbitmap_next_zero_0); 1188 - hbitmap_test_add("/hbitmap/next_zero/next_zero_4", 1189 - test_hbitmap_next_zero_4); 1190 - hbitmap_test_add("/hbitmap/next_zero/next_zero_after_truncate", 1191 - test_hbitmap_next_zero_after_truncate); 1100 + hbitmap_test_add("/hbitmap/next_zero/next_x_0", 1101 + test_hbitmap_next_x_0); 1102 + 
hbitmap_test_add("/hbitmap/next_zero/next_x_4", 1103 + test_hbitmap_next_x_4); 1104 + hbitmap_test_add("/hbitmap/next_zero/next_x_after_truncate", 1105 + test_hbitmap_next_x_after_truncate); 1192 1106 1193 1107 hbitmap_test_add("/hbitmap/next_dirty_area/next_dirty_area_0", 1194 1108 test_hbitmap_next_dirty_area_0);
+83 -51
util/hbitmap.c
··· 104 104 /* Advance hbi to the next nonzero word and return it. hbi->pos 105 105 * is updated. Returns zero if we reach the end of the bitmap. 106 106 */ 107 - unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi) 107 + static unsigned long hbitmap_iter_skip_words(HBitmapIter *hbi) 108 108 { 109 109 size_t pos = hbi->pos; 110 110 const HBitmap *hb = hbi->hb; ··· 193 193 } 194 194 } 195 195 196 - int64_t hbitmap_next_zero(const HBitmap *hb, uint64_t start, uint64_t count) 196 + int64_t hbitmap_next_dirty(const HBitmap *hb, int64_t start, int64_t count) 197 + { 198 + HBitmapIter hbi; 199 + int64_t first_dirty_off; 200 + uint64_t end; 201 + 202 + assert(start >= 0 && count >= 0); 203 + 204 + if (start >= hb->orig_size || count == 0) { 205 + return -1; 206 + } 207 + 208 + end = count > hb->orig_size - start ? hb->orig_size : start + count; 209 + 210 + hbitmap_iter_init(&hbi, hb, start); 211 + first_dirty_off = hbitmap_iter_next(&hbi); 212 + 213 + if (first_dirty_off < 0 || first_dirty_off >= end) { 214 + return -1; 215 + } 216 + 217 + return MAX(start, first_dirty_off); 218 + } 219 + 220 + int64_t hbitmap_next_zero(const HBitmap *hb, int64_t start, int64_t count) 197 221 { 198 222 size_t pos = (start >> hb->granularity) >> BITS_PER_LEVEL; 199 223 unsigned long *last_lev = hb->levels[HBITMAP_LEVELS - 1]; ··· 201 225 unsigned start_bit_offset; 202 226 uint64_t end_bit, sz; 203 227 int64_t res; 228 + 229 + assert(start >= 0 && count >= 0); 204 230 205 231 if (start >= hb->orig_size || count == 0) { 206 232 return -1; ··· 244 270 return res; 245 271 } 246 272 247 - bool hbitmap_next_dirty_area(const HBitmap *hb, uint64_t *start, 248 - uint64_t *count) 273 + bool hbitmap_next_dirty_area(const HBitmap *hb, int64_t start, int64_t end, 274 + int64_t max_dirty_count, 275 + int64_t *dirty_start, int64_t *dirty_count) 249 276 { 250 - HBitmapIter hbi; 251 - int64_t firt_dirty_off, area_end; 252 - uint32_t granularity = 1UL << hb->granularity; 253 - uint64_t end; 277 + int64_t 
next_zero; 278 + 279 + assert(start >= 0 && end >= 0 && max_dirty_count > 0); 254 280 255 - if (*start >= hb->orig_size || *count == 0) { 281 + end = MIN(end, hb->orig_size); 282 + if (start >= end) { 256 283 return false; 257 284 } 258 285 259 - end = *count > hb->orig_size - *start ? hb->orig_size : *start + *count; 260 - 261 - hbitmap_iter_init(&hbi, hb, *start); 262 - firt_dirty_off = hbitmap_iter_next(&hbi); 263 - 264 - if (firt_dirty_off < 0 || firt_dirty_off >= end) { 286 + start = hbitmap_next_dirty(hb, start, end - start); 287 + if (start < 0) { 265 288 return false; 266 289 } 267 290 268 - if (firt_dirty_off + granularity >= end) { 269 - area_end = end; 270 - } else { 271 - area_end = hbitmap_next_zero(hb, firt_dirty_off + granularity, 272 - end - firt_dirty_off - granularity); 273 - if (area_end < 0) { 274 - area_end = end; 275 - } 291 + end = start + MIN(end - start, max_dirty_count); 292 + 293 + next_zero = hbitmap_next_zero(hb, start, end - start); 294 + if (next_zero >= 0) { 295 + end = next_zero; 276 296 } 277 297 278 - if (firt_dirty_off > *start) { 279 - *start = firt_dirty_off; 280 - } 281 - *count = area_end - *start; 298 + *dirty_start = start; 299 + *dirty_count = end - start; 282 300 283 301 return true; 284 302 } ··· 296 314 uint64_t hbitmap_count(const HBitmap *hb) 297 315 { 298 316 return hb->count << hb->granularity; 317 + } 318 + 319 + /** 320 + * hbitmap_iter_next_word: 321 + * @hbi: HBitmapIter to operate on. 322 + * @p_cur: Location where to store the next non-zero word. 323 + * 324 + * Return the index of the next nonzero word that is set in @hbi's 325 + * associated HBitmap, and set *p_cur to the content of that word 326 + * (bits before the index that was passed to hbitmap_iter_init are 327 + * trimmed on the first call). Return -1, and set *p_cur to zero, 328 + * if all remaining words are zero. 
329 + */ 330 + static size_t hbitmap_iter_next_word(HBitmapIter *hbi, unsigned long *p_cur) 331 + { 332 + unsigned long cur = hbi->cur[HBITMAP_LEVELS - 1]; 333 + 334 + if (cur == 0) { 335 + cur = hbitmap_iter_skip_words(hbi); 336 + if (cur == 0) { 337 + *p_cur = 0; 338 + return -1; 339 + } 340 + } 341 + 342 + /* The next call will resume work from the next word. */ 343 + hbi->cur[HBITMAP_LEVELS - 1] = 0; 344 + *p_cur = cur; 345 + return hbi->pos; 299 346 } 300 347 301 348 /* Count the number of set bits between start and end, not accounting for ··· 716 763 HBitmap *hb = g_new0(struct HBitmap, 1); 717 764 unsigned i; 718 765 766 + assert(size <= INT64_MAX); 719 767 hb->orig_size = size; 720 768 721 769 assert(granularity >= 0 && granularity < 64); ··· 746 794 uint64_t num_elements = size; 747 795 uint64_t old; 748 796 797 + assert(size <= INT64_MAX); 749 798 hb->orig_size = size; 750 799 751 800 /* Size comes in as logical elements, adjust for granularity. */ ··· 803 852 */ 804 853 static void hbitmap_sparse_merge(HBitmap *dst, const HBitmap *src) 805 854 { 806 - uint64_t offset = 0; 807 - uint64_t count = src->orig_size; 855 + int64_t offset; 856 + int64_t count; 808 857 809 - while (hbitmap_next_dirty_area(src, &offset, &count)) { 858 + for (offset = 0; 859 + hbitmap_next_dirty_area(src, offset, src->orig_size, INT64_MAX, 860 + &offset, &count); 861 + offset += count) 862 + { 810 863 hbitmap_set(dst, offset, count); 811 - offset += count; 812 - if (offset >= src->orig_size) { 813 - break; 814 - } 815 - count = src->orig_size - offset; 816 864 } 817 865 } 818 866 ··· 872 920 result->count = hb_count_between(result, 0, result->size - 1); 873 921 874 922 return true; 875 - } 876 - 877 - HBitmap *hbitmap_create_meta(HBitmap *hb, int chunk_size) 878 - { 879 - assert(!(chunk_size & (chunk_size - 1))); 880 - assert(!hb->meta); 881 - hb->meta = hbitmap_alloc(hb->size << hb->granularity, 882 - hb->granularity + ctz32(chunk_size)); 883 - return hb->meta; 884 - } 885 - 886 - 
void hbitmap_free_meta(HBitmap *hb) 887 - { 888 - assert(hb->meta); 889 - hbitmap_free(hb->meta); 890 - hb->meta = NULL; 891 923 } 892 924 893 925 char *hbitmap_sha256(const HBitmap *bitmap, Error **errp)